|
@@ -151,8 +151,9 @@ struct file_region {
 	long to;
 };
 
-static long region_add(struct list_head *head, long f, long t)
+static long region_add(struct resv_map *resv, long f, long t)
 {
+	struct list_head *head = &resv->regions;
 	struct file_region *rg, *nrg, *trg;
 
 	/* Locate the region we are either in or before. */
@@ -187,8 +188,9 @@ static long region_add(struct list_head *head, long f, long t)
 	return 0;
 }
 
-static long region_chg(struct list_head *head, long f, long t)
+static long region_chg(struct resv_map *resv, long f, long t)
 {
+	struct list_head *head = &resv->regions;
 	struct file_region *rg, *nrg;
 	long chg = 0;
 
@@ -236,8 +238,9 @@ static long region_chg(struct list_head *head, long f, long t)
 	return chg;
 }
 
-static long region_truncate(struct list_head *head, long end)
+static long region_truncate(struct resv_map *resv, long end)
 {
+	struct list_head *head = &resv->regions;
 	struct file_region *rg, *trg;
 	long chg = 0;
 
@@ -266,8 +269,9 @@ static long region_truncate(struct list_head *head, long end)
 	return chg;
 }
 
-static long region_count(struct list_head *head, long f, long t)
+static long region_count(struct resv_map *resv, long f, long t)
 {
+	struct list_head *head = &resv->regions;
 	struct file_region *rg;
 	long chg = 0;
 
@@ -393,7 +397,7 @@ void resv_map_release(struct kref *ref)
 	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
 
 	/* Clear out any active regions before we release the map. */
-	region_truncate(&resv_map->regions, 0);
+	region_truncate(resv_map, 0);
 	kfree(resv_map);
 }
 
@@ -1152,7 +1156,7 @@ static long vma_needs_reservation(struct hstate *h,
 		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
 		struct resv_map *resv = inode->i_mapping->private_data;
 
-		return region_chg(&resv->regions, idx, idx + 1);
+		return region_chg(resv, idx, idx + 1);
 
 	} else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
 		return 1;
@@ -1162,7 +1166,7 @@ static long vma_needs_reservation(struct hstate *h,
 		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
 		struct resv_map *resv = vma_resv_map(vma);
 
-		err = region_chg(&resv->regions, idx, idx + 1);
+		err = region_chg(resv, idx, idx + 1);
 		if (err < 0)
 			return err;
 		return 0;
@@ -1178,14 +1182,14 @@ static void vma_commit_reservation(struct hstate *h,
 		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
 		struct resv_map *resv = inode->i_mapping->private_data;
 
-		region_add(&resv->regions, idx, idx + 1);
+		region_add(resv, idx, idx + 1);
 
 	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
 		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
 		struct resv_map *resv = vma_resv_map(vma);
 
 		/* Mark this page used in the map. */
-		region_add(&resv->regions, idx, idx + 1);
+		region_add(resv, idx, idx + 1);
 	}
 }
 
@@ -2276,7 +2280,7 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
 		end = vma_hugecache_offset(h, vma, vma->vm_end);
 
 		reserve = (end - start) -
-			region_count(&resv->regions, start, end);
+			region_count(resv, start, end);
 
 		resv_map_put(vma);
 
@@ -3178,7 +3182,7 @@ int hugetlb_reserve_pages(struct inode *inode,
 	if (!vma || vma->vm_flags & VM_MAYSHARE) {
 		resv_map = inode->i_mapping->private_data;
 
-		chg = region_chg(&resv_map->regions, from, to);
+		chg = region_chg(resv_map, from, to);
 
 	} else {
 		resv_map = resv_map_alloc();
@@ -3224,7 +3228,7 @@ int hugetlb_reserve_pages(struct inode *inode,
 	 * else has to be done for private mappings here
 	 */
 	if (!vma || vma->vm_flags & VM_MAYSHARE)
-		region_add(&resv_map->regions, from, to);
+		region_add(resv_map, from, to);
 	return 0;
 out_err:
 	if (vma)
@@ -3240,7 +3244,7 @@ void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
 	struct hugepage_subpool *spool = subpool_inode(inode);
 
 	if (resv_map)
-		chg = region_truncate(&resv_map->regions, offset);
+		chg = region_truncate(resv_map, offset);
 	spin_lock(&inode->i_lock);
 	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
 	spin_unlock(&inode->i_lock);