@@ -419,13 +419,24 @@ void resv_map_release(struct kref *ref)
 	kfree(resv_map);
 }
 
+static inline struct resv_map *inode_resv_map(struct inode *inode)
+{
+	return inode->i_mapping->private_data;
+}
+
 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
 {
 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
-	if (!(vma->vm_flags & VM_MAYSHARE))
+	if (vma->vm_flags & VM_MAYSHARE) {
+		struct address_space *mapping = vma->vm_file->f_mapping;
+		struct inode *inode = mapping->host;
+
+		return inode_resv_map(inode);
+
+	} else {
 		return (struct resv_map *)(get_vma_private_data(vma) &
 					~HPAGE_RESV_MASK);
-	return NULL;
+	}
 }
 
 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
@@ -1167,48 +1178,34 @@ static void return_unused_surplus_pages(struct hstate *h,
 static long vma_needs_reservation(struct hstate *h,
 			struct vm_area_struct *vma, unsigned long addr)
 {
-	struct address_space *mapping = vma->vm_file->f_mapping;
-	struct inode *inode = mapping->host;
-
-	if (vma->vm_flags & VM_MAYSHARE) {
-		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
-		struct resv_map *resv = inode->i_mapping->private_data;
-
-		return region_chg(resv, idx, idx + 1);
+	struct resv_map *resv;
+	pgoff_t idx;
+	long chg;
 
-	} else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
+	resv = vma_resv_map(vma);
+	if (!resv)
 		return 1;
 
-	} else {
-		long err;
-		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
-		struct resv_map *resv = vma_resv_map(vma);
+	idx = vma_hugecache_offset(h, vma, addr);
+	chg = region_chg(resv, idx, idx + 1);
 
-		err = region_chg(resv, idx, idx + 1);
-		if (err < 0)
-			return err;
-		return 0;
-	}
+	if (vma->vm_flags & VM_MAYSHARE)
+		return chg;
+	else
+		return chg < 0 ? chg : 0;
 }
 static void vma_commit_reservation(struct hstate *h,
 			struct vm_area_struct *vma, unsigned long addr)
 {
-	struct address_space *mapping = vma->vm_file->f_mapping;
-	struct inode *inode = mapping->host;
-
-	if (vma->vm_flags & VM_MAYSHARE) {
-		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
-		struct resv_map *resv = inode->i_mapping->private_data;
-
-		region_add(resv, idx, idx + 1);
+	struct resv_map *resv;
+	pgoff_t idx;
 
-	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
-		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
-		struct resv_map *resv = vma_resv_map(vma);
+	resv = vma_resv_map(vma);
+	if (!resv)
+		return;
 
-		/* Mark this page used in the map. */
-		region_add(resv, idx, idx + 1);
-	}
+	idx = vma_hugecache_offset(h, vma, addr);
+	region_add(resv, idx, idx + 1);
 }
 
 static struct page *alloc_huge_page(struct vm_area_struct *vma,
@@ -2271,7 +2268,7 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
 	 * after this open call completes. It is therefore safe to take a
 	 * new reference here without additional locking.
 	 */
-	if (resv)
+	if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
 		kref_get(&resv->refs);
 }
 
@@ -2280,23 +2277,21 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
 	struct hstate *h = hstate_vma(vma);
 	struct resv_map *resv = vma_resv_map(vma);
 	struct hugepage_subpool *spool = subpool_vma(vma);
-	unsigned long reserve;
-	unsigned long start;
-	unsigned long end;
+	unsigned long reserve, start, end;
 
-	if (resv) {
-		start = vma_hugecache_offset(h, vma, vma->vm_start);
-		end = vma_hugecache_offset(h, vma, vma->vm_end);
+	if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
+		return;
 
-		reserve = (end - start) -
-			region_count(resv, start, end);
+	start = vma_hugecache_offset(h, vma, vma->vm_start);
+	end = vma_hugecache_offset(h, vma, vma->vm_end);
 
-		kref_put(&resv->refs, resv_map_release);
+	reserve = (end - start) - region_count(resv, start, end);
 
-		if (reserve) {
-			hugetlb_acct_memory(h, -reserve);
-			hugepage_subpool_put_pages(spool, reserve);
-		}
+	kref_put(&resv->refs, resv_map_release);
+
+	if (reserve) {
+		hugetlb_acct_memory(h, -reserve);
+		hugepage_subpool_put_pages(spool, reserve);
 	}
 }
 
@@ -3189,7 +3184,7 @@ int hugetlb_reserve_pages(struct inode *inode,
 	 * called to make the mapping read-write. Assume !vma is a shm mapping
 	 */
 	if (!vma || vma->vm_flags & VM_MAYSHARE) {
-		resv_map = inode->i_mapping->private_data;
+		resv_map = inode_resv_map(inode);
 
 		chg = region_chg(resv_map, from, to);
 
@@ -3248,7 +3243,7 @@ out_err:
 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
 {
 	struct hstate *h = hstate_inode(inode);
-	struct resv_map *resv_map = inode->i_mapping->private_data;
+	struct resv_map *resv_map = inode_resv_map(inode);
 	long chg = 0;
 	struct hugepage_subpool *spool = subpool_inode(inode);
 
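
Note on the setup assumed above: inode_resv_map() is only safe because every hugetlbfs inode is expected to carry a resv_map in inode->i_mapping->private_data from the moment it is created. That wiring happens at inode allocation time, outside this diff; the following is a minimal sketch of the expected setup (paraphrased for illustration, not quoted from the series), using the existing resv_map_alloc()/resv_map_release() helpers:

	struct resv_map *resv_map;
	struct inode *inode;

	/* Allocate the reservation map first, so failure is trivial to
	 * unwind before any inode state exists. */
	resv_map = resv_map_alloc();
	if (!resv_map)
		return NULL;

	inode = new_inode(sb);
	if (inode)
		inode->i_mapping->private_data = resv_map;
	else
		kref_put(&resv_map->refs, resv_map_release);

With that invariant in place, vma_resv_map() returns a valid map for every VM_MAYSHARE vma, so a NULL return now means exactly "private mapping that does not own a reserve" -- the property the new is_vma_resv_set(vma, HPAGE_RESV_OWNER) checks in hugetlb_vm_op_open()/hugetlb_vm_op_close() rely on.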