@@ -3375,6 +3375,23 @@ static bool hugetlbfs_pagecache_present(struct hstate *h,
 	return page != NULL;
 }
 
+int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
+			   pgoff_t idx)
+{
+	struct inode *inode = mapping->host;
+	struct hstate *h = hstate_inode(inode);
+	int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
+
+	if (err)
+		return err;
+	ClearPagePrivate(page);
+
+	spin_lock(&inode->i_lock);
+	inode->i_blocks += blocks_per_huge_page(h);
+	spin_unlock(&inode->i_lock);
+	return 0;
+}
+
 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			   struct address_space *mapping, pgoff_t idx,
 			   unsigned long address, pte_t *ptep, unsigned int flags)
@@ -3422,21 +3439,13 @@ retry:
 	set_page_huge_active(page);
 
 	if (vma->vm_flags & VM_MAYSHARE) {
-		int err;
-		struct inode *inode = mapping->host;
-
-		err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
+		int err = huge_add_to_page_cache(page, mapping, idx);
 		if (err) {
 			put_page(page);
 			if (err == -EEXIST)
 				goto retry;
 			goto out;
 		}
-		ClearPagePrivate(page);
-
-		spin_lock(&inode->i_lock);
-		inode->i_blocks += blocks_per_huge_page(h);
-		spin_unlock(&inode->i_lock);
 	} else {
 		lock_page(page);
 		if (unlikely(anon_vma_prepare(vma))) {
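
The hunks above factor the shared-mapping page cache insertion out of
hugetlb_no_page() into a new huge_add_to_page_cache() helper: it wraps
add_to_page_cache(), clears PagePrivate (the page's reservation is
considered consumed once it sits in the cache), and charges the huge
page to the inode's i_blocks under i_lock. A minimal sketch, assuming
the helper is exported for reuse by other call sites, of how such a
caller might look; example_populate_index() is a hypothetical name,
and the alloc_huge_page() call is an assumption about the allocator's
signature, not part of this patch:

	#include <linux/err.h>
	#include <linux/hugetlb.h>
	#include <linux/pagemap.h>

	/*
	 * Hypothetical caller (a sketch, not from this patch): ensure
	 * the page cache holds a huge page at @idx, tolerating a
	 * racing inserter.
	 */
	static int example_populate_index(struct vm_area_struct *vma,
					  struct address_space *mapping,
					  unsigned long addr, pgoff_t idx)
	{
		struct page *page;
		int err;

		/* Assumed allocator; returns ERR_PTR() on failure. */
		page = alloc_huge_page(vma, addr, 0);
		if (IS_ERR(page))
			return PTR_ERR(page);

		/*
		 * On success the helper also clears PagePrivate and
		 * adds blocks_per_huge_page() to inode->i_blocks.
		 */
		err = huge_add_to_page_cache(page, mapping, idx);
		if (err) {
			put_page(page);
			if (err == -EEXIST)
				return 0; /* raced: index already populated */
			return err;
		}
		return 0;
	}

Keeping the i_blocks accounting inside the helper means every path
that inserts a huge page into the page cache accounts it identically,
which is presumably the point of factoring it out of hugetlb_no_page().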