@@ -708,7 +708,7 @@ static inline pmd_t mk_huge_pmd(struct page *page, pgprot_t prot)
 static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 					struct vm_area_struct *vma,
 					unsigned long haddr, pmd_t *pmd,
-					struct page *page)
+					struct page *page, gfp_t gfp)
 {
 	struct mem_cgroup *memcg;
 	pgtable_t pgtable;
@@ -716,7 +716,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 
 	VM_BUG_ON_PAGE(!PageCompound(page), page);
 
-	if (mem_cgroup_try_charge(page, mm, GFP_TRANSHUGE, &memcg))
+	if (mem_cgroup_try_charge(page, mm, gfp, &memcg))
 		return VM_FAULT_OOM;
 
 	pgtable = pte_alloc_one(mm, haddr);
@@ -822,7 +822,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		count_vm_event(THP_FAULT_FALLBACK);
 		return VM_FAULT_FALLBACK;
 	}
-	if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page))) {
+	if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page, gfp))) {
 		put_page(page);
 		count_vm_event(THP_FAULT_FALLBACK);
 		return VM_FAULT_FALLBACK;
@@ -1080,6 +1080,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	unsigned long haddr;
 	unsigned long mmun_start;	/* For mmu_notifiers */
 	unsigned long mmun_end;		/* For mmu_notifiers */
+	gfp_t huge_gfp;			/* for allocation and charge */
 
 	ptl = pmd_lockptr(mm, pmd);
 	VM_BUG_ON_VMA(!vma->anon_vma, vma);
@@ -1106,10 +1107,8 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 alloc:
 	if (transparent_hugepage_enabled(vma) &&
 	    !transparent_hugepage_debug_cow()) {
-		gfp_t gfp;
-
-		gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0);
-		new_page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
+		huge_gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0);
+		new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER);
 	} else
 		new_page = NULL;
 
@@ -1130,8 +1129,7 @@ alloc:
 		goto out;
 	}
 
-	if (unlikely(mem_cgroup_try_charge(new_page, mm,
-					   GFP_TRANSHUGE, &memcg))) {
+	if (unlikely(mem_cgroup_try_charge(new_page, mm, huge_gfp, &memcg))) {
 		put_page(new_page);
 		if (page) {
 			split_huge_page(page);
@@ -2323,19 +2321,13 @@ static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
 	return true;
 }
 
-static struct page
-*khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm,
+static struct page *
+khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
 		       struct vm_area_struct *vma, unsigned long address,
 		       int node)
 {
-	gfp_t flags;
-
 	VM_BUG_ON_PAGE(*hpage, *hpage);
 
-	/* Only allocate from the target node */
-	flags = alloc_hugepage_gfpmask(khugepaged_defrag(), __GFP_OTHER_NODE) |
-		__GFP_THISNODE;
-
 	/*
 	 * Before allocating the hugepage, release the mmap_sem read lock.
 	 * The allocation can take potentially a long time if it involves
@@ -2344,7 +2336,7 @@ static struct page
 	 */
 	up_read(&mm->mmap_sem);
 
-	*hpage = alloc_pages_exact_node(node, flags, HPAGE_PMD_ORDER);
+	*hpage = alloc_pages_exact_node(node, gfp, HPAGE_PMD_ORDER);
 	if (unlikely(!*hpage)) {
 		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 		*hpage = ERR_PTR(-ENOMEM);
@@ -2397,13 +2389,14 @@ static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
 	return true;
 }
 
-static struct page
-*khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm,
+static struct page *
+khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
 		       struct vm_area_struct *vma, unsigned long address,
 		       int node)
 {
 	up_read(&mm->mmap_sem);
 	VM_BUG_ON(!*hpage);
+
 	return *hpage;
 }
 #endif
@@ -2438,16 +2431,21 @@ static void collapse_huge_page(struct mm_struct *mm,
 	struct mem_cgroup *memcg;
 	unsigned long mmun_start;	/* For mmu_notifiers */
 	unsigned long mmun_end;		/* For mmu_notifiers */
+	gfp_t gfp;
 
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 
+	/* Only allocate from the target node */
+	gfp = alloc_hugepage_gfpmask(khugepaged_defrag(), __GFP_OTHER_NODE) |
+		__GFP_THISNODE;
+
 	/* release the mmap_sem read lock. */
-	new_page = khugepaged_alloc_page(hpage, mm, vma, address, node);
+	new_page = khugepaged_alloc_page(hpage, gfp, mm, vma, address, node);
 	if (!new_page)
 		return;
 
 	if (unlikely(mem_cgroup_try_charge(new_page, mm,
-					   GFP_TRANSHUGE, &memcg)))
+					   gfp, &memcg)))
 		return;
 
 	/*
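
As a rough illustration of the pattern the diff above applies, the userspace-style sketch below computes one flags value in the caller and passes it to both the allocation and the accounting ("charge") step, instead of letting the charge path hardcode its own mask. It is not kernel code; the names (alloc_flags_for, try_charge, alloc_buffer, FLAG_*) are invented for the example, and only the design mirrors the patch.

#include <stdio.h>
#include <stdlib.h>

typedef unsigned int flags_t;

#define FLAG_MAY_RECLAIM	0x1	/* caller can tolerate blocking */
#define FLAG_NO_RETRY		0x2	/* fail fast instead of retrying hard */

/* Decide the allocation policy once, from the caller's context. */
static flags_t alloc_flags_for(int can_block)
{
	return can_block ? FLAG_MAY_RECLAIM : FLAG_NO_RETRY;
}

/* Accounting ("charge") step: must honour the same flags as the allocation. */
static int try_charge(size_t size, flags_t flags)
{
	/* A real implementation would back off early when FLAG_NO_RETRY is set. */
	(void)flags;
	return size <= (64u << 20) ? 0 : -1;
}

/* Charge first, then allocate, both under the one flags value. */
static void *alloc_buffer(size_t size, flags_t flags)
{
	if (try_charge(size, flags))
		return NULL;
	return malloc(size);
}

int main(void)
{
	flags_t flags = alloc_flags_for(1);		/* computed once ... */
	void *buf = alloc_buffer(2u << 20, flags);	/* ... used for both steps */

	printf("allocation %s\n", buf ? "succeeded" : "failed");
	free(buf);
	return 0;
}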