@@ -761,15 +761,6 @@ static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
 	return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
 }
 
-static inline struct page *alloc_hugepage_vma(int defrag,
-					      struct vm_area_struct *vma,
-					      unsigned long haddr, int nd,
-					      gfp_t extra_gfp)
-{
-	return alloc_pages_vma(alloc_hugepage_gfpmask(defrag, extra_gfp),
-			       HPAGE_PMD_ORDER, vma, haddr, nd);
-}
-
 /* Caller must hold page table lock. */
 static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
 		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
@@ -790,6 +781,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			       unsigned long address, pmd_t *pmd,
 			       unsigned int flags)
 {
+	gfp_t gfp;
 	struct page *page;
 	unsigned long haddr = address & HPAGE_PMD_MASK;
 
@@ -824,8 +816,8 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		}
 		return 0;
 	}
-	page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
-			vma, haddr, numa_node_id(), 0);
+	gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0);
+	page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
 	if (unlikely(!page)) {
 		count_vm_event(THP_FAULT_FALLBACK);
 		return VM_FAULT_FALLBACK;
@@ -1113,10 +1105,12 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	spin_unlock(ptl);
 alloc:
 	if (transparent_hugepage_enabled(vma) &&
-	    !transparent_hugepage_debug_cow())
-		new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
-					      vma, haddr, numa_node_id(), 0);
-	else
+	    !transparent_hugepage_debug_cow()) {
+		gfp_t gfp;
+
+		gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0);
+		new_page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
+	} else
 		new_page = NULL;
 
 	if (unlikely(!new_page)) {
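
The hunks above drop the old alloc_hugepage_vma() inline, which took a defrag flag plus a NUMA node and built the gfp mask internally, and convert both callers to computing the gfp mask up front with alloc_hugepage_gfpmask() and passing it, together with an explicit order, to a new four-argument alloc_hugepage_vma(). The definition of that replacement helper is in a hunk not shown in this fragment; a minimal sketch consistent with the new call sites, assuming it keeps the removed inline's behavior of allocating on the local node, might look like:

	/*
	 * Hypothetical sketch only: the real replacement helper is defined
	 * outside this fragment.  It takes a precomputed gfp mask and an
	 * explicit order, and (by assumption) still targets the local node,
	 * as the removed inline's callers did via numa_node_id().
	 */
	#define alloc_hugepage_vma(gfp_mask, vma, addr, order)		\
		alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id())

Splitting the mask computation from the allocation lets callers reuse or adjust the gfp mask before allocating, instead of threading the defrag flag and an extra_gfp argument through the allocation helper.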