@@ -632,37 +632,27 @@ release:
 static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma, unsigned long addr)
 {
 	const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE);
-	gfp_t this_node = 0;
-#ifdef CONFIG_NUMA
-	struct mempolicy *pol;
-
-	/*
-	 * __GFP_THISNODE is used only when __GFP_DIRECT_RECLAIM is not
-	 * specified, to express a general desire to stay on the current
-	 * node for optimistic allocation attempts. If the defrag mode
-	 * and/or madvise hint requires the direct reclaim then we prefer
-	 * to fallback to other node rather than node reclaim because that
-	 * can lead to excessive reclaim even though there is free memory
-	 * on other nodes. We expect that NUMA preferences are specified
-	 * by memory policies.
-	 */
-	pol = get_vma_policy(vma, addr);
-	if (pol->mode != MPOL_BIND)
-		this_node = __GFP_THISNODE;
-	mpol_cond_put(pol);
-#endif
+	const gfp_t gfp_mask = GFP_TRANSHUGE_LIGHT | __GFP_THISNODE;
 
+	/* Always do synchronous compaction */
 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
-		return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);
+		return GFP_TRANSHUGE | __GFP_THISNODE |
+		       (vma_madvised ? 0 : __GFP_NORETRY);
+
+	/* Kick kcompactd and fail quickly */
 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
-		return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM | this_node;
+		return gfp_mask | __GFP_KSWAPD_RECLAIM;
+
+	/* Synchronous compaction if madvised, otherwise kick kcompactd */
 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
-		return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
-							     __GFP_KSWAPD_RECLAIM | this_node);
+		return gfp_mask | (vma_madvised ? __GFP_DIRECT_RECLAIM :
+						  __GFP_KSWAPD_RECLAIM);
+
+	/* Only do synchronous compaction if madvised */
 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
-		return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
-							     this_node);
-	return GFP_TRANSHUGE_LIGHT | this_node;
+		return gfp_mask | (vma_madvised ? __GFP_DIRECT_RECLAIM : 0);
+
+	return gfp_mask;
 }
 
 /* Caller must hold page table lock. */