@@ -3085,7 +3085,6 @@ out:
 	return page;
 }
 
-
 /*
  * Maximum number of compaction retries wit a progress before OOM
  * killer is consider as the only way to move forward.
@@ -3373,11 +3372,6 @@ bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
 	return false;
 }
 
-static inline bool is_thp_gfp_mask(gfp_t gfp_mask)
-{
-	return (gfp_mask & (GFP_TRANSHUGE | __GFP_KSWAPD_RECLAIM)) == GFP_TRANSHUGE;
-}
-
 /*
  * Maximum number of reclaim retries without any progress before OOM killer
  * is consider as the only way to move forward.
@@ -3536,8 +3530,11 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	if (page)
 		goto got_pg;
 
-	/* Checks for THP-specific high-order allocations */
-	if (is_thp_gfp_mask(gfp_mask)) {
+	/*
+	 * Checks for costly allocations with __GFP_NORETRY, which
+	 * includes THP page fault allocations
+	 */
+	if (gfp_mask & __GFP_NORETRY) {
 		/*
 		 * If compaction is deferred for high-order allocations,
 		 * it is because sync compaction recently failed. If
@@ -3557,11 +3554,10 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 			goto nopage;
 
 		/*
-		 * It can become very expensive to allocate transparent
-		 * hugepages at fault, so use asynchronous memory
-		 * compaction for THP unless it is khugepaged trying to
-		 * collapse. All other requests should tolerate at
-		 * least light sync migration.
+		 * Looks like reclaim/compaction is worth trying, but
+		 * sync compaction could be very expensive, so keep
+		 * using async compaction, unless it's khugepaged
+		 * trying to collapse.
 		 */
 		if (!(current->flags & PF_KTHREAD))
 			migration_mode = MIGRATE_ASYNC;
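
For readers unfamiliar with the masking idiom used by the removed is_thp_gfp_mask() helper: an expression of the form (mask & (A | B)) == A is true only when every bit of A is set and every bit of B is clear. The standalone sketch below illustrates that pattern with made-up flag values; FAKE_TRANSHUGE and FAKE_KSWAPD_RECLAIM are illustrative placeholders, not the kernel's real GFP bit values, and is_thp_like() is a hypothetical userspace stand-in for the removed helper.

#include <assert.h>
#include <stdio.h>

/* Hypothetical flag values for illustration only; not the kernel's GFP bits. */
#define FAKE_TRANSHUGE		(0x01u | 0x02u)	/* stands in for GFP_TRANSHUGE */
#define FAKE_KSWAPD_RECLAIM	0x04u		/* stands in for __GFP_KSWAPD_RECLAIM */

/*
 * Same masking idiom as the removed is_thp_gfp_mask(): true only if every
 * FAKE_TRANSHUGE bit is set and FAKE_KSWAPD_RECLAIM is clear.
 */
static int is_thp_like(unsigned int mask)
{
	return (mask & (FAKE_TRANSHUGE | FAKE_KSWAPD_RECLAIM)) == FAKE_TRANSHUGE;
}

int main(void)
{
	assert(is_thp_like(FAKE_TRANSHUGE));				/* exact match */
	assert(is_thp_like(FAKE_TRANSHUGE | 0x10u));			/* unrelated extra bit is fine */
	assert(!is_thp_like(FAKE_TRANSHUGE | FAKE_KSWAPD_RECLAIM));	/* kswapd-reclaim bit must be clear */
	assert(!is_thp_like(0x01u));					/* only part of TRANSHUGE set */
	printf("masking idiom behaves as expected\n");
	return 0;
}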