@@ -3185,29 +3185,21 @@ out:
 static struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 		unsigned int alloc_flags, const struct alloc_context *ac,
-		enum migrate_mode mode, int *contended_compaction,
-		bool *deferred_compaction)
+		enum migrate_mode mode, enum compact_result *compact_result)
 {
-	enum compact_result compact_result;
 	struct page *page;
+	int contended_compaction;
 
 	if (!order)
 		return NULL;
 
 	current->flags |= PF_MEMALLOC;
-	compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
-						mode, contended_compaction);
+	*compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
+						mode, &contended_compaction);
 	current->flags &= ~PF_MEMALLOC;
 
-	switch (compact_result) {
-	case COMPACT_DEFERRED:
-		*deferred_compaction = true;
-		/* fall-through */
-	case COMPACT_SKIPPED:
+	if (*compact_result <= COMPACT_INACTIVE)
 		return NULL;
-	default:
-		break;
-	}
 
 	/*
 	 * At least in one zone compaction wasn't deferred or skipped, so let's
@@ -3233,6 +3225,24 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	 */
 	count_vm_event(COMPACTFAIL);
 
+	/*
+	 * In all zones where compaction was attempted (and not
+	 * deferred or skipped), lock contention has been detected.
+	 * For THP allocation we do not want to disrupt the others
+	 * so we fallback to base pages instead.
+	 */
+	if (contended_compaction == COMPACT_CONTENDED_LOCK)
+		*compact_result = COMPACT_CONTENDED;
+
+	/*
+	 * If compaction was aborted due to need_resched(), we do not
+	 * want to further increase allocation latency, unless it is
+	 * khugepaged trying to collapse.
+	 */
+	if (contended_compaction == COMPACT_CONTENDED_SCHED
+			&& !(current->flags & PF_KTHREAD))
+		*compact_result = COMPACT_CONTENDED;
+
 	cond_resched();
 
 	return NULL;
@@ -3241,8 +3251,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 static inline struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 		unsigned int alloc_flags, const struct alloc_context *ac,
-		enum migrate_mode mode, int *contended_compaction,
-		bool *deferred_compaction)
+		enum migrate_mode mode, enum compact_result *compact_result)
 {
 	return NULL;
 }
@@ -3387,8 +3396,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	unsigned long pages_reclaimed = 0;
 	unsigned long did_some_progress;
 	enum migrate_mode migration_mode = MIGRATE_ASYNC;
-	bool deferred_compaction = false;
-	int contended_compaction = COMPACT_CONTENDED_NONE;
+	enum compact_result compact_result;
 
 	/*
 	 * In the slowpath, we sanity check order to avoid ever trying to
@@ -3475,8 +3483,7 @@ retry:
 	 */
 	page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
 					migration_mode,
-					&contended_compaction,
-					&deferred_compaction);
+					&compact_result);
 	if (page)
 		goto got_pg;
 
@@ -3489,25 +3496,14 @@ retry:
 		 * to heavily disrupt the system, so we fail the allocation
 		 * instead of entering direct reclaim.
 		 */
-		if (deferred_compaction)
-			goto nopage;
-
-		/*
-		 * In all zones where compaction was attempted (and not
-		 * deferred or skipped), lock contention has been detected.
-		 * For THP allocation we do not want to disrupt the others
-		 * so we fallback to base pages instead.
-		 */
-		if (contended_compaction == COMPACT_CONTENDED_LOCK)
+		if (compact_result == COMPACT_DEFERRED)
 			goto nopage;
 
 		/*
-		 * If compaction was aborted due to need_resched(), we do not
-		 * want to further increase allocation latency, unless it is
-		 * khugepaged trying to collapse.
+		 * Compaction is contended so rather back off than cause
+		 * excessive stalls.
 		 */
-		if (contended_compaction == COMPACT_CONTENDED_SCHED
-			&& !(current->flags & PF_KTHREAD))
+		if (compact_result == COMPACT_CONTENDED)
 			goto nopage;
 	}
 
@@ -3555,8 +3551,7 @@ noretry:
 	 */
 	page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags,
 					ac, migration_mode,
-					&contended_compaction,
-					&deferred_compaction);
+					&compact_result);
 	if (page)
 		goto got_pg;
 nopage:
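
The new interface leans on the ordering of enum compact_result: every "compaction never ran" state sorts at or below COMPACT_INACTIVE, so the single comparison *compact_result <= COMPACT_INACTIVE replaces the old switch, and contention is reported through the same enum as COMPACT_CONTENDED. Below is a minimal, self-contained sketch of that pattern; the DEMO_* names and the decision strings are hypothetical stand-ins for illustration, not the kernel's actual definitions.

#include <stdio.h>

/*
 * Toy enum mirroring the idea of an ordered compaction result:
 * everything at or below the "inactive" boundary means compaction
 * never ran, and contention is just another value of the same enum.
 * These DEMO_* names are illustrative only.
 */
enum demo_compact_result {
	DEMO_COMPACT_SKIPPED,				/* not attempted */
	DEMO_COMPACT_DEFERRED,				/* skipped after recent failures */
	DEMO_COMPACT_INACTIVE = DEMO_COMPACT_DEFERRED,	/* "never ran" upper bound */
	DEMO_COMPACT_CONTENDED,				/* aborted on contention */
	DEMO_COMPACT_COMPLETE,				/* actually ran */
};

/* A caller in the style of the slowpath above, reduced to plain comparisons. */
static const char *allocation_decision(enum demo_compact_result result)
{
	if (result <= DEMO_COMPACT_INACTIVE)
		return "compaction never ran";
	if (result == DEMO_COMPACT_CONTENDED)
		return "back off rather than stall";
	return "compaction ran but did not help; try reclaim";
}

int main(void)
{
	printf("%s\n", allocation_decision(DEMO_COMPACT_DEFERRED));
	printf("%s\n", allocation_decision(DEMO_COMPACT_CONTENDED));
	return 0;
}

Collapsing the deferred flag and the contention code into one ordered enum is what lets the slowpath drop the two extra out-parameters threaded through __alloc_pages_direct_compact and make its decisions with simple comparisons.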