@@ -1252,7 +1252,10 @@ static enum compact_result __compact_finished(struct zone *zone, struct compact_
 		if (cc->direct_compaction)
 			zone->compact_blockskip_flush = true;
 
-		return COMPACT_COMPLETE;
+		if (cc->whole_zone)
+			return COMPACT_COMPLETE;
+		else
+			return COMPACT_PARTIAL_SKIPPED;
 	}
 
 	if (is_via_compact_memory(cc->order))
@@ -1413,6 +1416,10 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro
 		zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
 		zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
 	}
+
+	if (cc->migrate_pfn == start_pfn)
+		cc->whole_zone = true;
+
 	cc->last_migrated_pfn = 0;
 
 	trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
@@ -1634,7 +1641,8 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
 			goto break_loop;
 		}
 
-		if (mode != MIGRATE_ASYNC && status == COMPACT_COMPLETE) {
+		if (mode != MIGRATE_ASYNC && (status == COMPACT_COMPLETE ||
+					status == COMPACT_PARTIAL_SKIPPED)) {
 			/*
 			 * We think that allocation won't succeed in this zone
 			 * so we defer compaction there. If it ends up
@@ -1881,7 +1889,7 @@ static void kcompactd_do_work(pg_data_t *pgdat)
 						cc.classzone_idx, 0)) {
 			success = true;
 			compaction_defer_reset(zone, cc.order, false);
-		} else if (status == COMPACT_COMPLETE) {
+		} else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) {
 			/*
 			 * We use sync migration mode here, so we defer like
 			 * sync direct compaction does.