@@ -459,6 +459,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 	unsigned long flags;
 	bool locked = false;
 	struct page *page = NULL, *valid_page = NULL;
+	bool skipped_async_unsuitable = false;
 
 	/*
 	 * Ensure that there are not too many pages isolated from the LRU
@@ -534,6 +535,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 		if (!cc->sync && last_pageblock_nr != pageblock_nr &&
 		    !migrate_async_suitable(get_pageblock_migratetype(page))) {
 			cc->finished_update_migrate = true;
+			skipped_async_unsuitable = true;
 			goto next_pageblock;
 		}
 
@@ -627,8 +629,13 @@ next_pageblock:
 	if (locked)
 		spin_unlock_irqrestore(&zone->lru_lock, flags);
 
-	/* Update the pageblock-skip if the whole pageblock was scanned */
-	if (low_pfn == end_pfn)
+	/*
+	 * Update the pageblock-skip information and cached scanner pfn,
+	 * if the whole pageblock was scanned without isolating any page.
+	 * This is not done when pageblock was skipped due to being unsuitable
+	 * for async compaction, so that eventual sync compaction can try.
+	 */
+	if (low_pfn == end_pfn && !skipped_async_unsuitable)
 		update_pageblock_skip(cc, valid_page, nr_isolated, true);
 
 	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);