@@ -638,12 +638,13 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 {
 	struct zone *zone = cc->zone;
 	unsigned long nr_scanned = 0, nr_isolated = 0;
-	struct list_head *migratelist = &cc->migratepages;
 	struct lruvec *lruvec;
 	unsigned long flags = 0;
 	bool locked = false;
 	struct page *page = NULL, *valid_page = NULL;
 	unsigned long start_pfn = low_pfn;
+	bool skip_on_failure = false;
+	unsigned long next_skip_pfn = 0;
 
 	/*
 	 * Ensure that there are not too many pages isolated from the LRU
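
The order-aligned block boundary used throughout this patch comes from the block_end_pfn() helper defined near the top of mm/compaction.c. A minimal sketch of the arithmetic, with ALIGN() expanded to the usual round-up-to-a-power-of-two form (a simplification of the kernel macro):

	#define ALIGN(x, a)			(((x) + (a) - 1) & ~((a) - 1))
	#define block_end_pfn(pfn, order)	ALIGN((pfn) + 1, 1UL << (order))

	/*
	 * With cc->order == 9 (512 pages, 2MB on x86):
	 * block_end_pfn(1000, 9) == 1024, the first pfn past the current block.
	 * The "+ 1" makes the result strictly greater than pfn, so a pfn that
	 * already sits on a boundary maps to the *next* boundary.
	 */
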
@@ -664,10 +665,37 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 	if (compact_should_abort(cc))
 		return 0;
 
+	if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
+		skip_on_failure = true;
+		next_skip_pfn = block_end_pfn(low_pfn, cc->order);
+	}
+
 	/* Time to isolate some pages for migration */
 	for (; low_pfn < end_pfn; low_pfn++) {
 		bool is_lru;
 
+		if (skip_on_failure && low_pfn >= next_skip_pfn) {
+			/*
+			 * We have isolated all migration candidates in the
+			 * previous order-aligned block, and did not skip it due
+			 * to failure. We should migrate the pages now and
+			 * hopefully succeed compaction.
+			 */
+			if (nr_isolated)
+				break;
+
+			/*
+			 * We failed to isolate in the previous order-aligned
+			 * block. Set the new boundary to the end of the
+			 * current block. Note we can't simply increase
+			 * next_skip_pfn by 1 << order, as low_pfn might have
+			 * been incremented by a higher number due to skipping
+			 * a compound or a high-order buddy page in the
+			 * previous loop iteration.
+			 */
+			next_skip_pfn = block_end_pfn(low_pfn, cc->order);
+		}
+
 		/*
 		 * Periodically drop the lock (if held) regardless of its
 		 * contention, to give chance to IRQs. Abort async compaction
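
To make the comment about next_skip_pfn concrete, a worked example with illustrative numbers: with cc->order == 3, blocks are 8 pages. Suppose next_skip_pfn == 24 and the scanner meets an order-5 (32-page) compound page at pfn 16; the loop advances low_pfn past it to 48, crossing three whole blocks. Recomputing from low_pfn gives the correct boundary:

	next_skip_pfn = block_end_pfn(48, 3);	/* == 56, end of the block holding pfn 48 */
	/* a naive next_skip_pfn += 1UL << 3 would yield 32, already behind low_pfn */
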
@@ -679,7 +707,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 			break;
 
 		if (!pfn_valid_within(low_pfn))
-			continue;
+			goto isolate_fail;
 		nr_scanned++;
 
 		page = pfn_to_page(low_pfn);
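
The next several hunks are all the same mechanical conversion: every isolation bail-out that used to continue straight to the next pfn now jumps to the isolate_fail label added at the bottom of the loop (see the @@ -801 hunk below), so failure handling lives in one place. When skip_on_failure is false, the label body reduces to a plain continue and behaviour is unchanged. The resulting loop shape, sketched rather than literal:

	for (; low_pfn < end_pfn; low_pfn++) {
		if (/* any isolation check fails */)
			goto isolate_fail;
		/* ... page isolated ... */
		continue;
isolate_fail:
		if (!skip_on_failure)
			continue;	/* old behaviour */
		/* release partial isolations, fast-forward to the next block */
	}
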
@@ -734,11 +762,11 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 			if (likely(comp_order < MAX_ORDER))
 				low_pfn += (1UL << comp_order) - 1;
 
-			continue;
+			goto isolate_fail;
 		}
 
 		if (!is_lru)
-			continue;
+			goto isolate_fail;
 
 		/*
 		 * Migration will fail if an anonymous page is pinned in memory,
@@ -747,7 +775,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		 */
 		if (!page_mapping(page) &&
 		    page_count(page) > page_mapcount(page))
-			continue;
+			goto isolate_fail;
 
 		/* If we already hold the lock, we can skip some rechecking */
 		if (!locked) {
@@ -758,7 +786,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 
 			/* Recheck PageLRU and PageCompound under lock */
 			if (!PageLRU(page))
-				continue;
+				goto isolate_fail;
 
 			/*
 			 * Page become compound since the non-locked check,
@@ -767,7 +795,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 			 */
 			if (unlikely(PageCompound(page))) {
 				low_pfn += (1UL << compound_order(page)) - 1;
-				continue;
+				goto isolate_fail;
 			}
 		}
 
@@ -775,7 +803,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 
 		/* Try isolate the page */
 		if (__isolate_lru_page(page, isolate_mode) != 0)
-			continue;
+			goto isolate_fail;
 
 		VM_BUG_ON_PAGE(PageCompound(page), page);
 
@@ -783,7 +811,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		del_page_from_lru_list(page, lruvec, page_lru(page));
 
 isolate_success:
-		list_add(&page->lru, migratelist);
+		list_add(&page->lru, &cc->migratepages);
 		cc->nr_migratepages++;
 		nr_isolated++;
 
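
This hunk also motivates dropping the migratelist local in the first hunk: the isolate_fail path below empties cc->migratepages behind the scanner's back via putback_movable_pages(), so the patch consistently reaches the list through cc rather than through a separate alias.
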
@@ -801,6 +829,37 @@ isolate_success:
 			++low_pfn;
 			break;
 		}
+
+		continue;
+isolate_fail:
+		if (!skip_on_failure)
+			continue;
+
+		/*
+		 * We have isolated some pages, but then failed. Release them
+		 * instead of migrating, as we cannot form the cc->order buddy
+		 * page anyway.
+		 */
+		if (nr_isolated) {
+			if (locked) {
+				spin_unlock_irqrestore(&zone->lru_lock, flags);
+				locked = false;
+			}
+			acct_isolated(zone, cc);
+			putback_movable_pages(&cc->migratepages);
+			cc->nr_migratepages = 0;
+			cc->last_migrated_pfn = 0;
+			nr_isolated = 0;
+		}
+
+		if (low_pfn < next_skip_pfn) {
+			low_pfn = next_skip_pfn - 1;
+			/*
+			 * The check near the loop beginning would have updated
+			 * next_skip_pfn too, but this is a bit simpler.
+			 */
+			next_skip_pfn += 1UL << cc->order;
+		}
 	}
 
 	/*
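
A worked example of the fast-forward at the end of the failure path, under the same illustrative numbers (cc->order == 3): if isolation fails at low_pfn == 50 while next_skip_pfn == 56, then:

	low_pfn = next_skip_pfn - 1;		/* 55; the for loop's ++ moves to 56 */
	next_skip_pfn += 1UL << cc->order;	/* 64: end of the 56..63 block */

The low_pfn < next_skip_pfn guard keeps this from moving the scanner backwards when a compound-page skip has already carried low_pfn past the boundary; the loop-top check recomputes next_skip_pfn in that case.
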
@@ -1401,6 +1460,18 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 				ret = COMPACT_CONTENDED;
 				goto out;
 			}
+			/*
+			 * We failed to migrate at least one page in the current
+			 * order-aligned block, so skip the rest of it.
+			 */
+			if (cc->direct_compaction &&
+						(cc->mode == MIGRATE_ASYNC)) {
+				cc->migrate_pfn = block_end_pfn(
+						cc->migrate_pfn - 1, cc->order);
+				/* Draining pcplists is useless in this case */
+				cc->last_migrated_pfn = 0;
+
+			}
 		}
 
check_drain:
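
Finally, the compact_zone() counterpart: when migrate_pages() fails in async direct compaction, the migration scanner is pushed to the next order-aligned boundary as well. The "- 1" matters because block_end_pfn() always returns a pfn strictly greater than its argument; without it, a migrate_pfn already resting on a boundary would skip an entire untouched block. A worked example under the same illustrative assumptions (cc->order == 9):

	cc->migrate_pfn = block_end_pfn(cc->migrate_pfn - 1, cc->order);
	/* migrate_pfn == 1000: block_end_pfn(999, 9)  == 1024, skip rest of block */
	/* migrate_pfn == 1024: block_end_pfn(1023, 9) == 1024, already aligned    */

Resetting cc->last_migrated_pfn to 0 then suppresses the pcplist drain in the check_drain block: since the block is being abandoned, draining could not help form the cc->order page there anyway.
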