@@ -313,8 +313,15 @@ static inline bool compact_should_abort(struct compact_control *cc)
 static bool suitable_migration_target(struct page *page)
 {
 	/* If the page is a large free page, then disallow migration */
-	if (PageBuddy(page) && page_order(page) >= pageblock_order)
-		return false;
+	if (PageBuddy(page)) {
+		/*
+		 * We are checking page_order without zone->lock taken. But
+		 * the only small danger is that we skip a potentially suitable
+		 * pageblock, so it's not worth checking the order for validity.
+		 */
+		if (page_order_unsafe(page) >= pageblock_order)
+			return false;
+	}
 
 	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
 	if (migrate_async_suitable(get_pageblock_migratetype(page)))
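
For reference, page_order_unsafe() itself is not shown in these hunks. A
minimal sketch of a plausible definition, assuming it mirrors page_order()
but loads page_private() through READ_ONCE() so it can be called without
zone->lock (an illustration, not necessarily the exact helper the patch
introduces):

	/*
	 * Like page_order(), but callable without zone->lock. READ_ONCE()
	 * forces a single load of page_private(), so the compiler cannot
	 * re-read it and see one value in a range check but another at the
	 * point of use. Callers must treat the result as untrusted.
	 */
	#define page_order_unsafe(page)		READ_ONCE(page_private(page))

Callers are expected to check PageBuddy() first to shrink the race window,
as both call sites here do, and to range-check the result wherever a
garbage value could do harm, as the scanner hunk below does.
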
@@ -608,11 +615,23 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 			valid_page = page;
 
 		/*
-		 * Skip if free. page_order cannot be used without zone->lock
-		 * as nothing prevents parallel allocations or buddy merging.
+		 * Skip if free. We read page order here without zone lock
+		 * which is generally unsafe, but the race window is small and
+		 * the worst thing that can happen is that we skip some
+		 * potential isolation targets.
 		 */
-		if (PageBuddy(page))
+		if (PageBuddy(page)) {
+			unsigned long freepage_order = page_order_unsafe(page);
+
+			/*
+			 * Without lock, we cannot be sure that what we got is
+			 * a valid page order. Consider only values in the
+			 * valid order range to prevent low_pfn overflow.
+			 */
+			if (freepage_order > 0 && freepage_order < MAX_ORDER)
+				low_pfn += (1UL << freepage_order) - 1;
 			continue;
+		}
 
 		/*
 		 * Check may be lockless but that's ok as we recheck later.
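
To see why freepage_order must be confined to (0, MAX_ORDER) before it is
used as a shift count, consider what an unconstrained racy read could do.
A hedged user-space sketch, with invented values and MAX_ORDER fixed at
its common default of 11:

	#include <stdio.h>

	#define MAX_ORDER 11	/* common kernel default, assumed here */

	int main(void)
	{
		unsigned long low_pfn = 0x200200;
		/* a parallel free or merge can leave a transient garbage value */
		unsigned long freepage_order = 0xdead;

		/*
		 * Unclamped, "1UL << freepage_order" with a shift count of
		 * 0xdead is undefined behaviour, and even smaller garbage
		 * values would catapult low_pfn far past end_pfn. The range
		 * check simply ignores such reads.
		 */
		if (freepage_order > 0 && freepage_order < MAX_ORDER)
			low_pfn += (1UL << freepage_order) - 1;

		printf("low_pfn = %#lx\n", low_pfn);	/* unchanged: 0x200200 */
		return 0;
	}
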
@@ -698,6 +717,13 @@ isolate_success:
 		}
 	}
 
+	/*
+	 * The PageBuddy() check could have potentially brought us outside
+	 * the range to be scanned.
+	 */
+	if (unlikely(low_pfn > end_pfn))
+		low_pfn = end_pfn;
+
 	if (locked)
 		spin_unlock_irqrestore(&zone->lru_lock, flags);
 
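
Even an in-range order can overshoot: the racy read may report a large
order for a pfn that is not actually the start of a buddy page, pushing
low_pfn beyond the range being scanned. A small arithmetic sketch with
invented pfn values shows why the trailing clamp is needed:

	#include <stdio.h>

	int main(void)
	{
		unsigned long end_pfn = 0x200400;	/* end of scanned range */
		unsigned long low_pfn = 0x200200;	/* scanner position */
		unsigned long freepage_order = 10;	/* racy but in-range read */

		low_pfn += (1UL << freepage_order) - 1;	/* now 0x2005ff */

		/* mirrors the clamp added in the hunk above */
		if (low_pfn > end_pfn)
			low_pfn = end_pfn;

		printf("low_pfn = %#lx\n", low_pfn);	/* clamped to 0x200400 */
		return 0;
	}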