|
@@ -212,7 +212,7 @@ int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
|
|
|
*
|
|
|
- * Returns 1 if all pages in the range are isolated.
+ * Returns the last tested pfn.
|
|
|
*/
|
|
|
-static int
|
|
|
+static unsigned long
|
|
|
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
|
|
|
bool skip_hwpoisoned_pages)
|
|
|
{
|
|
@@ -237,9 +237,8 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
|
|
|
else
|
|
|
break;
|
|
|
}
|
|
|
- if (pfn < end_pfn)
|
|
|
- return 0;
|
|
|
- return 1;
|
|
|
+
|
|
|
+ return pfn;
|
|
|
}
|
|
|
|
|
|
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
|
|
@@ -248,7 +247,6 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
|
|
|
unsigned long pfn, flags;
|
|
|
struct page *page;
|
|
|
struct zone *zone;
|
|
|
- int ret;
|
|
|
|
|
|
/*
|
|
|
* Note: pageblock_nr_pages != MAX_ORDER. Then, chunks of free pages
|
|
@@ -266,10 +264,11 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
|
|
|
/* Check all pages are free or marked as ISOLATED */
|
|
|
zone = page_zone(page);
|
|
|
spin_lock_irqsave(&zone->lock, flags);
|
|
|
- ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
|
|
|
+ pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
|
|
|
skip_hwpoisoned_pages);
|
|
|
spin_unlock_irqrestore(&zone->lock, flags);
|
|
|
- return ret ? 0 : -EBUSY;
|
|
|
+
|
|
|
+ return pfn < end_pfn ? -EBUSY : 0;
|
|
|
}
|
|
|
|
|
|
struct page *alloc_migrate_target(struct page *page, unsigned long private,
|