@@ -1128,6 +1128,75 @@ void __init __free_pages_bootmem(struct page *page, unsigned long pfn,
 	return __free_pages_boot_core(page, pfn, order);
 }
 
+/*
+ * Check that the whole (or subset of) a pageblock given by the interval of
+ * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
+ * with the migration or free compaction scanner. The scanners then need to
+ * use only the pfn_valid_within() check for arches that allow holes within
+ * pageblocks.
+ *
+ * Return the struct page pointer for start_pfn, or NULL if the checks were
+ * not passed.
+ *
+ * It's possible on some configurations to have a setup like node0 node1 node0
+ * i.e. it's possible that all pages within a zone's range of pages do not
+ * belong to a single zone. We assume that a border between node0 and node1
+ * can occur within a single pageblock, but not a node0 node1 node0
+ * interleaving within a single pageblock. It is therefore sufficient to check
+ * the first and last page of a pageblock and avoid checking each individual
+ * page in a pageblock.
+ */
+struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
+				     unsigned long end_pfn, struct zone *zone)
+{
+	struct page *start_page;
+	struct page *end_page;
+
+	/* end_pfn is one past the range we are checking */
+	end_pfn--;
+
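+	/* Either end of the block may fall into a memmap hole */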
+	if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
+		return NULL;
+
+	start_page = pfn_to_page(start_pfn);
+
+	if (page_zone(start_page) != zone)
+		return NULL;
+
+	end_page = pfn_to_page(end_pfn);
+
+	/* This gives shorter code than deriving page_zone(end_page) */
+	if (page_zone_id(start_page) != page_zone_id(end_page))
+		return NULL;
+
+	return start_page;
+}
+
+void set_zone_contiguous(struct zone *zone)
+{
+	unsigned long block_start_pfn = zone->zone_start_pfn;
+	unsigned long block_end_pfn;
+
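+	/* zone_start_pfn may not be pageblock aligned, so the first block
+	 * can be partial; ALIGN() picks the next boundary above it */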
+	block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
+	for (; block_start_pfn < zone_end_pfn(zone);
+			block_start_pfn = block_end_pfn,
+			block_end_pfn += pageblock_nr_pages) {
+
+		block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
+
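+		/* A single pageblock failing the check rules out contiguity */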
+		if (!__pageblock_pfn_to_page(block_start_pfn,
+					     block_end_pfn, zone))
+			return;
+	}
+
+	/* We confirmed that there is no hole */
+	zone->contiguous = true;
+}
+
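+/* Invalidate the cached result, e.g. around memory hotplug operations */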
+void clear_zone_contiguous(struct zone *zone)
+{
+	zone->contiguous = false;
+}
+
 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
 static void __init deferred_free_range(struct page *page,
 					unsigned long pfn, int nr_pages)
@@ -1278,9 +1347,13 @@ free_range:
 	pgdat_init_report_one_done();
 	return 0;
 }
+#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
 
 void __init page_alloc_init_late(void)
 {
+	struct zone *zone;
+
+#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
 	int nid;
 
 	/* There will be num_node_state(N_MEMORY) threads */
@@ -1294,8 +1367,11 @@ void __init page_alloc_init_late(void)
 
 	/* Reinit limits that are based on free pages after the kernel is up */
 	files_maxfiles_init();
+#endif
+
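+	/* All struct pages are initialised by now; it is safe to scan zones */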
+	for_each_populated_zone(zone)
+		set_zone_contiguous(zone);
 }
-#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
 
 #ifdef CONFIG_CMA
 /* Free whole pageblock and set its migration type to MIGRATE_CMA. */