@@ -764,6 +764,51 @@ static int free_tail_pages_check(struct page *head_page, struct page *page)
 	return 0;
 }
 
+static void __meminit __init_single_page(struct page *page, unsigned long pfn,
+				unsigned long zone, int nid)
+{
+	struct zone *z = &NODE_DATA(nid)->node_zones[zone];
+
+	set_page_links(page, zone, nid, pfn);
+	mminit_verify_page_links(page, zone, nid, pfn);
+	init_page_count(page);
+	page_mapcount_reset(page);
+	page_cpupid_reset_last(page);
+	SetPageReserved(page);
+
+	/*
+	 * Mark the block movable so that blocks are reserved for
+	 * movable at startup. This will force kernel allocations
+	 * to reserve their blocks rather than leaking throughout
+	 * the address space during boot when many long-lived
+	 * kernel allocations are made. Later some blocks near
+	 * the start are marked MIGRATE_RESERVE by
+	 * setup_zone_migrate_reserve()
+	 *
+	 * bitmap is created for zone's valid pfn range. but memmap
+	 * can be created for invalid pages (for alignment)
+	 * check here not to call set_pageblock_migratetype() against
+	 * pfn out of zone.
+	 */
+	if ((z->zone_start_pfn <= pfn)
+	    && (pfn < zone_end_pfn(z))
+	    && !(pfn & (pageblock_nr_pages - 1)))
+		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+
+	INIT_LIST_HEAD(&page->lru);
+#ifdef WANT_PAGE_VIRTUAL
+	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
+	if (!is_highmem_idx(zone))
+		set_page_address(page, __va(pfn << PAGE_SHIFT));
+#endif
+}
+
+static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
+					int nid)
+{
+	return __init_single_page(pfn_to_page(pfn), pfn, zone, nid);
+}
+
 static bool free_pages_prepare(struct page *page, unsigned int order)
 {
 	bool compound = PageCompound(page);
@@ -4212,7 +4257,6 @@ static void setup_zone_migrate_reserve(struct zone *zone)
 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		unsigned long start_pfn, enum memmap_context context)
 {
-	struct page *page;
 	unsigned long end_pfn = start_pfn + size;
 	unsigned long pfn;
 	struct zone *z;
@@ -4233,38 +4277,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 			if (!early_pfn_in_nid(pfn, nid))
 				continue;
 		}
-		page = pfn_to_page(pfn);
-		set_page_links(page, zone, nid, pfn);
-		mminit_verify_page_links(page, zone, nid, pfn);
-		init_page_count(page);
-		page_mapcount_reset(page);
-		page_cpupid_reset_last(page);
-		SetPageReserved(page);
-		/*
-		 * Mark the block movable so that blocks are reserved for
-		 * movable at startup. This will force kernel allocations
-		 * to reserve their blocks rather than leaking throughout
-		 * the address space during boot when many long-lived
-		 * kernel allocations are made. Later some blocks near
-		 * the start are marked MIGRATE_RESERVE by
-		 * setup_zone_migrate_reserve()
-		 *
-		 * bitmap is created for zone's valid pfn range. but memmap
-		 * can be created for invalid pages (for alignment)
-		 * check here not to call set_pageblock_migratetype() against
-		 * pfn out of zone.
-		 */
-		if ((z->zone_start_pfn <= pfn)
-		    && (pfn < zone_end_pfn(z))
-		    && !(pfn & (pageblock_nr_pages - 1)))
-			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
-
-		INIT_LIST_HEAD(&page->lru);
-#ifdef WANT_PAGE_VIRTUAL
-		/* The shift won't overflow because ZONE_NORMAL is below 4G. */
-		if (!is_highmem_idx(zone))
-			set_page_address(page, __va(pfn << PAGE_SHIFT));
-#endif
+		__init_single_pfn(pfn, zone, nid);
 	}
 }
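
For readers following the migratetype logic above: the guard `!(pfn & (pageblock_nr_pages - 1))` selects exactly the first pfn of each pageblock, relying on pageblock_nr_pages being a power of two. A minimal standalone sketch of that mask test, outside the kernel; PAGEBLOCK_NR_PAGES and the sample pfns are assumed illustration values, not taken from the patch:

/* Standalone sketch of the power-of-two alignment test, not kernel code. */
#include <stdio.h>

#define PAGEBLOCK_NR_PAGES 512UL	/* assumed: 2MB blocks of 4KB pages */

int main(void)
{
	unsigned long pfns[] = { 0, 1, 511, 512, 1024, 1537 };
	unsigned long i;

	for (i = 0; i < sizeof(pfns) / sizeof(pfns[0]); i++) {
		unsigned long pfn = pfns[i];

		/* Low bits all zero <=> pfn is a multiple of the block size. */
		printf("pfn %5lu: %s\n", pfn,
		       (pfn & (PAGEBLOCK_NR_PAGES - 1)) ?
		       "inside a pageblock" : "starts a pageblock");
	}
	return 0;
}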