@@ -5465,12 +5465,23 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 	if (highest_memmap_pfn < end_pfn - 1)
 		highest_memmap_pfn = end_pfn - 1;

+#ifdef CONFIG_ZONE_DEVICE
 	/*
 	 * Honor reservation requested by the driver for this ZONE_DEVICE
-	 * memory
+	 * memory. We limit the total number of pages to initialize to just
+	 * those that might contain the memory mapping. We will defer the
+	 * ZONE_DEVICE page initialization until after we have released
+	 * the hotplug lock.
 	 */
-	if (altmap && start_pfn == altmap->base_pfn)
-		start_pfn += altmap->reserve;
+	if (zone == ZONE_DEVICE) {
+		if (!altmap)
+			return;
+
+		if (start_pfn == altmap->base_pfn)
+			start_pfn += altmap->reserve;
+		end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
+	}
+#endif

 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
 		/*
@@ -5537,6 +5548,81 @@ not_early:
 	}
 }

+#ifdef CONFIG_ZONE_DEVICE
+void __ref memmap_init_zone_device(struct zone *zone,
+				   unsigned long start_pfn,
+				   unsigned long size,
+				   struct dev_pagemap *pgmap)
+{
+	unsigned long pfn, end_pfn = start_pfn + size;
+	struct pglist_data *pgdat = zone->zone_pgdat;
+	unsigned long zone_idx = zone_idx(zone);
+	unsigned long start = jiffies;
+	int nid = pgdat->node_id;
+
+	if (WARN_ON_ONCE(!pgmap || !is_dev_zone(zone)))
+		return;
+
+	/*
+	 * The call to memmap_init_zone should have already taken care
+	 * of the pages reserved for the memmap, so we can just jump to
+	 * the end of that region and start processing the device pages.
+	 */
+	if (pgmap->altmap_valid) {
+		struct vmem_altmap *altmap = &pgmap->altmap;
+
+		start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
+		size = end_pfn - start_pfn;
+	}
+
+	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
+		struct page *page = pfn_to_page(pfn);
+
+		__init_single_page(page, pfn, zone_idx, nid);
+
+		/*
+		 * Mark page reserved as it will need to wait for onlining
+		 * phase for it to be fully associated with a zone.
+		 *
+		 * We can use the non-atomic __set_bit operation for setting
+		 * the flag as we are still initializing the pages.
+		 */
+		__SetPageReserved(page);
+
+		/*
+		 * ZONE_DEVICE pages union ->lru with a ->pgmap back
+		 * pointer and hmm_data. It is a bug if a ZONE_DEVICE
+		 * page is ever freed or placed on a driver-private list.
+		 */
+		page->pgmap = pgmap;
+		page->hmm_data = 0;
+
+		/*
+		 * Mark the block movable so that blocks are reserved for
+		 * movable at startup. This will force kernel allocations
+		 * to reserve their blocks rather than leaking throughout
+		 * the address space during boot when many long-lived
+		 * kernel allocations are made.
+		 *
+		 * bitmap is created for zone's valid pfn range. but memmap
+		 * can be created for invalid pages (for alignment)
+		 * check here not to call set_pageblock_migratetype() against
+		 * pfn out of zone.
+		 *
+		 * Please note that MEMMAP_HOTPLUG path doesn't clear memmap
+		 * because this is done early in sparse_add_one_section
+		 */
+		if (!(pfn & (pageblock_nr_pages - 1))) {
+			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+			cond_resched();
+		}
+	}
+
+	pr_info("%s initialised, %lu pages in %ums\n", dev_name(pgmap->dev),
+		size, jiffies_to_msecs(jiffies - start));
+}
+
+#endif
 static void __meminit zone_init_free_lists(struct zone *zone)
 {
 	unsigned int order, t;
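
For context, here is a rough sketch (not part of the hunks above) of how a dev_pagemap consumer such as devm_memremap_pages() is expected to use the new helper: the ZONE_DEVICE range is still added and moved into the zone under the hotplug lock, but the per-page struct page initialization is now performed afterwards, once the lock has been dropped. Names like align_start, align_size and the error labels are abbreviated placeholders from that caller's context, and the surrounding kasan/resource handling is elided; this is illustrative only, not the exact companion change.

	mem_hotplug_begin();
	error = arch_add_memory(nid, align_start, align_size, altmap, false);
	if (!error)
		move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
				       align_start >> PAGE_SHIFT,
				       align_size >> PAGE_SHIFT, altmap);
	mem_hotplug_done();
	if (error)
		goto err_add_memory;

	/*
	 * With this patch, memmap_init_zone() only touched the pages backing
	 * the memmap itself; initialize the remaining ZONE_DEVICE pages here,
	 * now that the hotplug lock is no longer held.
	 */
	memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
				align_start >> PAGE_SHIFT,
				align_size >> PAGE_SHIFT, pgmap);

Deferring the loop this way keeps the time spent under the hotplug lock bounded by the (small) memmap reservation rather than by the full size of the device memory range.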