@@ -1747,16 +1747,38 @@ void __init page_alloc_init_late(void)
 }
 
 #ifdef CONFIG_CMA
+static void __init adjust_present_page_count(struct page *page, long count)
+{
+	struct zone *zone = page_zone(page);
+
+	/* We don't need to hold a lock since this is the boot-up process */
+	zone->present_pages += count;
+}
+
 /* Free whole pageblock and set its migration type to MIGRATE_CMA. */
 void __init init_cma_reserved_pageblock(struct page *page)
 {
 	unsigned i = pageblock_nr_pages;
+	unsigned long pfn = page_to_pfn(page);
 	struct page *p = page;
+	int nid = page_to_nid(page);
+
+	/*
+	 * ZONE_MOVABLE will steal present pages from other zones by
+	 * changing page links, so page_zone() changes. Before that,
+	 * we need to adjust the previous zone's page count first.
+	 */
+	adjust_present_page_count(page, -pageblock_nr_pages);
 
 	do {
 		__ClearPageReserved(p);
 		set_page_count(p, 0);
-	} while (++p, --i);
+
+		/* Steal pages from other zones */
+		set_page_links(p, ZONE_MOVABLE, nid, pfn);
+	} while (++p, ++pfn, --i);
+
+	adjust_present_page_count(page, pageblock_nr_pages);
 
 	set_pageblock_migratetype(page, MIGRATE_CMA);
 
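One detail worth spelling out in init_cma_reserved_pageblock(): both adjust_present_page_count() calls are made on the same page, but page_zone() resolves to the block's original zone before the loop and to ZONE_MOVABLE afterwards, because set_page_links() rewrites the zone/node bits in page->flags. The block's present_pages are therefore transferred from the old zone to ZONE_MOVABLE. Below is a minimal userspace sketch of that accounting (not kernel code; the zone layout, the BLOCK_PAGES value and the move_block() helper are invented for illustration):

#include <stdio.h>

#define BLOCK_PAGES 512L	/* stand-in for pageblock_nr_pages */

struct zone {
	const char *name;
	long present_pages;
};

/* Boot-time only, so no locking is modelled (matches the patch's comment). */
static void adjust_present(struct zone *zone, long count)
{
	zone->present_pages += count;
}

/* Transfer one pageblock's worth of accounting from src to dst. */
static void move_block(struct zone *src, struct zone *dst)
{
	adjust_present(src, -BLOCK_PAGES);
	/* ...the kernel re-links every page with set_page_links() here... */
	adjust_present(dst, BLOCK_PAGES);
}

int main(void)
{
	struct zone normal  = { "Normal",  4096 };
	struct zone movable = { "Movable", 0 };

	move_block(&normal, &movable);
	printf("%s: %ld present pages, %s: %ld present pages\n",
	       normal.name, normal.present_pages,
	       movable.name, movable.present_pages);
	return 0;
}

In the kernel the destination zone is not passed explicitly; it simply falls out of page_zone() once the pages have been re-linked.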
@@ -6208,6 +6230,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
 {
 	enum zone_type j;
 	int nid = pgdat->node_id;
+	unsigned long node_end_pfn = 0;
 
 	pgdat_resize_init(pgdat);
 #ifdef CONFIG_NUMA_BALANCING
@@ -6235,9 +6258,13 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
 		struct zone *zone = pgdat->node_zones + j;
 		unsigned long size, realsize, freesize, memmap_pages;
 		unsigned long zone_start_pfn = zone->zone_start_pfn;
+		unsigned long movable_size = 0;
 
 		size = zone->spanned_pages;
 		realsize = freesize = zone->present_pages;
+		if (zone_end_pfn(zone) > node_end_pfn)
+			node_end_pfn = zone_end_pfn(zone);
+
 
 		/*
 		 * Adjust freesize so that it accounts for how much memory
@@ -6286,12 +6313,30 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
 		zone_seqlock_init(zone);
 		zone_pcp_init(zone);
 
-		if (!size)
+		/*
+		 * The size of the CMA area is unknown now, so we need to
+		 * prepare the memory for the usemap at the maximum size.
+		 */
+		if (IS_ENABLED(CONFIG_CMA) && j == ZONE_MOVABLE &&
+			pgdat->node_spanned_pages) {
+			movable_size = node_end_pfn - pgdat->node_start_pfn;
+		}
+
+		if (!size && !movable_size)
 			continue;
 
 		set_pageblock_order();
-		setup_usemap(pgdat, zone, zone_start_pfn, size);
-		init_currently_empty_zone(zone, zone_start_pfn, size);
+		if (movable_size) {
+			zone->zone_start_pfn = pgdat->node_start_pfn;
+			zone->spanned_pages = movable_size;
+			setup_usemap(pgdat, zone,
+					pgdat->node_start_pfn, movable_size);
+			init_currently_empty_zone(zone,
+					pgdat->node_start_pfn, movable_size);
+		} else {
+			setup_usemap(pgdat, zone, zone_start_pfn, size);
+			init_currently_empty_zone(zone, zone_start_pfn, size);
+		}
 		memmap_init(size, nid, j, zone_start_pfn);
 	}
 }
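The free_area_init_core() changes size ZONE_MOVABLE pessimistically because the CMA reservation is still unknown at this point: node_end_pfn is raised to the highest zone_end_pfn() seen while the node's zones are walked, and since ZONE_MOVABLE is visited after the other zones, it is given the whole node span (node_end_pfn - pgdat->node_start_pfn) so its usemap can cover any pageblock that CMA later steals. A standalone sketch of that computation follows; the pfn ranges are invented for illustration:

#include <stdio.h>

struct zone_span {
	unsigned long start_pfn;
	unsigned long spanned_pages;
};

int main(void)
{
	/* e.g. DMA32, Normal, and an (initially empty) Movable zone */
	struct zone_span zones[] = {
		{ 0x001000, 0x0ff000 },
		{ 0x100000, 0x300000 },
		{ 0x000000, 0x000000 },
	};
	unsigned long node_start_pfn = 0x001000;
	unsigned long node_end_pfn = 0;
	unsigned long movable_size;
	unsigned int i;

	/* Track the highest end pfn of any zone spanned by this node */
	for (i = 0; i < sizeof(zones) / sizeof(zones[0]); i++) {
		unsigned long end = zones[i].start_pfn + zones[i].spanned_pages;

		if (end > node_end_pfn)
			node_end_pfn = end;
	}

	/* ZONE_MOVABLE's usemap is then sized for the whole node span */
	movable_size = node_end_pfn - node_start_pfn;
	printf("movable_size = %lu pages\n", movable_size);
	return 0;
}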
@@ -7932,7 +7977,7 @@ void free_contig_range(unsigned long pfn, unsigned nr_pages)
 }
 #endif
 
-#ifdef CONFIG_MEMORY_HOTPLUG
+#if defined CONFIG_MEMORY_HOTPLUG || defined CONFIG_CMA
 /*
  * The zone indicated has a new number of managed_pages; batch sizes and percpu
  * page high values need to be recalulated.
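The last hunk widens the preprocessor guard so that zone_pcp_update() is also built when only CMA is enabled: once the CMA pages end up in ZONE_MOVABLE, that zone's managed page count changes, and the per-cpu pageset batch and high watermarks derived from it should be recomputed. The userspace sketch below loosely models that sizing heuristic; the constants and the 6x high multiplier are patterned on zone_batchsize()/pageset_set_batch() of that era, but treat them as illustrative assumptions rather than the exact kernel values:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Largest power of two not greater than n (n >= 1). */
static unsigned long rounddown_pow_of_two(unsigned long n)
{
	unsigned long p = 1;

	while (p * 2 <= n)
		p *= 2;
	return p;
}

/* Approximation of how a per-cpu batch is derived from managed_pages. */
static long batch_for(unsigned long managed_pages)
{
	long batch = managed_pages / 1024;

	if (batch * PAGE_SIZE > 512 * 1024)	/* cap at roughly 512KB */
		batch = (512 * 1024) / PAGE_SIZE;
	batch /= 4;
	if (batch < 1)
		batch = 1;
	return rounddown_pow_of_two(batch + batch / 2) - 1;
}

int main(void)
{
	unsigned long before = 0;	/* ZONE_MOVABLE while still empty */
	unsigned long after = 262144;	/* after absorbing ~1GB of CMA pages */

	printf("batch: %ld -> %ld, high: %ld -> %ld\n",
	       batch_for(before), batch_for(after),
	       6 * batch_for(before), 6 * batch_for(after));
	return 0;
}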