@@ -4215,7 +4215,6 @@ static noinline __init_refok
 int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
 {
 	int i;
-	struct pglist_data *pgdat = zone->zone_pgdat;
 	size_t alloc_size;
 
 	/*
@@ -4231,7 +4230,8 @@ int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
 
 	if (!slab_is_available()) {
 		zone->wait_table = (wait_queue_head_t *)
-			alloc_bootmem_node_nopanic(pgdat, alloc_size);
+			memblock_virt_alloc_node_nopanic(
+				alloc_size, zone->zone_pgdat->node_id);
 	} else {
 		/*
 		 * This case means that a zone whose size was 0 gets new memory
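
The pgdat local removed above becomes dead code after this conversion:
memblock_virt_alloc_node_nopanic() takes a plain NUMA node id rather than a
pgdat pointer. A minimal sketch of the new calling convention follows; the
signature is assumed from include/linux/bootmem.h of this era, and "size" and
"nid" are placeholders rather than values taken from the patch:

	/*
	 * Sketch only.  The _nopanic variant returns NULL on failure
	 * instead of panicking, so the caller must check the result,
	 * as zone_wait_table_init() does before touching wait_table.
	 */
	wait_queue_head_t *table;

	table = memblock_virt_alloc_node_nopanic(size, nid);
	if (!table)
		return -ENOMEM;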
@@ -4351,13 +4351,14 @@ bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
 #endif
 
 /**
- * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
+ * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range
  * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
- * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
+ * @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid
  *
 * If an architecture guarantees that all ranges registered with
 * add_active_ranges() contain no holes and may be freed, this
- * this function may be used instead of calling free_bootmem() manually.
+ * function may be used instead of calling memblock_free_early_nid()
+ * manually.
 */
 void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
 {
@@ -4369,9 +4370,9 @@ void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
 		end_pfn = min(end_pfn, max_low_pfn);
 
 		if (start_pfn < end_pfn)
-			free_bootmem_node(NODE_DATA(this_nid),
-					  PFN_PHYS(start_pfn),
-					  (end_pfn - start_pfn) << PAGE_SHIFT);
+			memblock_free_early_nid(PFN_PHYS(start_pfn),
+					(end_pfn - start_pfn) << PAGE_SHIFT,
+					this_nid);
 	}
 }
 
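
Note that the argument order changes with the conversion: the bootmem API took
the node's pgdat first, while memblock_free_early_nid() takes base, size, then
node id. A hedged sketch, with "pfn" and "nid" as illustrative placeholders:

	/*
	 * Assumed signatures (include/linux/bootmem.h of this era):
	 *   void free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
	 *                          unsigned long size);
	 *   void memblock_free_early_nid(phys_addr_t base, phys_addr_t size,
	 *                                int nid);
	 * Returning one page at "pfn" to the early allocator:
	 */
	memblock_free_early_nid(PFN_PHYS(pfn), PAGE_SIZE, nid);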
@@ -4642,8 +4643,9 @@ static void __init setup_usemap(struct pglist_data *pgdat,
 	unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
 	zone->pageblock_flags = NULL;
 	if (usemapsize)
-		zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat,
-							usemapsize);
+		zone->pageblock_flags =
+			memblock_virt_alloc_node_nopanic(usemapsize,
+							 pgdat->node_id);
 }
 #else
 static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
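
The usemap conversion keeps the _nopanic semantics: if the early allocation
fails, zone->pageblock_flags is simply left NULL instead of halting boot. A
short sketch of that failure behaviour, with "usemapsize" as a placeholder:

	/*
	 * Sketch only: memblock_virt_alloc_node_nopanic() returns NULL
	 * on failure, so a zero-sized or failed usemap leaves
	 * zone->pageblock_flags unset rather than panicking.
	 */
	zone->pageblock_flags = NULL;
	if (usemapsize)
		zone->pageblock_flags =
			memblock_virt_alloc_node_nopanic(usemapsize,
							 pgdat->node_id);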
@@ -4837,7 +4839,8 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
 		size = (end - start) * sizeof(struct page);
 		map = alloc_remap(pgdat->node_id, size);
 		if (!map)
-			map = alloc_bootmem_node_nopanic(pgdat, size);
+			map = memblock_virt_alloc_node_nopanic(size,
+							       pgdat->node_id);
 		pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
 	}
 #ifndef CONFIG_NEED_MULTIPLE_NODES
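
In alloc_node_mem_map() the pointer bias on the last context line exists
because "start" is the node's first PFN rounded down for alignment, so the
allocated array begins a few entries before node_start_pfn. Illustrative
arithmetic with made-up numbers:

	/*
	 * Hypothetical values for illustration only: the node starts at
	 * pfn 0x1005 but the map is allocated from the rounded-down pfn
	 * 0x1000.  Biasing the pointer keeps node_mem_map lined up with
	 * node_start_pfn:
	 *
	 *   node_mem_map = map + (0x1005 - 0x1000) = map + 5 entries
	 */
	pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);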
@@ -5887,7 +5890,7 @@ void *__init alloc_large_system_hash(const char *tablename,
 	do {
 		size = bucketsize << log2qty;
 		if (flags & HASH_EARLY)
-			table = alloc_bootmem_nopanic(size);
+			table = memblock_virt_alloc_nopanic(size, 0);
 		else if (hashdist)
 			table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
 		else {
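
In the HASH_EARLY path above, the second argument to
memblock_virt_alloc_nopanic() is the requested alignment; passing 0 selects
the allocator's default (SMP_CACHE_BYTES in kernels of this era). A hedged
sketch of how the surrounding retry loop uses the NULL return:

	/*
	 * Assumed signature:
	 *   void *memblock_virt_alloc_nopanic(phys_addr_t size,
	 *                                     phys_addr_t align);
	 * Because the _nopanic variant returns NULL instead of panicking,
	 * alloc_large_system_hash() can shrink log2qty and retry with a
	 * smaller table rather than failing the boot.
	 */
	do {
		size = bucketsize << log2qty;
		table = memblock_virt_alloc_nopanic(size, 0);
	} while (!table && size > PAGE_SIZE && --log2qty);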