@@ -248,7 +248,6 @@ paging_init (void)
	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
	if (max_gap < LARGE_GAP) {
		vmem_map = (struct page *) 0;
-		free_area_init_nodes(max_zone_pfns);
	} else {
		unsigned long map_size;

@@ -266,13 +265,12 @@ paging_init (void)
		 */
		NODE_DATA(0)->node_mem_map = vmem_map +
			find_min_pfn_with_active_regions();
-		free_area_init_nodes(max_zone_pfns);

		printk("Virtual mem_map starts at 0x%p\n", mem_map);
	}
#else /* !CONFIG_VIRTUAL_MEM_MAP */
	memblock_add_node(0, PFN_PHYS(max_low_pfn), 0);
-	free_area_init_nodes(max_zone_pfns);
#endif /* !CONFIG_VIRTUAL_MEM_MAP */

+	free_area_init_nodes(max_zone_pfns);
	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}