|
@@ -241,6 +241,44 @@ static void __init request_standard_resources(void)
|
|
|
}
|
|
|
}
|
|
|
|
|
|
+static int __init reserve_memblock_reserved_regions(void)
|
|
|
+{
|
|
|
+ phys_addr_t start, end, roundup_end = 0;
|
|
|
+ struct resource *mem, *res;
|
|
|
+ u64 i;
|
|
|
+
|
|
|
+ for_each_reserved_mem_region(i, &start, &end) {
|
|
|
+ if (end <= roundup_end)
|
|
|
+ continue; /* done already */
|
|
|
+
|
|
|
+ start = __pfn_to_phys(PFN_DOWN(start));
|
|
|
+ end = __pfn_to_phys(PFN_UP(end)) - 1;
|
|
|
+ roundup_end = end;
|
|
|
+
|
|
|
+ res = kzalloc(sizeof(*res), GFP_ATOMIC);
|
|
|
+ if (WARN_ON(!res))
|
|
|
+ return -ENOMEM;
|
|
|
+ res->start = start;
|
|
|
+ res->end = end;
|
|
|
+ res->name = "reserved";
|
|
|
+ res->flags = IORESOURCE_MEM;
|
|
|
+
|
|
|
+ mem = request_resource_conflict(&iomem_resource, res);
|
|
|
+ /*
|
|
|
+ * We expected memblock_reserve() regions to conflict with
|
|
|
+ * memory created by request_standard_resources().
|
|
|
+ */
|
|
|
+ if (WARN_ON_ONCE(!mem))
|
|
|
+ continue;
|
|
|
+ kfree(res);
|
|
|
+
|
|
|
+ reserve_region_with_split(mem, start, end, "reserved");
|
|
|
+ }
|
|
|
+
|
|
|
+ return 0;
|
|
|
+}
|
|
|
+arch_initcall(reserve_memblock_reserved_regions);
|
|
|
+
|
|
|
/*
 * Logical-CPU-number -> hardware-ID map; every slot starts as INVALID_HWID
 * (no hardware CPU bound yet).  NOTE(review): presumably populated during
 * CPU enumeration — the writer is not visible in this chunk, confirm.
 */
u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
|
|
|
|
|
|
void __init setup_arch(char **cmdline_p)
|