/*
 * Based on arch/arm/mm/init.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/efi.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>

#include <asm/boot.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/memory.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/alternative.h>
/*
 * We need to be able to catch inadvertent references to memstart_addr
 * that occur (potentially in generic code) before arm64_memblock_init()
 * executes, which assigns it its actual value. So use a default value
 * that cannot be mistaken for a real physical address.
 */
s64 memstart_addr __ro_after_init = -1;
phys_addr_t arm64_dma_phys_limit __ro_after_init;

#ifdef CONFIG_BLK_DEV_INITRD
static int __init early_initrd(char *p)
{
	unsigned long start, size;
	char *endp;

	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);

		initrd_start = start;
		initrd_end = start + size;
	}
	return 0;
}
early_param("initrd", early_initrd);
#endif
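
/*
 * The parser above expects "initrd=<start>,<size>", where both values go
 * through memparse() and therefore accept K/M/G suffixes, e.g.
 * "initrd=0x84000000,16M" (address and size here are illustrative, not
 * taken from this file).
 */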

#ifdef CONFIG_KEXEC_CORE
/*
 * reserve_crashkernel() - reserves memory for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_base, crash_size;
	int ret;

	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
				&crash_size, &crash_base);
	/* no crashkernel= or invalid value specified */
	if (ret || !crash_size)
		return;

	crash_size = PAGE_ALIGN(crash_size);

	if (crash_base == 0) {
		/* Current arm64 boot protocol requires 2MB alignment */
		crash_base = memblock_find_in_range(0, ARCH_LOW_ADDRESS_LIMIT,
				crash_size, SZ_2M);
		if (crash_base == 0) {
			pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
				crash_size);
			return;
		}
	} else {
		/* User specifies base address explicitly. */
		if (!memblock_is_region_memory(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region is not memory\n");
			return;
		}

		if (memblock_is_region_reserved(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region overlaps reserved memory\n");
			return;
		}

		if (!IS_ALIGNED(crash_base, SZ_2M)) {
			pr_warn("cannot reserve crashkernel: base address is not 2MB aligned\n");
			return;
		}
	}
	memblock_reserve(crash_base, crash_size);

	pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
		crash_base, crash_base + crash_size, crash_size >> 20);

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
}
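
/*
 * parse_crashkernel() understands "crashkernel=<size>[@<base>]": with no
 * base given, the branch above allocates a 2MB-aligned region below
 * ARCH_LOW_ADDRESS_LIMIT, e.g. "crashkernel=512M" vs.
 * "crashkernel=512M@0x90000000" (values illustrative).
 */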

static void __init kexec_reserve_crashkres_pages(void)
{
#ifdef CONFIG_HIBERNATION
	phys_addr_t addr;
	struct page *page;

	if (!crashk_res.end)
		return;

	/*
	 * To reduce the size of hibernation image, all the pages are
	 * marked as Reserved initially.
	 */
	for (addr = crashk_res.start; addr < (crashk_res.end + 1);
			addr += PAGE_SIZE) {
		page = phys_to_page(addr);
		SetPageReserved(page);
	}
#endif
}
#else
static void __init reserve_crashkernel(void)
{
}

static void __init kexec_reserve_crashkres_pages(void)
{
}
#endif /* CONFIG_KEXEC_CORE */

#ifdef CONFIG_CRASH_DUMP
static int __init early_init_dt_scan_elfcorehdr(unsigned long node,
		const char *uname, int depth, void *data)
{
	const __be32 *reg;
	int len;

	if (depth != 1 || strcmp(uname, "chosen") != 0)
		return 0;

	reg = of_get_flat_dt_prop(node, "linux,elfcorehdr", &len);
	if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
		return 1;

	elfcorehdr_addr = dt_mem_next_cell(dt_root_addr_cells, &reg);
	elfcorehdr_size = dt_mem_next_cell(dt_root_size_cells, &reg);

	return 1;
}

/*
 * reserve_elfcorehdr() - reserves memory for elf core header
 *
 * This function reserves the memory occupied by an elf core header
 * described in the device tree. This region contains all the
 * information about the primary kernel's core image and is used by a
 * dump capture kernel to access the system memory of the primary kernel.
 */
static void __init reserve_elfcorehdr(void)
{
	of_scan_flat_dt(early_init_dt_scan_elfcorehdr, NULL);

	if (!elfcorehdr_size)
		return;

	if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
		pr_warn("elfcorehdr is overlapped\n");
		return;
	}

	memblock_reserve(elfcorehdr_addr, elfcorehdr_size);

	pr_info("Reserving %lldKB of memory at 0x%llx for elfcorehdr\n",
		elfcorehdr_size >> 10, elfcorehdr_addr);
}
#else
static void __init reserve_elfcorehdr(void)
{
}
#endif /* CONFIG_CRASH_DUMP */

/*
 * Return the maximum physical address for ZONE_DMA32 (DMA_BIT_MASK(32)). It
 * currently assumes that for memory starting above 4G, 32-bit devices will
 * use a DMA offset.
 */
static phys_addr_t __init max_zone_dma_phys(void)
{
	phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
	return min(offset + (1ULL << 32), memblock_end_of_DRAM());
}
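
/*
 * Worked example: if DRAM starts at 0x0080000000, the bits above bit 32
 * are zero, so the limit is min(4GB, end-of-DRAM). If DRAM starts at
 * 0x8080000000, offset becomes 0x8000000000 and the limit is
 * min(0x8100000000, end-of-DRAM) -- the first 4GB window reachable by a
 * 32-bit DMA mask through a bus offset (addresses illustrative).
 */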

#ifdef CONFIG_NUMA

static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};

	if (IS_ENABLED(CONFIG_ZONE_DMA32))
		max_zone_pfns[ZONE_DMA32] = PFN_DOWN(max_zone_dma_phys());
	max_zone_pfns[ZONE_NORMAL] = max;

	free_area_init_nodes(max_zone_pfns);
}

#else

static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	struct memblock_region *reg;
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	unsigned long max_dma = min;

	memset(zone_size, 0, sizeof(zone_size));

	/* 4GB maximum for 32-bit only capable devices */
#ifdef CONFIG_ZONE_DMA32
	max_dma = PFN_DOWN(arm64_dma_phys_limit);
	zone_size[ZONE_DMA32] = max_dma - min;
#endif
	zone_size[ZONE_NORMAL] = max - max_dma;

	memcpy(zhole_size, zone_size, sizeof(zhole_size));

	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start >= max)
			continue;

#ifdef CONFIG_ZONE_DMA32
		if (start < max_dma) {
			unsigned long dma_end = min(end, max_dma);
			zhole_size[ZONE_DMA32] -= dma_end - start;
		}
#endif
		if (end > max_dma) {
			unsigned long normal_end = min(end, max);
			unsigned long normal_start = max(start, max_dma);
			zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
		}
	}

	free_area_init_node(0, zone_size, min, zhole_size);
}

#endif /* CONFIG_NUMA */
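
/*
 * In the !NUMA path, zone_size[] holds each zone's full PFN span and
 * zhole_size[] starts out equal to it; the loop then subtracts every
 * present memblock region, so what remains is exactly the holes. E.g. a
 * ZONE_NORMAL span of 0x80000 PFNs containing a single 0x60000-PFN bank
 * leaves zhole_size[ZONE_NORMAL] = 0x20000 (numbers illustrative).
 */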

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
	return memblock_is_map_memory(pfn << PAGE_SHIFT);
}
EXPORT_SYMBOL(pfn_valid);
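
/*
 * memblock_is_map_memory() only returns true for addresses in memblock
 * memory regions that are not marked MEMBLOCK_NOMAP, so this pfn_valid()
 * also implies the page is covered by the linear mapping.
 */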
#endif

#ifndef CONFIG_SPARSEMEM
static void __init arm64_memory_present(void)
{
}
#else
static void __init arm64_memory_present(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		int nid = memblock_get_region_node(reg);

		memory_present(nid, memblock_region_memory_base_pfn(reg),
				memblock_region_memory_end_pfn(reg));
	}
}
#endif

static phys_addr_t memory_limit = PHYS_ADDR_MAX;

/*
 * Limit the memory size that was specified via FDT.
 */
static int __init early_mem(char *p)
{
	if (!p)
		return 1;

	memory_limit = memparse(p, &p) & PAGE_MASK;
	pr_notice("Memory limited to %lldMB\n", memory_limit >> 20);

	return 0;
}
early_param("mem", early_mem);
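
/*
 * Booting with e.g. "mem=512M" caps usable memory at 512MB; the
 * PAGE_MASK above rounds the parsed value down to a page boundary.
 */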

static int __init early_init_dt_scan_usablemem(unsigned long node,
		const char *uname, int depth, void *data)
{
	struct memblock_region *usablemem = data;
	const __be32 *reg;
	int len;

	if (depth != 1 || strcmp(uname, "chosen") != 0)
		return 0;

	reg = of_get_flat_dt_prop(node, "linux,usable-memory-range", &len);
	if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
		return 1;

	usablemem->base = dt_mem_next_cell(dt_root_addr_cells, &reg);
	usablemem->size = dt_mem_next_cell(dt_root_size_cells, &reg);

	return 1;
}

static void __init fdt_enforce_memory_region(void)
{
	struct memblock_region reg = {
		.size = 0,
	};

	of_scan_flat_dt(early_init_dt_scan_usablemem, &reg);

	if (reg.size)
		memblock_cap_memory_range(reg.base, reg.size);
}
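
/*
 * A crash dump kernel is typically handed its memory window through the
 * /chosen node, e.g. (cell counts and values illustrative):
 *
 *	chosen {
 *		linux,usable-memory-range = <0x0 0x90000000 0x0 0x10000000>;
 *	};
 */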

void __init arm64_memblock_init(void)
{
	const s64 linear_region_size = -(s64)PAGE_OFFSET;

	/* Handle linux,usable-memory-range property */
	fdt_enforce_memory_region();

	/* Remove memory above our supported physical address size */
	memblock_remove(1ULL << PHYS_MASK_SHIFT, ULLONG_MAX);

	/*
	 * Ensure that the linear region takes up exactly half of the kernel
	 * virtual address space. This way, we can distinguish a linear address
	 * from a kernel/module/vmalloc address by testing a single bit.
	 */
	BUILD_BUG_ON(linear_region_size != BIT(VA_BITS - 1));

	/*
	 * Select a suitable value for the base of physical memory.
	 */
	memstart_addr = round_down(memblock_start_of_DRAM(),
				   ARM64_MEMSTART_ALIGN);

	/*
	 * Remove the memory that we will not be able to cover with the
	 * linear mapping. Take care not to clip the kernel which may be
	 * high in memory.
	 */
	memblock_remove(max_t(u64, memstart_addr + linear_region_size,
			__pa_symbol(_end)), ULLONG_MAX);
	if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {
		/* ensure that memstart_addr remains sufficiently aligned */
		memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size,
					 ARM64_MEMSTART_ALIGN);
		memblock_remove(0, memstart_addr);
	}

	/*
	 * Apply the memory limit if it was set. Since the kernel may be loaded
	 * high up in memory, add back the kernel region that must be accessible
	 * via the linear mapping.
	 */
	if (memory_limit != PHYS_ADDR_MAX) {
		memblock_mem_limit_remove_map(memory_limit);
		memblock_add(__pa_symbol(_text), (u64)(_end - _text));
	}

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_start) {
		/*
		 * Add back the memory we just removed if it results in the
		 * initrd becoming inaccessible via the linear mapping.
		 * Otherwise, this is a no-op.
		 */
		u64 base = initrd_start & PAGE_MASK;
		u64 size = PAGE_ALIGN(initrd_end) - base;

		/*
		 * We can only add back the initrd memory if we don't end up
		 * with more memory than we can address via the linear mapping.
		 * It is up to the bootloader to position the kernel and the
		 * initrd reasonably close to each other (i.e., within 32 GB of
		 * each other) so that all granule/#levels combinations can
		 * always access both.
		 */
		if (WARN(base < memblock_start_of_DRAM() ||
			 base + size > memblock_start_of_DRAM() +
				       linear_region_size,
			"initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) {
			initrd_start = 0;
		} else {
			memblock_remove(base, size); /* clear MEMBLOCK_ flags */
			memblock_add(base, size);
			memblock_reserve(base, size);
		}
	}

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		extern u16 memstart_offset_seed;
		u64 range = linear_region_size -
			    (memblock_end_of_DRAM() - memblock_start_of_DRAM());

		/*
		 * If the size of the linear region exceeds, by a sufficient
		 * margin, the size of the region that the available physical
		 * memory spans, randomize the linear region as well.
		 */
		if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
			range = range / ARM64_MEMSTART_ALIGN + 1;
			memstart_addr -= ARM64_MEMSTART_ALIGN *
					 ((range * memstart_offset_seed) >> 16);
		}
	}
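
	/*
	 * With the 16-bit seed, the offset subtracted above is roughly
	 * (seed / 65536) * range, quantised to ARM64_MEMSTART_ALIGN; e.g.
	 * seed = 0x8000 shifts memstart_addr about halfway through the
	 * spare part of the linear region (example value, not from the
	 * source).
	 */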

	/*
	 * Register the kernel text, kernel data, initrd, and initial
	 * pagetables with memblock.
	 */
	memblock_reserve(__pa_symbol(_text), _end - _text);
#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		memblock_reserve(initrd_start, initrd_end - initrd_start);

		/* the generic initrd code expects virtual addresses */
		initrd_start = __phys_to_virt(initrd_start);
		initrd_end = __phys_to_virt(initrd_end);
	}
#endif

	early_init_fdt_scan_reserved_mem();

	/* 4GB maximum for 32-bit only capable devices */
	if (IS_ENABLED(CONFIG_ZONE_DMA32))
		arm64_dma_phys_limit = max_zone_dma_phys();
	else
		arm64_dma_phys_limit = PHYS_MASK + 1;

	reserve_crashkernel();

	reserve_elfcorehdr();

	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;

	dma_contiguous_reserve(arm64_dma_phys_limit);

	memblock_allow_resize();
}

void __init bootmem_init(void)
{
	unsigned long min, max;

	min = PFN_UP(memblock_start_of_DRAM());
	max = PFN_DOWN(memblock_end_of_DRAM());

	early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT);

	max_pfn = max_low_pfn = max;

	arm64_numa_init();
	/*
	 * Sparsemem tries to allocate bootmem in memory_present(), so must be
	 * done after the fixed reservations.
	 */
	arm64_memory_present();

	sparse_init();
	zone_sizes_init(min, max);

	memblock_dump_all();
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and round start upwards and end
	 * downwards.
	 */
	pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
	pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these, free the section of the
	 * memmap array.
	 */
	if (pg < pgend)
		free_bootmem(pg, pgend - pg);
}
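
/*
 * Rough sizing: assuming 4K pages and a 64-byte struct page (an
 * assumption, not taken from this file), every 64 pages of hole free one
 * page of memmap, so a 128MB hole (32768 PFNs) releases about 512 pages
 * (2MB) of memmap back to the allocator.
 */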

/*
 * The mem_map array can get very big. Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, prev_end = 0;
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		start = __phys_to_pfn(reg->base);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist due
		 * to SPARSEMEM sections which aren't present.
		 */
		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
		/*
		 * If we had a previous bank, and there is a space between the
		 * current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size),
				 MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
		free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

/*
 * mem_init() marks the free areas in the mem_map and tells us how much memory
 * is free. This is done after various parts of the system have claimed their
 * memory after the kernel image.
 */
void __init mem_init(void)
{
	if (swiotlb_force == SWIOTLB_FORCE ||
	    max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
		swiotlb_init(1);
	else
		swiotlb_force = SWIOTLB_NO_FORCE;

	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

#ifndef CONFIG_SPARSEMEM_VMEMMAP
	free_unused_memmap();
#endif
	/* this will put all unused low memory onto the freelists */
	free_all_bootmem();

	kexec_reserve_crashkres_pages();

	mem_init_print_info(NULL);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can be
	 * detected at build time already.
	 */
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64);
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/*
	 * Make sure we chose the upper bound of sizeof(struct page)
	 * correctly when sizing the VMEMMAP array.
	 */
	BUILD_BUG_ON(sizeof(struct page) > (1 << STRUCT_PAGE_MAX_SHIFT));
#endif

	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get anywhere without
		 * overcommit, so turn it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

void free_initmem(void)
{
	free_reserved_area(lm_alias(__init_begin),
			   lm_alias(__init_end),
			   0, "unused kernel");
	/*
	 * Unmap the __init region but leave the VM area in place. This
	 * prevents the region from being reused for kernel modules, which
	 * is not supported by kallsyms.
	 */
	unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd __initdata;

void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd) {
		free_reserved_area((void *)start, (void *)end, 0, "initrd");
		memblock_free(__virt_to_phys(start), end - start);
	}
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
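
/*
 * Passing "keepinitrd" on the command line keeps the initrd pages
 * resident after boot instead of returning them to the page allocator,
 * e.g. so the image can be inspected later (use case illustrative).
 */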
#endif

/*
 * Dump out memory limit information on panic.
 */
static int dump_mem_limit(struct notifier_block *self, unsigned long v, void *p)
{
	if (memory_limit != PHYS_ADDR_MAX) {
		pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20);
	} else {
		pr_emerg("Memory Limit: none\n");
	}
	return 0;
}

static struct notifier_block mem_limit_notifier = {
	.notifier_call = dump_mem_limit,
};

static int __init register_mem_limit_dumper(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
				       &mem_limit_notifier);
	return 0;
}
__initcall(register_mem_limit_dumper);