/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm/sections.h>
#include <linux/io.h>

#include "internal.h"
/**
 * DOC: memblock overview
 *
 * Memblock is a method of managing memory regions during the early
 * boot period when the usual kernel memory allocators are not up and
 * running.
 *
 * Memblock views the system memory as collections of contiguous
 * regions. There are several types of these collections:
 *
 * * ``memory`` - describes the physical memory available to the
 *   kernel; this may differ from the actual physical memory installed
 *   in the system, for instance when the memory is restricted with
 *   the ``mem=`` command line parameter
 * * ``reserved`` - describes the regions that were allocated
 * * ``physmap`` - describes the actual physical memory regardless of
 *   the possible restrictions; the ``physmap`` type is only available
 *   on some architectures.
 *
 * Each region is represented by :c:type:`struct memblock_region` that
 * defines the region extents, its attributes and NUMA node id on NUMA
 * systems. Every memory type is described by the :c:type:`struct
 * memblock_type` which contains an array of memory regions along with
 * the allocator metadata. The memory types are nicely wrapped with
 * :c:type:`struct memblock`. This structure is statically initialized
 * at build time. The region arrays for the "memory" and "reserved"
 * types are initially sized to %INIT_MEMBLOCK_REGIONS and for the
 * "physmap" type to %INIT_PHYSMEM_REGIONS.
 * The :c:func:`memblock_allow_resize` enables automatic resizing of
 * the region arrays during addition of new regions. This feature
 * should be used with care so that memory allocated for the region
 * array will not overlap with areas that should be reserved, for
 * example initrd.
 *
 * The early architecture setup should tell memblock what the physical
 * memory layout is by using :c:func:`memblock_add` or
 * :c:func:`memblock_add_node` functions. The first function does not
 * assign the region to a NUMA node and it is appropriate for UMA
 * systems. Yet, it is possible to use it on NUMA systems as well and
 * assign the region to a NUMA node later in the setup process using
 * :c:func:`memblock_set_node`. The :c:func:`memblock_add_node`
 * performs such an assignment directly.
 *
 * Once memblock is set up the memory can be allocated using either
 * memblock or bootmem APIs.
 *
 * As the system boot progresses, the architecture specific
 * :c:func:`mem_init` function frees all the memory to the buddy page
 * allocator.
 *
 * If an architecture enables %CONFIG_ARCH_DISCARD_MEMBLOCK, the
 * memblock data structures will be discarded after the system
 * initialization completes.
 */
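/*
 * Illustrative sketch only (not part of the original file): a typical
 * early arch setup sequence using the APIs defined below. The base and
 * size values are hypothetical.
 *
 *	memblock_add(0x80000000, 0x20000000);		 (register 512M of RAM)
 *	memblock_reserve(initrd_start, initrd_size);	 (keep initrd intact)
 *	memblock_allow_resize();			 (let region arrays grow)
 *	paddr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE); (early allocation)
 */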
#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
unsigned long long max_possible_pfn;

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,
	.memory.name		= "memory",

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_REGIONS,
	.reserved.name		= "reserved",

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	.physmem.regions	= memblock_physmem_init_regions,
	.physmem.cnt		= 1,	/* empty dummy entry */
	.physmem.max		= INIT_PHYSMEM_REGIONS,
	.physmem.name		= "physmem",
#endif

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

int memblock_debug __initdata_memblock;
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;
enum memblock_flags __init_memblock choose_memblock_flags(void)
{
	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, PHYS_ADDR_MAX - base);
}
/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}
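/*
 * Worked example of the half-open overlap test above (values are
 * hypothetical): [0x1000, 0x2000) and [0x2000, 0x3000) do NOT overlap,
 * because base2 (0x2000) is not strictly below base1 + size1 (0x2000);
 * [0x1000, 0x2001) and [0x2000, 0x3000) do overlap.
 */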
bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++)
		if (memblock_addrs_overlap(base, size, type->regions[i].base,
					   type->regions[i].size))
			break;
	return i < type->cnt;
}
/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid,
				enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}
/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid,
			       enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
					NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}
/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * When allocation direction is bottom-up, the @start should be greater
 * than the end of the kernel image. Otherwise, it will be trimmed. The
 * reason is that we want the bottom-up allocation just near the kernel
 * image so it is highly likely that the allocated memory and the kernel
 * will reside in the same node.
 *
 * If bottom-up allocation fails, top-down allocation will be tried.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					enum memblock_flags flags)
{
	phys_addr_t kernel_end, ret;

	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);
	kernel_end = __pa_symbol(_end);

	/*
	 * try bottom-up allocation only when bottom-up mode
	 * is set and @end is above the kernel image.
	 */
	if (memblock_bottom_up() && end > kernel_end) {
		phys_addr_t bottom_up_start;

		/* make sure we will allocate above the kernel */
		bottom_up_start = max(start, kernel_end);

		/* ok, try bottom-up allocation first */
		ret = __memblock_find_range_bottom_up(bottom_up_start, end,
						      size, align, nid, flags);
		if (ret)
			return ret;

		/*
		 * we always limit bottom-up allocation above the kernel,
		 * but top-down allocation doesn't have the limit, so
		 * retrying top-down allocation may succeed when bottom-up
		 * allocation failed.
		 *
		 * bottom-up allocation is expected to fail very rarely,
		 * so we use WARN_ONCE() here to see the stack trace if
		 * a failure happens.
		 */
		WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE),
			  "memblock: bottom-up allocation failed, memory hotremove may be affected\n");
	}

	return __memblock_find_range_top_down(start, end, size, align, nid,
					      flags);
}
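/*
 * Illustrative sketch only (not part of the original file, values are
 * hypothetical): find 1 MiB anywhere in the accessible range on any node:
 *
 *	addr = memblock_find_in_range_node(SZ_1M, SMP_CACHE_BYTES, 0,
 *					   MEMBLOCK_ALLOC_ACCESSIBLE,
 *					   NUMA_NO_NODE, MEMBLOCK_NONE);
 *
 * Note that this only locates a suitable range; the caller still has to
 * memblock_reserve() it, as memblock_alloc_range_nid() below does.
 */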
/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	phys_addr_t ret;
	enum memblock_flags flags = choose_memblock_flags();

again:
	ret = memblock_find_in_range_node(size, align, start, end,
					  NUMA_NO_NODE, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}

	return ret;
}
static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}
#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
/**
 * memblock_discard - discard memory and reserved arrays if they were allocated
 */
void __init memblock_discard(void)
{
	phys_addr_t addr, size;

	if (memblock.reserved.regions != memblock_reserved_init_regions) {
		addr = __pa(memblock.reserved.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.reserved.max);
		__memblock_free_late(addr, size);
	}

	if (memblock.memory.regions != memblock_memory_init_regions) {
		addr = __pa(memblock.memory.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.memory.max);
		__memblock_free_late(addr, size);
	}
}
#endif
/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start, @new_area_start + @new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * Return:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr, new_end;
	int use_slab = slab_is_available();
	int *in_slab;

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new one aligned to PAGE_SIZE,
	 * so we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab_is_available() is true and we
	 * use kmalloc(), or we use MEMBLOCK for the allocation. That means
	 * that this is unsafe to use when bootmem is currently active (unless
	 * bootmem itself is implemented on top of MEMBLOCK which isn't the
	 * case yet)
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab
	 * is active for memory hotplug operations
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       type->name, type->max, type->max * 2);
		return -1;
	}

	new_end = addr + new_size - 1;
	memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]",
			type->name, type->max * 2, &addr, &new_end);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_alloc_size);

	/*
	 * Reserve the new array if that comes from the memblock.  Otherwise,
	 * we needn't do it
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}
/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}
/**
 * memblock_insert_region - insert new memblock region
 * @type: memblock type to insert into
 * @idx: index for the insertion point
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: node id of the new region
 * @flags: flags of the new region
 *
 * Insert new memblock region [@base, @base + @size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid,
						   enum memblock_flags flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}
/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base, @base + @size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, enum memblock_flags flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx, nr_new;
	struct memblock_region *rgn;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
			WARN_ON(nid != memblock_get_region_node(rgn));
#endif
			WARN_ON(flags != rgn->flags);
			nr_new++;
			if (insert)
				memblock_insert_region(type, idx++, base,
						       rbase - base, nid,
						       flags);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, idx, base, end - base,
					       nid, flags);
	}

	if (!nr_new)
		return 0;

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}
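/*
 * Worked example of the overlap/merge semantics above (illustrative,
 * values are hypothetical): with "memory" containing the single region
 * [0x1000, 0x3000), a call
 *
 *	memblock_add_range(&memblock.memory, 0x2000, 0x2000, nid, 0);
 *
 * inserts only the non-overlapping tail [0x3000, 0x4000) and the merge
 * pass then collapses the neighbours into the single region
 * [0x1000, 0x4000).
 */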
/**
 * memblock_add_node - add new memblock region within a NUMA node
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				      int nid)
{
	return memblock_add_range(&memblock.memory, base, size, nid, 0);
}

/**
 * memblock_add - add new memblock region
 * @base: base address of the new region
 * @size: size of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("memblock_add: [%pa-%pa] %pF\n",
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}
/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base, @base + @size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx;
	struct memblock_region *rgn;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, idx, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, idx--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = idx;
			*end_rgn = idx + 1;
		}
	}

	return 0;
}
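/*
 * Worked example (illustrative, hypothetical values): isolating
 * [0x400000, 0x800000) from a type holding the single region
 * [0, 0x1000000) splits it into
 *
 *	[0, 0x400000), [0x400000, 0x800000), [0x800000, 0x1000000)
 *
 * and returns *start_rgn = 1, *end_rgn = 2, i.e. only the middle region
 * lies inside the requested range.
 */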
static int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("memblock_remove: [%pa-%pa] %pS\n",
		     &base, &end, (void *)_RET_IP_);

	return memblock_remove_range(&memblock.memory, base, size);
}

int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("   memblock_free: [%pa-%pa] %pF\n",
		     &base, &end, (void *)_RET_IP_);

	kmemleak_free_part_phys(base, size);
	return memblock_remove_range(&memblock.reserved, base, size);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("memblock_reserve: [%pa-%pa] %pF\n",
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
}
/**
 * memblock_setclr_flag - set or clear flag for a memory region
 * @base: base address of the region
 * @size: size of the region
 * @set: set or clear the flag
 * @flag: the flag to update
 *
 * This function isolates region [@base, @base + @size), and sets/clears @flag.
 *
 * Return: 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
				phys_addr_t size, int set, int flag)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		if (set)
			memblock_set_region_flags(&type->regions[i], flag);
		else
			memblock_clear_region_flags(&type->regions[i], flag);

	memblock_merge_regions(type);
	return 0;
}
/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
	system_has_some_mirror = true;

	return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
}

/**
 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
}

/**
 * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP);
}
/**
 * __next_reserved_mem_region - next function for for_each_reserved_mem_region()
 * @idx: pointer to u64 loop variable
 * @out_start: ptr to phys_addr_t for start address of the region, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the region, can be %NULL
 *
 * Iterate over all reserved memory regions.
 */
void __init_memblock __next_reserved_mem_region(u64 *idx,
					   phys_addr_t *out_start,
					   phys_addr_t *out_end)
{
	struct memblock_type *type = &memblock.reserved;

	if (*idx < type->cnt) {
		struct memblock_region *r = &type->regions[*idx];
		phys_addr_t base = r->base;
		phys_addr_t size = r->size;

		if (out_start)
			*out_start = base;
		if (out_end)
			*out_end = base + size - 1;

		*idx += 1;
		return;
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b.  For example, if type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __init_memblock __next_mem_range(u64 *idx, int nid,
				      enum memblock_flags flags,
				      struct memblock_type *type_a,
				      struct memblock_type *type_b,
				      phys_addr_t *out_start,
				      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
	    "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int	    m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		/* if we want mirror memory skip non-mirror memory regions */
		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
			continue;

		/* skip nomap memory unless we were asked for it explicitly */
		if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start =
						max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				/*
				 * The region which ends first is
				 * advanced for the next iteration.
				 */
				if (m_end <= r_end)
					idx_a++;
				else
					idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
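/*
 * Illustrative note (not part of the original file): with the example
 * type_b layout from the kernel-doc above and a single type_a region
 * [8, 64), successive calls yield the free intersections [16, 32) and
 * [48, 64).  After the first call the encoded cursor is
 * *idx = ((u64)2 << 32) | 0: idx_b has advanced to the gap before
 * type_b region 2 while idx_a still points at the same type_a region.
 */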
/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
					  enum memblock_flags flags,
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (*idx == (u64)ULLONG_MAX) {
		idx_a = type_a->cnt - 1;
		if (type_b != NULL)
			idx_b = type_b->cnt;
		else
			idx_b = 0;
	}

	for (; idx_a >= 0; idx_a--) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		/* if we want mirror memory skip non-mirror memory regions */
		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
			continue;

		/* skip nomap memory unless we were asked for it explicitly */
		if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a--;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b >= 0; idx_b--) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;
			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				if (m_start >= r_start)
					idx_a--;
				else
					idx_b--;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}
	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r->nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r->nid;
}
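/*
 * Illustrative sketch only (not part of the original file): walking all
 * memory as page frame ranges on any node with the iterator above:
 *
 *	unsigned long start_pfn, end_pfn;
 *	int i, nid;
 *
 *	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
 *		pr_info("pfns [%lx-%lx) on node %d\n", start_pfn, end_pfn, nid);
 */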
/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base, @base + @size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type);
	return 0;
}
#endif	/* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					enum memblock_flags flags)
{
	phys_addr_t found;

	found = memblock_find_in_range_node(size, align, start, end, nid,
					    flags);
	if (found && !memblock_reserve(found, size)) {
		/*
		 * The min_count is set to 0 so that memblock allocations are
		 * never reported as leaks.
		 */
		kmemleak_alloc_phys(found, size, 0, 0);
		return found;
	}
	return 0;
}

phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					enum memblock_flags flags)
{
	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
					flags);
}

phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t max_addr,
					int nid, enum memblock_flags flags)
{
	return memblock_alloc_range_nid(size, align, 0, max_addr, nid, flags);
}
phys_addr_t __init memblock_phys_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	enum memblock_flags flags = choose_memblock_flags();
	phys_addr_t ret;

again:
	ret = memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE,
				      nid, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}
	return ret;
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE,
				       MEMBLOCK_NONE);
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate %pa bytes below %pa.\n",
		      &size, &max_addr);

	return alloc;
}

phys_addr_t __init memblock_phys_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_phys_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
/**
 * memblock_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate (phys address)
 * @max_addr: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * The @min_addr limit is dropped if it cannot be satisfied and the allocation
 * will fall back to memory below @min_addr. Also, allocation may fall back
 * to any node in the system if the specified node cannot
 * hold the requested memory.
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @max_addr == %MEMBLOCK_ALLOC_ACCESSIBLE.
 *
 * The phys address of allocated boot memory block is converted to virtual and
 * allocated memory is reset to 0.
 *
 * In addition, the function sets the min_count to 0 using kmemleak_alloc for
 * the allocated boot memory block, so that it is never reported as leaks.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
static void * __init memblock_alloc_internal(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	phys_addr_t alloc;
	void *ptr;
	enum memblock_flags flags = choose_memblock_flags();

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	/*
	 * Detect any accidental use of these APIs after slab is ready, as at
	 * this moment memblock may be deinitialized already and its
	 * internal data may be destroyed (after execution of memblock_free_all)
	 */
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, nid);

	if (max_addr > memblock.current_limit)
		max_addr = memblock.current_limit;
again:
	alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
					    nid, flags);
	if (alloc && !memblock_reserve(alloc, size))
		goto done;

	if (nid != NUMA_NO_NODE) {
		alloc = memblock_find_in_range_node(size, align, min_addr,
						    max_addr, NUMA_NO_NODE,
						    flags);
		if (alloc && !memblock_reserve(alloc, size))
			goto done;
	}

	if (min_addr) {
		min_addr = 0;
		goto again;
	}

	if (flags & MEMBLOCK_MIRROR) {
		flags &= ~MEMBLOCK_MIRROR;
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		goto again;
	}

	return NULL;
done:
	ptr = phys_to_virt(alloc);

	/*
	 * The min_count is set to 0 so that bootmem allocated blocks
	 * are never reported as leaks. This is because many of these blocks
	 * are only referred via the physical address which is not
	 * looked up by kmemleak.
	 */
	kmemleak_alloc(ptr, size, 0, 0);

	return ptr;
}
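/*
 * Fallback order of the allocator above, summarized (descriptive note,
 * not part of the original file): exact @nid in [@min_addr, @max_addr]
 * first, then any node in the same range, then the whole range below
 * @max_addr once @min_addr is dropped, and finally the same sequence
 * again without the MEMBLOCK_MIRROR requirement.
 */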
/**
 * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing
 * memory and without panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. Does not zero allocated memory, does not panic if request
 * cannot be satisfied.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_alloc_try_nid_raw(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pF\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);

	ptr = memblock_alloc_internal(size, align,
				      min_addr, max_addr, nid);
	if (ptr && size > 0)
		page_init_poison(ptr, size);

	return ptr;
}
/**
 * memblock_alloc_try_nid_nopanic - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. This function zeroes the allocated memory.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_alloc_try_nid_nopanic(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pF\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);

	ptr = memblock_alloc_internal(size, align,
				      min_addr, max_addr, nid);
	if (ptr)
		memset(ptr, 0, size);
	return ptr;
}
/**
 * memblock_alloc_try_nid - allocate boot memory block with panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public panicking version of memblock_alloc_try_nid_nopanic()
 * which provides debug information (including caller info), if enabled,
 * and panics if the request can not be satisfied.
 *
 * Return:
 * Virtual address of allocated memory block on success; never returns
 * NULL, since the function panics on failure.
 */
void * __init memblock_alloc_try_nid(
                        phys_addr_t size, phys_addr_t align,
                        phys_addr_t min_addr, phys_addr_t max_addr,
                        int nid)
{
        void *ptr;

        memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pF\n",
                     __func__, (u64)size, (u64)align, nid, &min_addr,
                     &max_addr, (void *)_RET_IP_);
        ptr = memblock_alloc_internal(size, align,
                                      min_addr, max_addr, nid);
        if (ptr) {
                memset(ptr, 0, size);
                return ptr;
        }

        panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa\n",
              __func__, (u64)size, (u64)align, nid, &min_addr, &max_addr);

        return NULL;
}
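/*
 * A sketch of the panicking variant: suitable only for allocations the
 * kernel cannot boot without, since failure halts the machine instead of
 * returning NULL (the size below is hypothetical):
 */
#if 0	/* illustrative only */
        /* If this table cannot be allocated, continuing makes no sense. */
        void *table = memblock_alloc_try_nid(SZ_64K, SMP_CACHE_BYTES,
                                             0, MEMBLOCK_ALLOC_ACCESSIBLE,
                                             NUMA_NO_NODE);
#endif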
/**
 * __memblock_free_early - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free a boot memory block previously allocated by the memblock_alloc_xx()
 * API. The memory is only dropped from memblock's reserved list; it is not
 * released to the buddy allocator.
 */
void __init __memblock_free_early(phys_addr_t base, phys_addr_t size)
{
        phys_addr_t end = base + size - 1;

        memblock_dbg("%s: [%pa-%pa] %pF\n",
                     __func__, &base, &end, (void *)_RET_IP_);
        kmemleak_free_part_phys(base, size);
        memblock_remove_range(&memblock.reserved, base, size);
}
/**
 * __memblock_free_late - free bootmem block pages directly to buddy allocator
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system. Pages are released directly
 * to the buddy allocator; no bootmem metadata is updated because it is gone.
 */
void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
{
        phys_addr_t cursor, end;

        end = base + size - 1;
        memblock_dbg("%s: [%pa-%pa] %pF\n",
                     __func__, &base, &end, (void *)_RET_IP_);
        kmemleak_free_part_phys(base, size);
        cursor = PFN_UP(base);
        end = PFN_DOWN(base + size);

        for (; cursor < end; cursor++) {
                memblock_free_pages(pfn_to_page(cursor), cursor, 0);
                totalram_pages++;
        }
}
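/*
 * Note the rounding above: PFN_UP()/PFN_DOWN() shrink the range inward, so
 * partial pages at either end are never handed to the buddy allocator.
 * A worked example (hypothetical addresses, 4 KiB pages):
 *
 *	base = 0x10000200, size = 0x3000
 *	cursor = PFN_UP(0x10000200)   = 0x10001
 *	end    = PFN_DOWN(0x10003200) = 0x10003
 *
 * Only pfns 0x10001 and 0x10002 are freed; the partial pages at pfns
 * 0x10000 and 0x10003 stay reserved.
 */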
/*
 * Remaining API functions
 */

phys_addr_t __init_memblock memblock_phys_mem_size(void)
{
        return memblock.memory.total_size;
}

phys_addr_t __init_memblock memblock_reserved_size(void)
{
        return memblock.reserved.total_size;
}

phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
{
        unsigned long pages = 0;
        struct memblock_region *r;
        unsigned long start_pfn, end_pfn;

        for_each_memblock(memory, r) {
                start_pfn = memblock_region_memory_base_pfn(r);
                end_pfn = memblock_region_memory_end_pfn(r);
                start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
                end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
                pages += end_pfn - start_pfn;
        }

        return PFN_PHYS(pages);
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
        return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
        int idx = memblock.memory.cnt - 1;

        return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}
static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
{
        phys_addr_t max_addr = PHYS_ADDR_MAX;
        struct memblock_region *r;

        /*
         * translate the memory @limit size into the max address within one of
         * the memory memblock regions; if @limit exceeds the total size of
         * those regions, max_addr keeps its original value of PHYS_ADDR_MAX
         */
        for_each_memblock(memory, r) {
                if (limit <= r->size) {
                        max_addr = r->base + limit;
                        break;
                }
                limit -= r->size;
        }

        return max_addr;
}
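/*
 * A worked example of the translation above (the region layout is
 * hypothetical): with two memory regions [0x80000000, 0xc0000000) and
 * [0x100000000, 0x140000000) and limit = 0x60000000 (1.5G), the first pass
 * subtracts the 1G first region (limit becomes 0x20000000), and the second
 * pass yields max_addr = 0x100000000 + 0x20000000 = 0x120000000.
 */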
void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
        phys_addr_t max_addr = PHYS_ADDR_MAX;

        if (!limit)
                return;

        max_addr = __find_max_addr(limit);

        /* @limit exceeds the total size of the memory, do nothing */
        if (max_addr == PHYS_ADDR_MAX)
                return;

        /* truncate both memory and reserved regions */
        memblock_remove_range(&memblock.memory, max_addr,
                              PHYS_ADDR_MAX);
        memblock_remove_range(&memblock.reserved, max_addr,
                              PHYS_ADDR_MAX);
}
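/*
 * Architecture code typically calls memblock_enforce_memory_limit() while
 * handling the mem= kernel command line option. A sketch (memparse() is
 * the usual helper for such size strings):
 */
#if 0	/* illustrative only */
        /* Pretend the user booted with mem=512M. */
        memblock_enforce_memory_limit(memparse("512M", NULL));
#endif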
void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
{
        int start_rgn, end_rgn;
        int i, ret;

        if (!size)
                return;

        ret = memblock_isolate_range(&memblock.memory, base, size,
                                     &start_rgn, &end_rgn);
        if (ret)
                return;

        /* remove all the MAP regions outside the capped range */
        for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
                if (!memblock_is_nomap(&memblock.memory.regions[i]))
                        memblock_remove_region(&memblock.memory, i);

        for (i = start_rgn - 1; i >= 0; i--)
                if (!memblock_is_nomap(&memblock.memory.regions[i]))
                        memblock_remove_region(&memblock.memory, i);

        /* truncate the reserved regions */
        memblock_remove_range(&memblock.reserved, 0, base);
        memblock_remove_range(&memblock.reserved,
                              base + size, PHYS_ADDR_MAX);
}
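/*
 * This is used, for example, by kdump code to restrict the capture kernel
 * to the memory window reserved for it; NOMAP regions survive the cap
 * because firmware may still depend on them. A sketch (the resource use
 * here is hypothetical):
 */
#if 0	/* illustrative only */
        /* Keep only the crash kernel's window of memory. */
        memblock_cap_memory_range(crashk_res.start,
                                  resource_size(&crashk_res));
#endif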
void __init memblock_mem_limit_remove_map(phys_addr_t limit)
{
        phys_addr_t max_addr;

        if (!limit)
                return;

        max_addr = __find_max_addr(limit);

        /* @limit exceeds the total size of the memory, do nothing */
        if (max_addr == PHYS_ADDR_MAX)
                return;

        memblock_cap_memory_range(0, max_addr);
}
static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
        unsigned int left = 0, right = type->cnt;

        do {
                unsigned int mid = (right + left) / 2;

                if (addr < type->regions[mid].base)
                        right = mid;
                else if (addr >= (type->regions[mid].base +
                                  type->regions[mid].size))
                        left = mid + 1;
                else
                        return mid;
        } while (left < right);

        return -1;
}

bool __init memblock_is_reserved(phys_addr_t addr)
{
        return memblock_search(&memblock.reserved, addr) != -1;
}

bool __init_memblock memblock_is_memory(phys_addr_t addr)
{
        return memblock_search(&memblock.memory, addr) != -1;
}

bool __init_memblock memblock_is_map_memory(phys_addr_t addr)
{
        int i = memblock_search(&memblock.memory, addr);

        if (i == -1)
                return false;
        return !memblock_is_nomap(&memblock.memory.regions[i]);
}
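/*
 * memblock_search() is a plain binary search over the sorted,
 * non-overlapping region array, so these predicates stay O(log n) even on
 * systems with many memory ranges. A sketch of typical use (the address is
 * hypothetical):
 */
#if 0	/* illustrative only */
        phys_addr_t addr = 0x80000000;

        if (memblock_is_memory(addr) && memblock_is_map_memory(addr))
                pr_info("%pa is mapped RAM\n", &addr);
#endif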
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
                         unsigned long *start_pfn, unsigned long *end_pfn)
{
        struct memblock_type *type = &memblock.memory;
        int mid = memblock_search(type, PFN_PHYS(pfn));

        if (mid == -1)
                return -1;

        *start_pfn = PFN_DOWN(type->regions[mid].base);
        *end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);

        return type->regions[mid].nid;
}
#endif
/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base + @size) is a subset of a memory block.
 *
 * Return:
 * true if the region is a subset of a memory block, false otherwise.
 */
bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
        int idx = memblock_search(&memblock.memory, base);
        phys_addr_t end = base + memblock_cap_size(base, &size);

        if (idx == -1)
                return false;
        return (memblock.memory.regions[idx].base +
                memblock.memory.regions[idx].size) >= end;
}
/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base + @size) intersects a reserved
 * memory block.
 *
 * Return:
 * True if they intersect, false if not.
 */
bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
        memblock_cap_size(base, &size);
        return memblock_overlaps_region(&memblock.reserved, base, size);
}
void __init_memblock memblock_trim_memory(phys_addr_t align)
{
        phys_addr_t start, end, orig_start, orig_end;
        struct memblock_region *r;

        for_each_memblock(memory, r) {
                orig_start = r->base;
                orig_end = r->base + r->size;
                start = round_up(orig_start, align);
                end = round_down(orig_end, align);

                if (start == orig_start && end == orig_end)
                        continue;

                if (start < end) {
                        r->base = start;
                        r->size = end - start;
                } else {
                        memblock_remove_region(&memblock.memory,
                                               r - memblock.memory.regions);
                        r--;
                }
        }
}
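/*
 * A worked example of the trimming above (hypothetical region, align = 1M):
 * [0x1fff0000, 0x2ff12345) becomes [0x20000000, 0x2ff00000) after the
 * round_up()/round_down() pair, while any region smaller than the alignment
 * collapses (start >= end) and is removed outright.
 */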
void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
        memblock.current_limit = limit;
}

phys_addr_t __init_memblock memblock_get_current_limit(void)
{
        return memblock.current_limit;
}
static void __init_memblock memblock_dump(struct memblock_type *type)
{
        phys_addr_t base, end, size;
        enum memblock_flags flags;
        int idx;
        struct memblock_region *rgn;

        pr_info(" %s.cnt = 0x%lx\n", type->name, type->cnt);

        for_each_memblock_type(idx, type, rgn) {
                char nid_buf[32] = "";

                base = rgn->base;
                size = rgn->size;
                end = base + size - 1;
                flags = rgn->flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
                if (memblock_get_region_node(rgn) != MAX_NUMNODES)
                        snprintf(nid_buf, sizeof(nid_buf), " on node %d",
                                 memblock_get_region_node(rgn));
#endif
                pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n",
                        type->name, idx, &base, &end, &size, nid_buf, flags);
        }
}

void __init_memblock __memblock_dump_all(void)
{
        pr_info("MEMBLOCK configuration:\n");
        pr_info(" memory size = %pa reserved size = %pa\n",
                &memblock.memory.total_size,
                &memblock.reserved.total_size);

        memblock_dump(&memblock.memory);
        memblock_dump(&memblock.reserved);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
        memblock_dump(&memblock.physmem);
#endif
}
void __init memblock_allow_resize(void)
{
        memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
        if (p && strstr(p, "debug"))
                memblock_debug = 1;
        return 0;
}
early_param("memblock", early_memblock);
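/*
 * So booting with "memblock=debug" on the kernel command line flips
 * memblock_debug on, which makes memblock_dbg() (used throughout this
 * file) emit a pr_info() line for every memblock operation, e.g.:
 *
 *	memblock_reserve: [0x0000000001000000-0x0000000001ffffff] ...
 *
 * (the addresses above are, of course, boot-specific).
 */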
static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
        int order;

        while (start < end) {
                order = min(MAX_ORDER - 1UL, __ffs(start));

                while (start + (1UL << order) > end)
                        order--;

                memblock_free_pages(pfn_to_page(start), start, order);

                start += (1UL << order);
        }
}
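/*
 * The order computation above carves a pfn range into the largest
 * naturally-aligned blocks that fit. A worked example with start = 6 and
 * end = 20 (hypothetical pfns):
 *
 *	start 6:  __ffs(6) = 1,  frees an order-1 block, pfns 6-7
 *	start 8:  __ffs(8) = 3,  frees an order-3 block, pfns 8-15
 *	start 16: __ffs(16) = 4, but 16 + 16 > 20, so the order drops
 *		  to 2 and pfns 16-19 are freed
 */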
static unsigned long __init __free_memory_core(phys_addr_t start,
                                 phys_addr_t end)
{
        unsigned long start_pfn = PFN_UP(start);
        unsigned long end_pfn = min_t(unsigned long,
                                      PFN_DOWN(end), max_low_pfn);

        if (start_pfn >= end_pfn)
                return 0;

        __free_pages_memory(start_pfn, end_pfn);

        return end_pfn - start_pfn;
}

static unsigned long __init free_low_memory_core_early(void)
{
        unsigned long count = 0;
        phys_addr_t start, end;
        u64 i;

        memblock_clear_hotplug(0, -1);

        for_each_reserved_mem_region(i, &start, &end)
                reserve_bootmem_region(start, end);

        /*
         * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
         * because in some cases (e.g. when node 0 has no RAM installed)
         * the low memory will be on node 1 instead.
         */
        for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
                                NULL)
                count += __free_memory_core(start, end);

        return count;
}
static int reset_managed_pages_done __initdata;

void reset_node_managed_pages(pg_data_t *pgdat)
{
        struct zone *z;

        for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
                z->managed_pages = 0;
}

void __init reset_all_zones_managed_pages(void)
{
        struct pglist_data *pgdat;

        if (reset_managed_pages_done)
                return;

        for_each_online_pgdat(pgdat)
                reset_node_managed_pages(pgdat);

        reset_managed_pages_done = 1;
}

/**
 * memblock_free_all - release free pages to the buddy allocator
 *
 * Return: the number of pages actually released.
 */
unsigned long __init memblock_free_all(void)
{
        unsigned long pages;

        reset_all_zones_managed_pages();

        pages = free_low_memory_core_early();
        totalram_pages += pages;

        return pages;
}
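/*
 * memblock_free_all() is the hand-over point from the early allocator to
 * the buddy allocator; it is expected to run exactly once, from the
 * architecture's mem_init(), after which allocations must go through the
 * page allocator or slab rather than memblock (hence the
 * slab_is_available() warning in memblock_alloc_internal() above).
 */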
#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
        struct memblock_type *type = m->private;
        struct memblock_region *reg;
        int i;
        phys_addr_t end;

        for (i = 0; i < type->cnt; i++) {
                reg = &type->regions[i];
                end = reg->base + reg->size - 1;

                seq_printf(m, "%4d: ", i);
                seq_printf(m, "%pa..%pa\n", &reg->base, &end);
        }
        return 0;
}
DEFINE_SHOW_ATTRIBUTE(memblock_debug);

static int __init memblock_init_debugfs(void)
{
        struct dentry *root = debugfs_create_dir("memblock", NULL);

        if (!root)
                return -ENXIO;
        debugfs_create_file("memory", 0444, root,
                            &memblock.memory, &memblock_debug_fops);
        debugfs_create_file("reserved", 0444, root,
                            &memblock.reserved, &memblock_debug_fops);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
        debugfs_create_file("physmem", 0444, root,
                            &memblock.physmem, &memblock_debug_fops);
#endif

        return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */
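/*
 * With CONFIG_DEBUG_FS enabled, the files registered above can be read
 * from userspace once debugfs is mounted, one "index: start..end" line per
 * region as produced by memblock_debug_show(), e.g. (addresses are
 * boot-specific):
 *
 *	# cat /sys/kernel/debug/memblock/memory
 *	   0: 0x0000000000001000..0x000000000009efff
 *	   1: 0x0000000000100000..0x00000000bffdffff
 */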