memblock.c

/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp. June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm/sections.h>
#include <linux/io.h>

#include "internal.h"

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
#endif

struct memblock memblock __initdata_memblock = {
        .memory.regions         = memblock_memory_init_regions,
        .memory.cnt             = 1,    /* empty dummy entry */
        .memory.max             = INIT_MEMBLOCK_REGIONS,
        .memory.name            = "memory",

        .reserved.regions       = memblock_reserved_init_regions,
        .reserved.cnt           = 1,    /* empty dummy entry */
        .reserved.max           = INIT_MEMBLOCK_REGIONS,
        .reserved.name          = "reserved",

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
        .physmem.regions        = memblock_physmem_init_regions,
        .physmem.cnt            = 1,    /* empty dummy entry */
        .physmem.max            = INIT_PHYSMEM_REGIONS,
        .physmem.name           = "physmem",
#endif

        .bottom_up              = false,
        .current_limit          = MEMBLOCK_ALLOC_ANYWHERE,
};

int memblock_debug __initdata_memblock;
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

ulong __init_memblock choose_memblock_flags(void)
{
        return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
        return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);
}
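
/*
 * Illustrative note (added for clarity, not in the original source): with a
 * 64-bit phys_addr_t, memblock_cap_size(0xfffffffffffff000, &size) where
 * *size == 0x2000 clamps *size down to 0xfff, so that base + *size lands
 * exactly on ULLONG_MAX instead of wrapping around to a low address.
 */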

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
                                       phys_addr_t base2, phys_addr_t size2)
{
        return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
                                        phys_addr_t base, phys_addr_t size)
{
        unsigned long i;

        for (i = 0; i < type->cnt; i++)
                if (memblock_addrs_overlap(base, size, type->regions[i].base,
                                           type->regions[i].size))
                        break;
        return i < type->cnt;
}
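
/*
 * Illustrative example (added for clarity, not in the original source):
 * regions are treated as half-open intervals [base, base + size), so
 * memblock_addrs_overlap(0x1000, 0x1000, 0x2000, 0x1000) is false --
 * [0x1000, 0x2000) and [0x2000, 0x3000) merely touch, they do not overlap.
 */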

/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
                                phys_addr_t size, phys_addr_t align, int nid,
                                ulong flags)
{
        phys_addr_t this_start, this_end, cand;
        u64 i;

        for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
                this_start = clamp(this_start, start, end);
                this_end = clamp(this_end, start, end);

                cand = round_up(this_start, align);
                if (cand < this_end && this_end - cand >= size)
                        return cand;
        }

        return 0;
}

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
                               phys_addr_t size, phys_addr_t align, int nid,
                               ulong flags)
{
        phys_addr_t this_start, this_end, cand;
        u64 i;

        for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
                                        NULL) {
                this_start = clamp(this_start, start, end);
                this_end = clamp(this_end, start, end);

                if (this_end < size)
                        continue;

                cand = round_down(this_end - size, align);
                if (cand >= this_start)
                        return cand;
        }

        return 0;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * When allocation direction is bottom-up, @start should be greater than
 * the end of the kernel image; otherwise it is trimmed up to that point.
 * The reason is that we want bottom-up allocations to land just above the
 * kernel image, so it is highly likely that the allocated memory and the
 * kernel will reside on the same node.
 *
 * If the bottom-up allocation fails, the allocation is retried top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
                                        phys_addr_t align, phys_addr_t start,
                                        phys_addr_t end, int nid, ulong flags)
{
        phys_addr_t kernel_end, ret;

        /* pump up @end */
        if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
                end = memblock.current_limit;

        /* avoid allocating the first page */
        start = max_t(phys_addr_t, start, PAGE_SIZE);
        end = max(start, end);
        kernel_end = __pa_symbol(_end);

        /*
         * try bottom-up allocation only when bottom-up mode
         * is set and @end is above the kernel image.
         */
        if (memblock_bottom_up() && end > kernel_end) {
                phys_addr_t bottom_up_start;

                /* make sure we will allocate above the kernel */
                bottom_up_start = max(start, kernel_end);

                /* ok, try bottom-up allocation first */
                ret = __memblock_find_range_bottom_up(bottom_up_start, end,
                                                      size, align, nid, flags);
                if (ret)
                        return ret;

                /*
                 * we always limit bottom-up allocation above the kernel,
                 * but top-down allocation doesn't have the limit, so
                 * retrying top-down allocation may succeed when bottom-up
                 * allocation failed.
                 *
                 * bottom-up allocation is expected to fail very rarely,
                 * so we use WARN_ONCE() here to get the stack trace when
                 * it does.
                 */
                WARN_ONCE(1, "memblock: bottom-up allocation failed, memory hotunplug may be affected\n");
        }

        return __memblock_find_range_top_down(start, end, size, align, nid,
                                              flags);
}
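
/*
 * Illustrative usage (added for clarity, not in the original source; the
 * request below is made up):
 *
 *      phys_addr_t addr;
 *
 *      addr = memblock_find_in_range_node(SZ_1M, SZ_4K, 0,
 *                                         MEMBLOCK_ALLOC_ACCESSIBLE,
 *                                         NUMA_NO_NODE, MEMBLOCK_NONE);
 *      if (!addr)
 *              pr_warn("no 1MiB area found\n");
 *
 * Note that this only *finds* a candidate; callers normally pair it with
 * memblock_reserve(), as memblock_alloc_range_nid() below does.
 */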

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
                                        phys_addr_t end, phys_addr_t size,
                                        phys_addr_t align)
{
        phys_addr_t ret;
        ulong flags = choose_memblock_flags();

again:
        ret = memblock_find_in_range_node(size, align, start, end,
                                          NUMA_NO_NODE, flags);

        if (!ret && (flags & MEMBLOCK_MIRROR)) {
                pr_warn("Could not allocate %pap bytes of mirrored memory\n",
                        &size);
                flags &= ~MEMBLOCK_MIRROR;
                goto again;
        }

        return ret;
}
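
/*
 * Illustrative usage (added for clarity, not in the original source; the
 * sizes are made up):
 *
 *      phys_addr_t crash_base;
 *
 *      crash_base = memblock_find_in_range(0, SZ_4G, SZ_256M, SZ_2M);
 *      if (crash_base)
 *              memblock_reserve(crash_base, SZ_256M);
 *
 * This find-then-reserve pattern is how carve-outs such as crash kernel
 * regions are typically set up by architecture code.
 */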

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
        type->total_size -= type->regions[r].size;
        memmove(&type->regions[r], &type->regions[r + 1],
                (type->cnt - (r + 1)) * sizeof(type->regions[r]));
        type->cnt--;

        /* Special case for empty arrays */
        if (type->cnt == 0) {
                WARN_ON(type->total_size != 0);
                type->cnt = 1;
                type->regions[0].base = 0;
                type->regions[0].size = 0;
                type->regions[0].flags = 0;
                memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
        }
}

#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
/**
 * memblock_discard - discard the memory and reserved arrays if they were allocated
 */
void __init memblock_discard(void)
{
        phys_addr_t addr, size;

        if (memblock.reserved.regions != memblock_reserved_init_regions) {
                addr = __pa(memblock.reserved.regions);
                size = PAGE_ALIGN(sizeof(struct memblock_region) *
                                  memblock.reserved.max);
                __memblock_free_late(addr, size);
        }

        if (memblock.memory.regions != memblock_memory_init_regions) {
                addr = __pa(memblock.memory.regions);
                size = PAGE_ALIGN(sizeof(struct memblock_region) *
                                  memblock.memory.max);
                __memblock_free_late(addr, size);
        }
}
#endif

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start,@new_area_start+@new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * RETURNS:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
                                                phys_addr_t new_area_start,
                                                phys_addr_t new_area_size)
{
        struct memblock_region *new_array, *old_array;
        phys_addr_t old_alloc_size, new_alloc_size;
        phys_addr_t old_size, new_size, addr;
        int use_slab = slab_is_available();
        int *in_slab;

        /* We don't allow resizing until we know about the reserved regions
         * of memory that aren't suitable for allocation
         */
        if (!memblock_can_resize)
                return -1;

        /* Calculate new doubled size */
        old_size = type->max * sizeof(struct memblock_region);
        new_size = old_size << 1;
        /*
         * We need to allocate the new array aligned to PAGE_SIZE,
         * so that we can free it completely later.
         */
        old_alloc_size = PAGE_ALIGN(old_size);
        new_alloc_size = PAGE_ALIGN(new_size);

        /* Retrieve the slab flag */
        if (type == &memblock.memory)
                in_slab = &memblock_memory_in_slab;
        else
                in_slab = &memblock_reserved_in_slab;

        /* Try to find some space for it.
         *
         * WARNING: We assume that either slab_is_available() and we use it or
         * we use MEMBLOCK for allocations. That means that this is unsafe to
         * use when bootmem is currently active (unless bootmem itself is
         * implemented on top of MEMBLOCK which isn't the case yet)
         *
         * This should however not be an issue for now, as we currently only
         * call into MEMBLOCK while it's still active, or much later when slab
         * is active for memory hotplug operations
         */
        if (use_slab) {
                new_array = kmalloc(new_size, GFP_KERNEL);
                addr = new_array ? __pa(new_array) : 0;
        } else {
                /* only exclude range when trying to double reserved.regions */
                if (type != &memblock.reserved)
                        new_area_start = new_area_size = 0;

                addr = memblock_find_in_range(new_area_start + new_area_size,
                                              memblock.current_limit,
                                              new_alloc_size, PAGE_SIZE);
                if (!addr && new_area_size)
                        addr = memblock_find_in_range(0,
                                min(new_area_start, memblock.current_limit),
                                new_alloc_size, PAGE_SIZE);

                new_array = addr ? __va(addr) : NULL;
        }
        if (!addr) {
                pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
                       type->name, type->max, type->max * 2);
                return -1;
        }

        memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]\n",
                     type->name, type->max * 2, (u64)addr,
                     (u64)addr + new_size - 1);

        /*
         * Found space, we now need to move the array over before we add the
         * reserved region since it may be our reserved array itself that is
         * full.
         */
        memcpy(new_array, type->regions, old_size);
        memset(new_array + type->max, 0, old_size);
        old_array = type->regions;
        type->regions = new_array;
        type->max <<= 1;

        /* Free old array. We needn't free it if the array is the static one */
        if (*in_slab)
                kfree(old_array);
        else if (old_array != memblock_memory_init_regions &&
                 old_array != memblock_reserved_init_regions)
                memblock_free(__pa(old_array), old_alloc_size);

        /*
         * Reserve the new array if that comes from the memblock. Otherwise, we
         * needn't do it
         */
        if (!use_slab)
                BUG_ON(memblock_reserve(addr, new_alloc_size));

        /* Update slab flag */
        *in_slab = use_slab;

        return 0;
}

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
        int i = 0;

        /* cnt never goes below 1 */
        while (i < type->cnt - 1) {
                struct memblock_region *this = &type->regions[i];
                struct memblock_region *next = &type->regions[i + 1];

                if (this->base + this->size != next->base ||
                    memblock_get_region_node(this) !=
                    memblock_get_region_node(next) ||
                    this->flags != next->flags) {
                        BUG_ON(this->base + this->size > next->base);
                        i++;
                        continue;
                }

                this->size += next->size;
                /* move forward from next + 1, index of which is i + 2 */
                memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
                type->cnt--;
        }
}
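
/*
 * Illustrative example (added for clarity, not in the original source):
 * after adding [0x1000, 0x2000) and [0x2000, 0x3000) with the same nid and
 * flags, memblock_merge_regions() collapses them into the single region
 * [0x1000, 0x3000); if the flags differed (say one side carried
 * MEMBLOCK_MIRROR), the two entries would be kept separate.
 */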

/**
 * memblock_insert_region - insert new memblock region
 * @type: memblock type to insert into
 * @idx: index for the insertion point
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: node id of the new region
 * @flags: flags of the new region
 *
 * Insert new memblock region [@base,@base+@size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
                                                   int idx, phys_addr_t base,
                                                   phys_addr_t size,
                                                   int nid, unsigned long flags)
{
        struct memblock_region *rgn = &type->regions[idx];

        BUG_ON(type->cnt >= type->max);
        memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
        rgn->base = base;
        rgn->size = size;
        rgn->flags = flags;
        memblock_set_region_node(rgn, nid);
        type->cnt++;
        type->total_size += size;
}

/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base,@base+@size) into @type. The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions. @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_range(struct memblock_type *type,
                                phys_addr_t base, phys_addr_t size,
                                int nid, unsigned long flags)
{
        bool insert = false;
        phys_addr_t obase = base;
        phys_addr_t end = base + memblock_cap_size(base, &size);
        int idx, nr_new;
        struct memblock_region *rgn;

        if (!size)
                return 0;

        /* special case for empty array */
        if (type->regions[0].size == 0) {
                WARN_ON(type->cnt != 1 || type->total_size);
                type->regions[0].base = base;
                type->regions[0].size = size;
                type->regions[0].flags = flags;
                memblock_set_region_node(&type->regions[0], nid);
                type->total_size = size;
                return 0;
        }
repeat:
        /*
         * The following is executed twice. Once with %false @insert and
         * then with %true. The first counts the number of regions needed
         * to accommodate the new area. The second actually inserts them.
         */
        base = obase;
        nr_new = 0;

        for_each_memblock_type(idx, type, rgn) {
                phys_addr_t rbase = rgn->base;
                phys_addr_t rend = rbase + rgn->size;

                if (rbase >= end)
                        break;
                if (rend <= base)
                        continue;
                /*
                 * @rgn overlaps. If it separates the lower part of new
                 * area, insert that portion.
                 */
                if (rbase > base) {
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
                        WARN_ON(nid != memblock_get_region_node(rgn));
#endif
                        WARN_ON(flags != rgn->flags);
                        nr_new++;
                        if (insert)
                                memblock_insert_region(type, idx++, base,
                                                       rbase - base, nid,
                                                       flags);
                }
                /* area below @rend is dealt with, forget about it */
                base = min(rend, end);
        }

        /* insert the remaining portion */
        if (base < end) {
                nr_new++;
                if (insert)
                        memblock_insert_region(type, idx, base, end - base,
                                               nid, flags);
        }

        if (!nr_new)
                return 0;

        /*
         * If this was the first round, resize array and repeat for actual
         * insertions; otherwise, merge and return.
         */
        if (!insert) {
                while (type->cnt + nr_new > type->max)
                        if (memblock_double_array(type, obase, size) < 0)
                                return -ENOMEM;
                insert = true;
                goto repeat;
        } else {
                memblock_merge_regions(type);
                return 0;
        }
}
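
/*
 * Illustrative walk-through (added for clarity, not in the original source):
 * adding [0x1000, 0x4000) when @type already holds [0x2000, 0x3000) first
 * counts two new entries ([0x1000, 0x2000) and [0x3000, 0x4000)) with
 * @insert false, grows the array if needed, then inserts them for real on
 * the second pass; memblock_merge_regions() finally folds all three into
 * one region when nid and flags match.
 */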

int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
                                      int nid)
{
        return memblock_add_range(&memblock.memory, base, size, nid, 0);
}

int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
        phys_addr_t end = base + size - 1;

        memblock_dbg("memblock_add: [%pa-%pa] %pF\n",
                     &base, &end, (void *)_RET_IP_);

        return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}
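
/*
 * Illustrative usage (added for clarity, not in the original source; the
 * addresses are made up): early architecture code typically feeds the
 * physical memory map to memblock and then reserves what is already
 * occupied, e.g.:
 *
 *      memblock_add(0x80000000, SZ_512M);      // one RAM bank
 *      memblock_reserve(__pa_symbol(_text),
 *                       __pa_symbol(_end) - __pa_symbol(_text));
 */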

/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base,@base+@size). Crossing regions are split at the boundaries,
 * which may create at most two more regions. The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
                                        phys_addr_t base, phys_addr_t size,
                                        int *start_rgn, int *end_rgn)
{
        phys_addr_t end = base + memblock_cap_size(base, &size);
        int idx;
        struct memblock_region *rgn;

        *start_rgn = *end_rgn = 0;

        if (!size)
                return 0;

        /* we'll create at most two more regions */
        while (type->cnt + 2 > type->max)
                if (memblock_double_array(type, base, size) < 0)
                        return -ENOMEM;

        for_each_memblock_type(idx, type, rgn) {
                phys_addr_t rbase = rgn->base;
                phys_addr_t rend = rbase + rgn->size;

                if (rbase >= end)
                        break;
                if (rend <= base)
                        continue;

                if (rbase < base) {
                        /*
                         * @rgn intersects from below. Split and continue
                         * to process the next region - the new top half.
                         */
                        rgn->base = base;
                        rgn->size -= base - rbase;
                        type->total_size -= base - rbase;
                        memblock_insert_region(type, idx, rbase, base - rbase,
                                               memblock_get_region_node(rgn),
                                               rgn->flags);
                } else if (rend > end) {
                        /*
                         * @rgn intersects from above. Split and redo the
                         * current region - the new bottom half.
                         */
                        rgn->base = end;
                        rgn->size -= end - rbase;
                        type->total_size -= end - rbase;
                        memblock_insert_region(type, idx--, rbase, end - rbase,
                                               memblock_get_region_node(rgn),
                                               rgn->flags);
                } else {
                        /* @rgn is fully contained, record it */
                        if (!*end_rgn)
                                *start_rgn = idx;
                        *end_rgn = idx + 1;
                }
        }

        return 0;
}
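
/*
 * Illustrative example (added for clarity, not in the original source):
 * isolating [0x2000, 0x3000) out of a single region [0x1000, 0x4000) splits
 * it into [0x1000, 0x2000), [0x2000, 0x3000) and [0x3000, 0x4000);
 * @start_rgn and @end_rgn then bracket just the middle entry, so callers
 * such as memblock_setclr_flag() can operate on exactly that range.
 */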

static int __init_memblock memblock_remove_range(struct memblock_type *type,
                                          phys_addr_t base, phys_addr_t size)
{
        int start_rgn, end_rgn;
        int i, ret;

        ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
        if (ret)
                return ret;

        for (i = end_rgn - 1; i >= start_rgn; i--)
                memblock_remove_region(type, i);
        return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
        return memblock_remove_range(&memblock.memory, base, size);
}

int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
        phys_addr_t end = base + size - 1;

        memblock_dbg(" memblock_free: [%pa-%pa] %pF\n",
                     &base, &end, (void *)_RET_IP_);

        kmemleak_free_part_phys(base, size);
        return memblock_remove_range(&memblock.reserved, base, size);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
        phys_addr_t end = base + size - 1;

        memblock_dbg("memblock_reserve: [%pa-%pa] %pF\n",
                     &base, &end, (void *)_RET_IP_);

        return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_setclr_flag - set or clear a flag on a memory region
 * @base: base address of the region
 * @size: size of the region
 * @set: %1 to set the flag, %0 to clear it
 * @flag: the flag to update
 *
 * This function isolates region [@base, @base + @size), and sets/clears flag.
 *
 * Return 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
                                phys_addr_t size, int set, int flag)
{
        struct memblock_type *type = &memblock.memory;
        int i, ret, start_rgn, end_rgn;

        ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
        if (ret)
                return ret;

        for (i = start_rgn; i < end_rgn; i++)
                if (set)
                        memblock_set_region_flags(&type->regions[i], flag);
                else
                        memblock_clear_region_flags(&type->regions[i], flag);

        memblock_merge_regions(type);
        return 0;
}

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
        return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
        return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
        system_has_some_mirror = true;

        return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
}

/**
 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
        return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
}

/**
 * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
{
        return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP);
}

/**
 * __next_reserved_mem_region - next function for for_each_reserved_mem_region()
 * @idx: pointer to u64 loop variable
 * @out_start: ptr to phys_addr_t for start address of the region, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the region, can be %NULL
 *
 * Iterate over all reserved memory regions.
 */
void __init_memblock __next_reserved_mem_region(u64 *idx,
                                                phys_addr_t *out_start,
                                                phys_addr_t *out_end)
{
        struct memblock_type *type = &memblock.reserved;

        if (*idx < type->cnt) {
                struct memblock_region *r = &type->regions[*idx];
                phys_addr_t base = r->base;
                phys_addr_t size = r->size;

                if (out_start)
                        *out_start = base;
                if (out_end)
                        *out_end = base + size - 1;

                *idx += 1;
                return;
        }

        /* signal end of iteration */
        *idx = ULLONG_MAX;
}
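
/*
 * Illustrative usage (added for clarity, not in the original source): the
 * matching for_each_reserved_mem_region() iterator walks all reserved
 * ranges, e.g. to log them during early boot:
 *
 *      phys_addr_t start, end;
 *      u64 i;
 *
 *      for_each_reserved_mem_region(i, &start, &end)
 *              pr_info("reserved: [%pa-%pa]\n", &start, &end);
 */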

/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration. The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b. For example, if type_b regions
 * look like the following,
 *
 *      0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *      0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __init_memblock __next_mem_range(u64 *idx, int nid, ulong flags,
                                      struct memblock_type *type_a,
                                      struct memblock_type *type_b,
                                      phys_addr_t *out_start,
                                      phys_addr_t *out_end, int *out_nid)
{
        int idx_a = *idx & 0xffffffff;
        int idx_b = *idx >> 32;

        if (WARN_ONCE(nid == MAX_NUMNODES,
            "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
                nid = NUMA_NO_NODE;

        for (; idx_a < type_a->cnt; idx_a++) {
                struct memblock_region *m = &type_a->regions[idx_a];

                phys_addr_t m_start = m->base;
                phys_addr_t m_end = m->base + m->size;
                int m_nid = memblock_get_region_node(m);

                /* only memory regions are associated with nodes, check it */
                if (nid != NUMA_NO_NODE && nid != m_nid)
                        continue;

                /* skip hotpluggable memory regions if needed */
                if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
                        continue;

                /* if we want mirror memory skip non-mirror memory regions */
                if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
                        continue;

                /* skip nomap memory unless we were asked for it explicitly */
                if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
                        continue;

                if (!type_b) {
                        if (out_start)
                                *out_start = m_start;
                        if (out_end)
                                *out_end = m_end;
                        if (out_nid)
                                *out_nid = m_nid;
                        idx_a++;
                        *idx = (u32)idx_a | (u64)idx_b << 32;
                        return;
                }

                /* scan areas before each reservation */
                for (; idx_b < type_b->cnt + 1; idx_b++) {
                        struct memblock_region *r;
                        phys_addr_t r_start;
                        phys_addr_t r_end;

                        r = &type_b->regions[idx_b];
                        r_start = idx_b ? r[-1].base + r[-1].size : 0;
                        r_end = idx_b < type_b->cnt ?
                                r->base : ULLONG_MAX;

                        /*
                         * if idx_b advanced past idx_a,
                         * break out to advance idx_a
                         */
                        if (r_start >= m_end)
                                break;
                        /* if the two regions intersect, we're done */
                        if (m_start < r_end) {
                                if (out_start)
                                        *out_start =
                                                max(m_start, r_start);
                                if (out_end)
                                        *out_end = min(m_end, r_end);
                                if (out_nid)
                                        *out_nid = m_nid;
                                /*
                                 * The region which ends first is
                                 * advanced for the next iteration.
                                 */
                                if (m_end <= r_end)
                                        idx_a++;
                                else
                                        idx_b++;
                                *idx = (u32)idx_a | (u64)idx_b << 32;
                                return;
                        }
                }
        }

        /* signal end of iteration */
        *idx = ULLONG_MAX;
}
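
/*
 * Illustrative usage (added for clarity, not in the original source):
 * for_each_free_mem_range() wraps this function with @type_a =
 * memblock.memory and @type_b = memblock.reserved, yielding only memory
 * that is not reserved:
 *
 *      phys_addr_t start, end;
 *      u64 i;
 *
 *      for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *                              &start, &end, NULL)
 *              pr_info("free: [%pa-%pa]\n", &start, &end);
 */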

/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid, ulong flags,
                                          struct memblock_type *type_a,
                                          struct memblock_type *type_b,
                                          phys_addr_t *out_start,
                                          phys_addr_t *out_end, int *out_nid)
{
        int idx_a = *idx & 0xffffffff;
        int idx_b = *idx >> 32;

        if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
                nid = NUMA_NO_NODE;

        if (*idx == (u64)ULLONG_MAX) {
                idx_a = type_a->cnt - 1;
                if (type_b != NULL)
                        idx_b = type_b->cnt;
                else
                        idx_b = 0;
        }

        for (; idx_a >= 0; idx_a--) {
                struct memblock_region *m = &type_a->regions[idx_a];

                phys_addr_t m_start = m->base;
                phys_addr_t m_end = m->base + m->size;
                int m_nid = memblock_get_region_node(m);

                /* only memory regions are associated with nodes, check it */
                if (nid != NUMA_NO_NODE && nid != m_nid)
                        continue;

                /* skip hotpluggable memory regions if needed */
                if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
                        continue;

                /* if we want mirror memory skip non-mirror memory regions */
                if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
                        continue;

                /* skip nomap memory unless we were asked for it explicitly */
                if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
                        continue;

                if (!type_b) {
                        if (out_start)
                                *out_start = m_start;
                        if (out_end)
                                *out_end = m_end;
                        if (out_nid)
                                *out_nid = m_nid;
                        idx_a--;
                        *idx = (u32)idx_a | (u64)idx_b << 32;
                        return;
                }

                /* scan areas before each reservation */
                for (; idx_b >= 0; idx_b--) {
                        struct memblock_region *r;
                        phys_addr_t r_start;
                        phys_addr_t r_end;

                        r = &type_b->regions[idx_b];
                        r_start = idx_b ? r[-1].base + r[-1].size : 0;
                        r_end = idx_b < type_b->cnt ?
                                r->base : ULLONG_MAX;
                        /*
                         * if idx_b advanced past idx_a,
                         * break out to advance idx_a
                         */

                        if (r_end <= m_start)
                                break;
                        /* if the two regions intersect, we're done */
                        if (m_end > r_start) {
                                if (out_start)
                                        *out_start = max(m_start, r_start);
                                if (out_end)
                                        *out_end = min(m_end, r_end);
                                if (out_nid)
                                        *out_nid = m_nid;
                                if (m_start >= r_start)
                                        idx_a--;
                                else
                                        idx_b--;
                                *idx = (u32)idx_a | (u64)idx_b << 32;
                                return;
                        }
                }
        }
        /* signal end of iteration */
        *idx = ULLONG_MAX;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
                                unsigned long *out_start_pfn,
                                unsigned long *out_end_pfn, int *out_nid)
{
        struct memblock_type *type = &memblock.memory;
        struct memblock_region *r;

        while (++*idx < type->cnt) {
                r = &type->regions[*idx];

                if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
                        continue;
                if (nid == MAX_NUMNODES || nid == r->nid)
                        break;
        }
        if (*idx >= type->cnt) {
                *idx = -1;
                return;
        }

        if (out_start_pfn)
                *out_start_pfn = PFN_UP(r->base);
        if (out_end_pfn)
                *out_end_pfn = PFN_DOWN(r->base + r->size);
        if (out_nid)
                *out_nid = r->nid;
}

unsigned long __init_memblock memblock_next_valid_pfn(unsigned long pfn,
                                                      unsigned long max_pfn)
{
        struct memblock_type *type = &memblock.memory;
        unsigned int right = type->cnt;
        unsigned int mid, left = 0;
        phys_addr_t addr = PFN_PHYS(pfn + 1);

        do {
                mid = (right + left) / 2;

                if (addr < type->regions[mid].base)
                        right = mid;
                else if (addr >= (type->regions[mid].base +
                                  type->regions[mid].size))
                        left = mid + 1;
                else {
                        /* addr is within the region, so pfn + 1 is valid */
                        return min(pfn + 1, max_pfn);
                }
        } while (left < right);

        if (right == type->cnt)
                return max_pfn;
        else
                return min(PHYS_PFN(type->regions[right].base), max_pfn);
}

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base,@base+@size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
                                      struct memblock_type *type, int nid)
{
        int start_rgn, end_rgn;
        int i, ret;

        ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
        if (ret)
                return ret;

        for (i = start_rgn; i < end_rgn; i++)
                memblock_set_region_node(&type->regions[i], nid);

        memblock_merge_regions(type);
        return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
                                        phys_addr_t align, phys_addr_t start,
                                        phys_addr_t end, int nid, ulong flags)
{
        phys_addr_t found;

        if (!align)
                align = SMP_CACHE_BYTES;

        found = memblock_find_in_range_node(size, align, start, end, nid,
                                            flags);
        if (found && !memblock_reserve(found, size)) {
                /*
                 * The min_count is set to 0 so that memblock allocations are
                 * never reported as leaks.
                 */
                kmemleak_alloc_phys(found, size, 0, 0);
                return found;
        }
        return 0;
}

phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
                                        phys_addr_t start, phys_addr_t end,
                                        ulong flags)
{
        return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
                                        flags);
}

static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
                                        phys_addr_t align, phys_addr_t max_addr,
                                        int nid, ulong flags)
{
        return memblock_alloc_range_nid(size, align, 0, max_addr, nid, flags);
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
        ulong flags = choose_memblock_flags();
        phys_addr_t ret;

again:
        ret = memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE,
                                      nid, flags);

        if (!ret && (flags & MEMBLOCK_MIRROR)) {
                flags &= ~MEMBLOCK_MIRROR;
                goto again;
        }
        return ret;
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
        return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE,
                                       MEMBLOCK_NONE);
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
        phys_addr_t alloc;

        alloc = __memblock_alloc_base(size, align, max_addr);

        if (alloc == 0)
                panic("ERROR: Failed to allocate %pa bytes below %pa.\n",
                      &size, &max_addr);

        return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
        return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
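
/*
 * Illustrative usage (added for clarity, not in the original source; the
 * size is made up):
 *
 *      phys_addr_t pa = memblock_alloc(SZ_16K, PAGE_SIZE);
 *
 * This returns a physical address below memblock.current_limit, or panics
 * via memblock_alloc_base() if no such range exists; the region is already
 * reserved on return.
 */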

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
        phys_addr_t res = memblock_alloc_nid(size, align, nid);

        if (res)
                return res;
        return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

/**
 * memblock_virt_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate (phys address)
 * @max_addr: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * The @min_addr limit is dropped if it can not be satisfied and the allocation
 * will fall back to memory below @min_addr. Also, allocation may fall back
 * to any node in the system if the specified node can not
 * hold the requested memory.
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE.
 *
 * The memory block is aligned on SMP_CACHE_BYTES if @align == 0.
 *
 * The phys address of allocated boot memory block is converted to virtual and
 * allocated memory is reset to 0.
 *
 * In addition, the function sets min_count to 0 using kmemleak_alloc for the
 * allocated boot memory block, so that it is never reported as a leak.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
static void * __init memblock_virt_alloc_internal(
                                phys_addr_t size, phys_addr_t align,
                                phys_addr_t min_addr, phys_addr_t max_addr,
                                int nid)
{
        phys_addr_t alloc;
        void *ptr;
        ulong flags = choose_memblock_flags();

        if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
                nid = NUMA_NO_NODE;

        /*
         * Detect any accidental use of these APIs after slab is ready, as at
         * this moment memblock may be deinitialized already and its
         * internal data may be destroyed (after execution of free_all_bootmem)
         */
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, nid);

        if (!align)
                align = SMP_CACHE_BYTES;

        if (max_addr > memblock.current_limit)
                max_addr = memblock.current_limit;
again:
        alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
                                            nid, flags);
        if (alloc && !memblock_reserve(alloc, size))
                goto done;

        if (nid != NUMA_NO_NODE) {
                alloc = memblock_find_in_range_node(size, align, min_addr,
                                                    max_addr, NUMA_NO_NODE,
                                                    flags);
                if (alloc && !memblock_reserve(alloc, size))
                        goto done;
        }

        if (min_addr) {
                min_addr = 0;
                goto again;
        }

        if (flags & MEMBLOCK_MIRROR) {
                flags &= ~MEMBLOCK_MIRROR;
                pr_warn("Could not allocate %pap bytes of mirrored memory\n",
                        &size);
                goto again;
        }

        return NULL;
done:
        ptr = phys_to_virt(alloc);

        /*
         * The min_count is set to 0 so that bootmem allocated blocks
         * are never reported as leaks. This is because many of these blocks
         * are only referred via the physical address which is not
         * looked up by kmemleak.
         */
        kmemleak_alloc(ptr, size, 0, 0);

        return ptr;
}

/**
 * memblock_virt_alloc_try_nid_raw - allocate boot memory block without zeroing
 * memory and without panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *        is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *        is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *        allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. Does not zero allocated memory, does not panic if request
 * cannot be satisfied.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid_raw(
                        phys_addr_t size, phys_addr_t align,
                        phys_addr_t min_addr, phys_addr_t max_addr,
                        int nid)
{
        void *ptr;

        memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
                     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
                     (u64)max_addr, (void *)_RET_IP_);

        ptr = memblock_virt_alloc_internal(size, align,
                                           min_addr, max_addr, nid);
#ifdef CONFIG_DEBUG_VM
        if (ptr && size > 0)
                memset(ptr, 0xff, size);
#endif
        return ptr;
}

/**
 * memblock_virt_alloc_try_nid_nopanic - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *        is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *        is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *        allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. This function zeroes the allocated memory.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid_nopanic(
                                phys_addr_t size, phys_addr_t align,
                                phys_addr_t min_addr, phys_addr_t max_addr,
                                int nid)
{
        void *ptr;

        memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
                     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
                     (u64)max_addr, (void *)_RET_IP_);

        ptr = memblock_virt_alloc_internal(size, align,
                                           min_addr, max_addr, nid);
        if (ptr)
                memset(ptr, 0, size);
        return ptr;
}

/**
 * memblock_virt_alloc_try_nid - allocate boot memory block with panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *        is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *        is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *        allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public panicking version of memblock_virt_alloc_try_nid_nopanic()
 * which provides debug information (including caller info), if enabled,
 * and panics if the request can not be satisfied.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid(
                        phys_addr_t size, phys_addr_t align,
                        phys_addr_t min_addr, phys_addr_t max_addr,
                        int nid)
{
        void *ptr;

        memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
                     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
                     (u64)max_addr, (void *)_RET_IP_);
        ptr = memblock_virt_alloc_internal(size, align,
                                           min_addr, max_addr, nid);
        if (ptr) {
                memset(ptr, 0, size);
                return ptr;
        }

        panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx\n",
              __func__, (u64)size, (u64)align, nid, (u64)min_addr,
              (u64)max_addr);
        return NULL;
}
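
/*
 * Illustrative usage (added for clarity, not in the original source): early
 * per-node data is commonly grabbed with the panicking variant, e.g.:
 *
 *      struct pglist_data *pgdat;
 *
 *      pgdat = memblock_virt_alloc_try_nid(sizeof(*pgdat),
 *                                          SMP_CACHE_BYTES, 0,
 *                                          BOOTMEM_ALLOC_ACCESSIBLE, nid);
 *
 * The returned memory is zeroed, so callers can rely on a clean struct.
 */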
  1277. /**
  1278. * __memblock_free_early - free boot memory block
  1279. * @base: phys starting address of the boot memory block
  1280. * @size: size of the boot memory block in bytes
  1281. *
  1282. * Free boot memory block previously allocated by memblock_virt_alloc_xx() API.
  1283. * The freeing memory will not be released to the buddy allocator.
  1284. */
  1285. void __init __memblock_free_early(phys_addr_t base, phys_addr_t size)
  1286. {
  1287. memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
  1288. __func__, (u64)base, (u64)base + size - 1,
  1289. (void *)_RET_IP_);
  1290. kmemleak_free_part_phys(base, size);
  1291. memblock_remove_range(&memblock.reserved, base, size);
  1292. }

/**
 * __memblock_free_late - free bootmem block pages directly to buddy allocator
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system. Pages are released directly
 * to the buddy allocator; no bootmem metadata is updated because it is gone.
 */
void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	u64 cursor, end;

	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
		     __func__, (u64)base, (u64)base + size - 1,
		     (void *)_RET_IP_);
	kmemleak_free_part_phys(base, size);
	cursor = PFN_UP(base);
	end = PFN_DOWN(base + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
		totalram_pages++;
	}
}
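
/*
 * Illustrative use (hypothetical names): late in boot, after bootmem-style
 * freeing has already run, a reservation that is no longer needed can be
 * handed straight to the page allocator:
 *
 *	__memblock_free_late(crash_base, crash_size);
 *
 * Each whole page in [base, base + size) is released via
 * __free_pages_bootmem() and accounted in totalram_pages; partial pages at
 * either end are skipped by the PFN_UP()/PFN_DOWN() rounding.
 */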

/*
 * Remaining API functions
 */

phys_addr_t __init_memblock memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

phys_addr_t __init_memblock memblock_reserved_size(void)
{
	return memblock.reserved.total_size;
}

phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
{
	unsigned long pages = 0;
	struct memblock_region *r;
	unsigned long start_pfn, end_pfn;

	for_each_memblock(memory, r) {
		start_pfn = memblock_region_memory_base_pfn(r);
		end_pfn = memblock_region_memory_end_pfn(r);
		start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
		end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
		pages += end_pfn - start_pfn;
	}

	return PFN_PHYS(pages);
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
{
	phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;
	struct memblock_region *r;

	/*
	 * Translate the memory @limit size into the max address within one of
	 * the memory memblock regions. If @limit exceeds the total size of
	 * those regions, max_addr keeps its original value, ULLONG_MAX.
	 */
	for_each_memblock(memory, r) {
		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	return max_addr;
}
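
/*
 * Worked example (assumed layout): with two memory regions
 * [0x80000000, 0xA0000000) and [0xC0000000, 0xE0000000), each 512M,
 * a limit of 768M walks past the first region (leaving 256M) and resolves
 * within the second:
 *
 *	max_addr = 0xC0000000 + 256M = 0xD0000000
 *
 * A limit above 1G falls off the end of the loop and returns ULLONG_MAX,
 * which the callers below treat as "nothing to cap".
 */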

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == (phys_addr_t)ULLONG_MAX)
		return;

	/* truncate both memory and reserved regions */
	memblock_remove_range(&memblock.memory, max_addr,
			      (phys_addr_t)ULLONG_MAX);
	memblock_remove_range(&memblock.reserved, max_addr,
			      (phys_addr_t)ULLONG_MAX);
}
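
/*
 * Illustrative call site (a sketch, not a quote of any particular
 * architecture): this is the natural backend for "mem=" command-line
 * handling, along the lines of:
 *
 *	static int __init early_mem(char *p)
 *	{
 *		memblock_enforce_memory_limit(memparse(p, &p));
 *		return 0;
 *	}
 *	early_param("mem", early_mem);
 */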

void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	if (!size)
		return;

	ret = memblock_isolate_range(&memblock.memory, base, size,
				     &start_rgn, &end_rgn);
	if (ret)
		return;

	/* remove all the MAP regions */
	for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	for (i = start_rgn - 1; i >= 0; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	/* truncate the reserved regions */
	memblock_remove_range(&memblock.reserved, 0, base);
	memblock_remove_range(&memblock.reserved,
			base + size, (phys_addr_t)ULLONG_MAX);
}
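
/*
 * Effect sketch: after isolating [base, base + size), the loops walk outward
 * from that window and drop every mappable region, while MEMBLOCK_NOMAP
 * regions (e.g. ranges the firmware requires to stay described) survive the
 * cap; the reserved type is then trimmed to the same window. Crash-dump
 * (kdump) style setups use this pattern to confine the capture kernel to
 * its reserved range, e.g. memblock_cap_memory_range(crash_base, crash_size)
 * with hypothetical names.
 */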

void __init memblock_mem_limit_remove_map(phys_addr_t limit)
{
	phys_addr_t max_addr;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == (phys_addr_t)ULLONG_MAX)
		return;

	memblock_cap_memory_range(0, max_addr);
}
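
/*
 * Binary search over the region array of @type for the region containing
 * @addr. The array is kept sorted by base address with no overlaps, which is
 * what makes halving the [left, right) interval below valid. Returns the
 * region index on a hit, or -1 if @addr falls in a hole between regions.
 */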
static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);

	return -1;
}

bool __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

bool __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

int __init_memblock memblock_is_map_memory(phys_addr_t addr)
{
	int i = memblock_search(&memblock.memory, addr);

	if (i == -1)
		return false;
	return !memblock_is_nomap(&memblock.memory.regions[i]);
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
			 unsigned long *start_pfn, unsigned long *end_pfn)
{
	struct memblock_type *type = &memblock.memory;
	int mid = memblock_search(type, PFN_PHYS(pfn));

	if (mid == -1)
		return -1;

	*start_pfn = PFN_DOWN(type->regions[mid].base);
	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);

	return type->regions[mid].nid;
}
#endif

/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base + @size) is a subset of a memory block.
 *
 * RETURNS:
 * 0 if false, non-zero if true
 */
int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return 0;
	return (memblock.memory.regions[idx].base +
		memblock.memory.regions[idx].size) >= end;
}

/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base + @size) intersects a reserved memory block.
 *
 * RETURNS:
 * True if they intersect, false if not.
 */
bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	memblock_cap_size(base, &size);
	return memblock_overlaps_region(&memblock.reserved, base, size);
}
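
/*
 * Note the asymmetry between the two predicates above:
 * memblock_is_region_memory() demands full containment within a single
 * memory region, while memblock_is_region_reserved() is satisfied by any
 * overlap with a reserved region. For example (assumed layout), with memory
 * [0x1000, 0x9000) and a reservation [0x2000, 0x3000):
 *
 *	memblock_is_region_memory(0x1000, 0x1000)    -> non-zero (subset)
 *	memblock_is_region_reserved(0x2800, 0x1000)  -> true (partial overlap)
 */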

void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_region *r;

	for_each_memblock(memory, r) {
		orig_start = r->base;
		orig_end = r->base + r->size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			r->base = start;
			r->size = end - start;
		} else {
			memblock_remove_region(&memblock.memory,
					       r - memblock.memory.regions);
			r--;
		}
	}
}
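
/*
 * Worked example (assumed region): trimming to align = SZ_2M turns
 * [0x1ff000, 0x600000) into [0x200000, 0x600000):
 *
 *	start = round_up(0x1ff000, SZ_2M)   = 0x200000
 *	end   = round_down(0x600000, SZ_2M) = 0x600000
 *
 * A region smaller than @align (start >= end after rounding) is removed
 * outright; the r-- above compensates for the array shifting down one slot
 * so the loop does not skip the next region.
 */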

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

phys_addr_t __init_memblock memblock_get_current_limit(void)
{
	return memblock.current_limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type)
{
	phys_addr_t base, end, size;
	unsigned long flags;
	int idx;
	struct memblock_region *rgn;

	pr_info(" %s.cnt  = 0x%lx\n", type->name, type->cnt);

	for_each_memblock_type(idx, type, rgn) {
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
		end = base + size - 1;
		flags = rgn->flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#lx\n",
			type->name, idx, &base, &end, &size, nid_buf, flags);
	}
}

extern unsigned long __init_memblock
memblock_reserved_memory_within(phys_addr_t start_addr, phys_addr_t end_addr)
{
	struct memblock_region *rgn;
	unsigned long size = 0;
	int idx;

	for_each_memblock_type(idx, (&memblock.reserved), rgn) {
		phys_addr_t start, end;

		if (rgn->base + rgn->size < start_addr)
			continue;
		if (rgn->base > end_addr)
			continue;

		start = rgn->base;
		end = start + rgn->size;
		size += end - start;
	}

	return size;
}
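
/*
 * Behavioral note: a reserved region that merely touches the
 * [start_addr, end_addr] window is counted in full rather than clipped to
 * it, so the result can exceed the window size when reservations straddle
 * its edges. Callers sizing work from this value should treat it as an
 * upper bound.
 */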

void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %pa reserved size = %pa\n",
		&memblock.memory.total_size,
		&memblock.reserved.total_size);

	memblock_dump(&memblock.memory);
	memblock_dump(&memblock.reserved);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	memblock_dump(&memblock.physmem);
#endif
}

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);
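
/*
 * Usage: booting with "memblock=debug" on the kernel command line sets
 * memblock_debug, which makes every memblock_dbg() call above print an
 * info-level line with the operation, the physical range, and the caller
 * (via %pF), e.g. for add/remove/reserve traffic during early boot.
 */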

#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;
	phys_addr_t end;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		end = reg->base + reg->size - 1;

		seq_printf(m, "%4d: ", i);
		seq_printf(m, "%pa..%pa\n", &reg->base, &end);
	}
	return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);

	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", S_IRUGO, root,
			    &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", S_IRUGO, root,
			    &memblock.reserved, &memblock_debug_fops);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	debugfs_create_file("physmem", S_IRUGO, root,
			    &memblock.physmem, &memblock_debug_fops);
#endif

	return 0;
}
__initcall(memblock_init_debugfs);
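
/*
 * Usage (illustrative output): with CONFIG_DEBUG_FS enabled the current
 * region tables can be inspected at runtime:
 *
 *	# cat /sys/kernel/debug/memblock/memory
 *	   0: 0x0000000080000000..0x00000000ffffffff
 *
 * The range shown is hypothetical; the format is the "%4d: %pa..%pa"
 * emitted by memblock_debug_show() above.
 */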

#endif /* CONFIG_DEBUG_FS */