/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.  June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm/sections.h>
#include <linux/io.h>

#include "internal.h"
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
#endif

struct memblock memblock __initdata_memblock = {
        .memory.regions         = memblock_memory_init_regions,
        .memory.cnt             = 1,    /* empty dummy entry */
        .memory.max             = INIT_MEMBLOCK_REGIONS,
        .memory.name            = "memory",

        .reserved.regions       = memblock_reserved_init_regions,
        .reserved.cnt           = 1,    /* empty dummy entry */
        .reserved.max           = INIT_MEMBLOCK_REGIONS,
        .reserved.name          = "reserved",

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
        .physmem.regions        = memblock_physmem_init_regions,
        .physmem.cnt            = 1,    /* empty dummy entry */
        .physmem.max            = INIT_PHYSMEM_REGIONS,
        .physmem.name           = "physmem",
#endif

        .bottom_up              = false,
        .current_limit          = MEMBLOCK_ALLOC_ANYWHERE,
};

int memblock_debug __initdata_memblock;
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;
ulong __init_memblock choose_memblock_flags(void)
{
        return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
        return *size = min(*size, PHYS_ADDR_MAX - base);
}
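/*
 * Illustration: near the top of the physical address space a request such
 * as base = PHYS_ADDR_MAX - 0xfff with *size = 0x2000 is clamped down to
 * *size = 0xfff, so that base + *size cannot wrap around.
 */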
/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
                                       phys_addr_t base2, phys_addr_t size2)
{
        return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}
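/*
 * memblock_addrs_overlap() above treats both regions as half-open
 * intervals: [base1, base1 + size1) and [base2, base2 + size2) overlap
 * iff each base lies below the other region's exclusive end.
 */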
bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
                                        phys_addr_t base, phys_addr_t size)
{
        unsigned long i;

        for (i = 0; i < type->cnt; i++)
                if (memblock_addrs_overlap(base, size, type->regions[i].base,
                                           type->regions[i].size))
                        break;
        return i < type->cnt;
}
/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(); finds a free area
 * scanning bottom-up.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
                                phys_addr_t size, phys_addr_t align, int nid,
                                ulong flags)
{
        phys_addr_t this_start, this_end, cand;
        u64 i;

        for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
                this_start = clamp(this_start, start, end);
                this_end = clamp(this_end, start, end);

                cand = round_up(this_start, align);
                if (cand < this_end && this_end - cand >= size)
                        return cand;
        }

        return 0;
}
/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(); finds a free area
 * scanning top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
                               phys_addr_t size, phys_addr_t align, int nid,
                               ulong flags)
{
        phys_addr_t this_start, this_end, cand;
        u64 i;

        for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
                                        NULL) {
                this_start = clamp(this_start, start, end);
                this_end = clamp(this_end, start, end);

                if (this_end < size)
                        continue;

                cand = round_down(this_end - size, align);
                if (cand >= this_start)
                        return cand;
        }

        return 0;
}
/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * When allocation direction is bottom-up, @start should be greater than
 * the end of the kernel image.  Otherwise, it will be trimmed.  The reason
 * is that we want the bottom-up allocation to be close to the kernel
 * image, so that the allocated memory is likely to reside on the same
 * node as the kernel.
 *
 * If bottom-up allocation fails, allocation is retried top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
                                        phys_addr_t align, phys_addr_t start,
                                        phys_addr_t end, int nid, ulong flags)
{
        phys_addr_t kernel_end, ret;

        /* pump up @end */
        if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
                end = memblock.current_limit;

        /* avoid allocating the first page */
        start = max_t(phys_addr_t, start, PAGE_SIZE);
        end = max(start, end);
        kernel_end = __pa_symbol(_end);

        /*
         * try bottom-up allocation only when bottom-up mode
         * is set and @end is above the kernel image.
         */
        if (memblock_bottom_up() && end > kernel_end) {
                phys_addr_t bottom_up_start;

                /* make sure we will allocate above the kernel */
                bottom_up_start = max(start, kernel_end);

                /* ok, try bottom-up allocation first */
                ret = __memblock_find_range_bottom_up(bottom_up_start, end,
                                                      size, align, nid, flags);
                if (ret)
                        return ret;

                /*
                 * we always limit bottom-up allocation above the kernel,
                 * but top-down allocation doesn't have the limit, so
                 * retrying top-down allocation may succeed when bottom-up
                 * allocation failed.
                 *
                 * bottom-up allocation is expected to fail very rarely,
                 * so we use WARN_ONCE() here to see the stack trace if
                 * a failure happens.
                 */
                WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE),
                          "memblock: bottom-up allocation failed, memory hotremove may be affected\n");
        }

        return __memblock_find_range_top_down(start, end, size, align, nid,
                                              flags);
}
/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
                                        phys_addr_t end, phys_addr_t size,
                                        phys_addr_t align)
{
        phys_addr_t ret;
        ulong flags = choose_memblock_flags();

again:
        ret = memblock_find_in_range_node(size, align, start, end,
                                          NUMA_NO_NODE, flags);

        if (!ret && (flags & MEMBLOCK_MIRROR)) {
                pr_warn("Could not allocate %pap bytes of mirrored memory\n",
                        &size);
                flags &= ~MEMBLOCK_MIRROR;
                goto again;
        }

        return ret;
}
static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
        type->total_size -= type->regions[r].size;
        memmove(&type->regions[r], &type->regions[r + 1],
                (type->cnt - (r + 1)) * sizeof(type->regions[r]));
        type->cnt--;

        /* Special case for empty arrays */
        if (type->cnt == 0) {
                WARN_ON(type->total_size != 0);
                type->cnt = 1;
                type->regions[0].base = 0;
                type->regions[0].size = 0;
                type->regions[0].flags = 0;
                memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
        }
}
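/*
 * Note the invariant maintained above: a memblock_type always keeps at
 * least one (possibly empty dummy) region, so cnt never drops below 1 and
 * iteration code never has to handle an empty array.
 */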
#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
/**
 * memblock_discard - discard the memory and reserved arrays if they were
 * dynamically allocated
 */
void __init memblock_discard(void)
{
        phys_addr_t addr, size;

        if (memblock.reserved.regions != memblock_reserved_init_regions) {
                addr = __pa(memblock.reserved.regions);
                size = PAGE_ALIGN(sizeof(struct memblock_region) *
                                  memblock.reserved.max);
                __memblock_free_late(addr, size);
        }

        if (memblock.memory.regions != memblock_memory_init_regions) {
                addr = __pa(memblock.memory.regions);
                size = PAGE_ALIGN(sizeof(struct memblock_region) *
                                  memblock.memory.max);
                __memblock_free_late(addr, size);
        }
}
#endif
/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start, @new_area_start + @new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * RETURNS:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
                                                phys_addr_t new_area_start,
                                                phys_addr_t new_area_size)
{
        struct memblock_region *new_array, *old_array;
        phys_addr_t old_alloc_size, new_alloc_size;
        phys_addr_t old_size, new_size, addr;
        int use_slab = slab_is_available();
        int *in_slab;

        /* We don't allow resizing until we know about the reserved regions
         * of memory that aren't suitable for allocation
         */
        if (!memblock_can_resize)
                return -1;

        /* Calculate new doubled size */
        old_size = type->max * sizeof(struct memblock_region);
        new_size = old_size << 1;
        /*
         * We need to allocate the new array aligned to PAGE_SIZE,
         * so that we can free it completely later.
         */
        old_alloc_size = PAGE_ALIGN(old_size);
        new_alloc_size = PAGE_ALIGN(new_size);

        /* Retrieve the slab flag */
        if (type == &memblock.memory)
                in_slab = &memblock_memory_in_slab;
        else
                in_slab = &memblock_reserved_in_slab;

        /* Try to find some space for it.
         *
         * WARNING: We assume that either slab_is_available() and we use it or
         * we use MEMBLOCK for allocations. That means that this is unsafe to
         * use when bootmem is currently active (unless bootmem itself is
         * implemented on top of MEMBLOCK which isn't the case yet)
         *
         * This should however not be an issue for now, as we currently only
         * call into MEMBLOCK while it's still active, or much later when slab
         * is active for memory hotplug operations
         */
        if (use_slab) {
                new_array = kmalloc(new_size, GFP_KERNEL);
                addr = new_array ? __pa(new_array) : 0;
        } else {
                /* only exclude range when trying to double reserved.regions */
                if (type != &memblock.reserved)
                        new_area_start = new_area_size = 0;

                addr = memblock_find_in_range(new_area_start + new_area_size,
                                              memblock.current_limit,
                                              new_alloc_size, PAGE_SIZE);
                if (!addr && new_area_size)
                        addr = memblock_find_in_range(0,
                                min(new_area_start, memblock.current_limit),
                                new_alloc_size, PAGE_SIZE);

                new_array = addr ? __va(addr) : NULL;
        }
        if (!addr) {
                pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
                       type->name, type->max, type->max * 2);
                return -1;
        }

        memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]",
                     type->name, type->max * 2, (u64)addr,
                     (u64)addr + new_size - 1);

        /*
         * Found space, we now need to move the array over before we add the
         * reserved region since it may be our reserved array itself that is
         * full.
         */
        memcpy(new_array, type->regions, old_size);
        memset(new_array + type->max, 0, old_size);
        old_array = type->regions;
        type->regions = new_array;
        type->max <<= 1;

        /* Free old array. We needn't free it if the array is the static one */
        if (*in_slab)
                kfree(old_array);
        else if (old_array != memblock_memory_init_regions &&
                 old_array != memblock_reserved_init_regions)
                memblock_free(__pa(old_array), old_alloc_size);

        /*
         * Reserve the new array if it comes from memblock.  Otherwise, we
         * needn't do it.
         */
        if (!use_slab)
                BUG_ON(memblock_reserve(addr, new_alloc_size));

        /* Update slab flag */
        *in_slab = use_slab;

        return 0;
}
/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
        int i = 0;

        /* cnt never goes below 1 */
        while (i < type->cnt - 1) {
                struct memblock_region *this = &type->regions[i];
                struct memblock_region *next = &type->regions[i + 1];

                if (this->base + this->size != next->base ||
                    memblock_get_region_node(this) !=
                    memblock_get_region_node(next) ||
                    this->flags != next->flags) {
                        BUG_ON(this->base + this->size > next->base);
                        i++;
                        continue;
                }

                this->size += next->size;
                /* move forward from next + 1, index of which is i + 2 */
                memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
                type->cnt--;
        }
}
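/*
 * Merge example: adjacent regions [0x1000, 0x2000) and [0x2000, 0x3000)
 * with the same node id and flags collapse into a single [0x1000, 0x3000)
 * entry; a gap, a node mismatch or a flags mismatch keeps them separate.
 */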
/**
 * memblock_insert_region - insert new memblock region
 * @type: memblock type to insert into
 * @idx: index for the insertion point
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: node id of the new region
 * @flags: flags of the new region
 *
 * Insert new memblock region [@base,@base+@size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
                                                   int idx, phys_addr_t base,
                                                   phys_addr_t size,
                                                   int nid, unsigned long flags)
{
        struct memblock_region *rgn = &type->regions[idx];

        BUG_ON(type->cnt >= type->max);
        memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
        rgn->base = base;
        rgn->size = size;
        rgn->flags = flags;
        memblock_set_region_node(rgn, nid);
        type->cnt++;
        type->total_size += size;
}
/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base,@base+@size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_range(struct memblock_type *type,
                                phys_addr_t base, phys_addr_t size,
                                int nid, unsigned long flags)
{
        bool insert = false;
        phys_addr_t obase = base;
        phys_addr_t end = base + memblock_cap_size(base, &size);
        int idx, nr_new;
        struct memblock_region *rgn;

        if (!size)
                return 0;

        /* special case for empty array */
        if (type->regions[0].size == 0) {
                WARN_ON(type->cnt != 1 || type->total_size);
                type->regions[0].base = base;
                type->regions[0].size = size;
                type->regions[0].flags = flags;
                memblock_set_region_node(&type->regions[0], nid);
                type->total_size = size;
                return 0;
        }
repeat:
        /*
         * The following is executed twice.  Once with %false @insert and
         * then with %true.  The first counts the number of regions needed
         * to accommodate the new area.  The second actually inserts them.
         */
        base = obase;
        nr_new = 0;

        for_each_memblock_type(idx, type, rgn) {
                phys_addr_t rbase = rgn->base;
                phys_addr_t rend = rbase + rgn->size;

                if (rbase >= end)
                        break;
                if (rend <= base)
                        continue;
                /*
                 * @rgn overlaps.  If it separates the lower part of new
                 * area, insert that portion.
                 */
                if (rbase > base) {
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
                        WARN_ON(nid != memblock_get_region_node(rgn));
#endif
                        WARN_ON(flags != rgn->flags);
                        nr_new++;
                        if (insert)
                                memblock_insert_region(type, idx++, base,
                                                       rbase - base, nid,
                                                       flags);
                }
                /* area below @rend is dealt with, forget about it */
                base = min(rend, end);
        }

        /* insert the remaining portion */
        if (base < end) {
                nr_new++;
                if (insert)
                        memblock_insert_region(type, idx, base, end - base,
                                               nid, flags);
        }

        if (!nr_new)
                return 0;

        /*
         * If this was the first round, resize array and repeat for actual
         * insertions; otherwise, merge and return.
         */
        if (!insert) {
                while (type->cnt + nr_new > type->max)
                        if (memblock_double_array(type, obase, size) < 0)
                                return -ENOMEM;
                insert = true;
                goto repeat;
        } else {
                memblock_merge_regions(type);
                return 0;
        }
}
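/*
 * Worked example for the two-pass scheme above: with an existing region
 * [0x2000, 0x2800), adding [0x1000, 0x3000) needs two new entries, one
 * for [0x1000, 0x2000) and one for [0x2800, 0x3000).  The first pass only
 * counts them (growing the array if required), the second pass inserts
 * them, and memblock_merge_regions() then folds the three contiguous
 * pieces into a single [0x1000, 0x3000) region.
 */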
int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
                                      int nid)
{
        return memblock_add_range(&memblock.memory, base, size, nid, 0);
}

int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
        phys_addr_t end = base + size - 1;

        memblock_dbg("memblock_add: [%pa-%pa] %pF\n",
                     &base, &end, (void *)_RET_IP_);

        return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}
/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base,@base+@size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
                                        phys_addr_t base, phys_addr_t size,
                                        int *start_rgn, int *end_rgn)
{
        phys_addr_t end = base + memblock_cap_size(base, &size);
        int idx;
        struct memblock_region *rgn;

        *start_rgn = *end_rgn = 0;

        if (!size)
                return 0;

        /* we'll create at most two more regions */
        while (type->cnt + 2 > type->max)
                if (memblock_double_array(type, base, size) < 0)
                        return -ENOMEM;

        for_each_memblock_type(idx, type, rgn) {
                phys_addr_t rbase = rgn->base;
                phys_addr_t rend = rbase + rgn->size;

                if (rbase >= end)
                        break;
                if (rend <= base)
                        continue;

                if (rbase < base) {
                        /*
                         * @rgn intersects from below.  Split and continue
                         * to process the next region - the new top half.
                         */
                        rgn->base = base;
                        rgn->size -= base - rbase;
                        type->total_size -= base - rbase;
                        memblock_insert_region(type, idx, rbase, base - rbase,
                                               memblock_get_region_node(rgn),
                                               rgn->flags);
                } else if (rend > end) {
                        /*
                         * @rgn intersects from above.  Split and redo the
                         * current region - the new bottom half.
                         */
                        rgn->base = end;
                        rgn->size -= end - rbase;
                        type->total_size -= end - rbase;
                        memblock_insert_region(type, idx--, rbase, end - rbase,
                                               memblock_get_region_node(rgn),
                                               rgn->flags);
                } else {
                        /* @rgn is fully contained, record it */
                        if (!*end_rgn)
                                *start_rgn = idx;
                        *end_rgn = idx + 1;
                }
        }

        return 0;
}
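/*
 * Isolation example: with a single region [0x0, 0x4000), isolating
 * [0x1000, 0x3000) splits it into [0x0, 0x1000), [0x1000, 0x3000) and
 * [0x3000, 0x4000); *start_rgn = 1 and *end_rgn = 2 then delimit the
 * fully contained middle region.
 */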
static int __init_memblock memblock_remove_range(struct memblock_type *type,
                                          phys_addr_t base, phys_addr_t size)
{
        int start_rgn, end_rgn;
        int i, ret;

        ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
        if (ret)
                return ret;

        for (i = end_rgn - 1; i >= start_rgn; i--)
                memblock_remove_region(type, i);
        return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
        phys_addr_t end = base + size - 1;

        memblock_dbg("memblock_remove: [%pa-%pa] %pS\n",
                     &base, &end, (void *)_RET_IP_);

        return memblock_remove_range(&memblock.memory, base, size);
}

int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
        phys_addr_t end = base + size - 1;

        memblock_dbg("   memblock_free: [%pa-%pa] %pF\n",
                     &base, &end, (void *)_RET_IP_);

        kmemleak_free_part_phys(base, size);
        return memblock_remove_range(&memblock.reserved, base, size);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
        phys_addr_t end = base + size - 1;

        memblock_dbg("memblock_reserve: [%pa-%pa] %pF\n",
                     &base, &end, (void *)_RET_IP_);

        return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
}
/**
 * memblock_setclr_flag - set or clear a flag on a memory region
 * @base: base address of the region
 * @size: size of the region
 * @set: set (1) or clear (0) the flag
 * @flag: the flag to update
 *
 * This function isolates region [@base, @base + @size), and sets/clears flag
 *
 * Return 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
                                phys_addr_t size, int set, int flag)
{
        struct memblock_type *type = &memblock.memory;
        int i, ret, start_rgn, end_rgn;

        ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
        if (ret)
                return ret;

        for (i = start_rgn; i < end_rgn; i++)
                if (set)
                        memblock_set_region_flags(&type->regions[i], flag);
                else
                        memblock_clear_region_flags(&type->regions[i], flag);

        memblock_merge_regions(type);
        return 0;
}
/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
        return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
        return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
        system_has_some_mirror = true;

        return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
}

/**
 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
        return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
}

/**
 * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
{
        return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP);
}
/**
 * __next_reserved_mem_region - next function for for_each_reserved_region()
 * @idx: pointer to u64 loop variable
 * @out_start: ptr to phys_addr_t for start address of the region, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the region, can be %NULL
 *
 * Iterate over all reserved memory regions.
 */
void __init_memblock __next_reserved_mem_region(u64 *idx,
                                                phys_addr_t *out_start,
                                                phys_addr_t *out_end)
{
        struct memblock_type *type = &memblock.reserved;

        if (*idx < type->cnt) {
                struct memblock_region *r = &type->regions[*idx];
                phys_addr_t base = r->base;
                phys_addr_t size = r->size;

                if (out_start)
                        *out_start = base;
                if (out_end)
                        *out_end = base + size - 1;

                *idx += 1;
                return;
        }

        /* signal end of iteration */
        *idx = ULLONG_MAX;
}
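/*
 * Note that, unlike __next_mem_range() below which reports an exclusive
 * end (base + size), the iterator above reports an inclusive end address
 * (base + size - 1).
 */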
/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b.  For example, if type_b regions
 * look like the following,
 *
 *      0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *      0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __init_memblock __next_mem_range(u64 *idx, int nid, ulong flags,
                                      struct memblock_type *type_a,
                                      struct memblock_type *type_b,
                                      phys_addr_t *out_start,
                                      phys_addr_t *out_end, int *out_nid)
{
        int idx_a = *idx & 0xffffffff;
        int idx_b = *idx >> 32;

        if (WARN_ONCE(nid == MAX_NUMNODES,
            "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
                nid = NUMA_NO_NODE;

        for (; idx_a < type_a->cnt; idx_a++) {
                struct memblock_region *m = &type_a->regions[idx_a];

                phys_addr_t m_start = m->base;
                phys_addr_t m_end = m->base + m->size;
                int m_nid = memblock_get_region_node(m);

                /* only memory regions are associated with nodes, check it */
                if (nid != NUMA_NO_NODE && nid != m_nid)
                        continue;

                /* skip hotpluggable memory regions if needed */
                if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
                        continue;

                /* if we want mirror memory skip non-mirror memory regions */
                if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
                        continue;

                /* skip nomap memory unless we were asked for it explicitly */
                if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
                        continue;

                if (!type_b) {
                        if (out_start)
                                *out_start = m_start;
                        if (out_end)
                                *out_end = m_end;
                        if (out_nid)
                                *out_nid = m_nid;
                        idx_a++;
                        *idx = (u32)idx_a | (u64)idx_b << 32;
                        return;
                }

                /* scan areas before each reservation */
                for (; idx_b < type_b->cnt + 1; idx_b++) {
                        struct memblock_region *r;
                        phys_addr_t r_start;
                        phys_addr_t r_end;

                        r = &type_b->regions[idx_b];
                        r_start = idx_b ? r[-1].base + r[-1].size : 0;
                        r_end = idx_b < type_b->cnt ?
                                r->base : PHYS_ADDR_MAX;

                        /*
                         * if idx_b advanced past idx_a,
                         * break out to advance idx_a
                         */
                        if (r_start >= m_end)
                                break;
                        /* if the two regions intersect, we're done */
                        if (m_start < r_end) {
                                if (out_start)
                                        *out_start =
                                                max(m_start, r_start);
                                if (out_end)
                                        *out_end = min(m_end, r_end);
                                if (out_nid)
                                        *out_nid = m_nid;
                                /*
                                 * The region which ends first is
                                 * advanced for the next iteration.
                                 */
                                if (m_end <= r_end)
                                        idx_a++;
                                else
                                        idx_b++;
                                *idx = (u32)idx_a | (u64)idx_b << 32;
                                return;
                        }
                }
        }

        /* signal end of iteration */
        *idx = ULLONG_MAX;
}
/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid, ulong flags,
                                          struct memblock_type *type_a,
                                          struct memblock_type *type_b,
                                          phys_addr_t *out_start,
                                          phys_addr_t *out_end, int *out_nid)
{
        int idx_a = *idx & 0xffffffff;
        int idx_b = *idx >> 32;

        if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
                nid = NUMA_NO_NODE;

        if (*idx == (u64)ULLONG_MAX) {
                idx_a = type_a->cnt - 1;
                if (type_b != NULL)
                        idx_b = type_b->cnt;
                else
                        idx_b = 0;
        }

        for (; idx_a >= 0; idx_a--) {
                struct memblock_region *m = &type_a->regions[idx_a];

                phys_addr_t m_start = m->base;
                phys_addr_t m_end = m->base + m->size;
                int m_nid = memblock_get_region_node(m);

                /* only memory regions are associated with nodes, check it */
                if (nid != NUMA_NO_NODE && nid != m_nid)
                        continue;

                /* skip hotpluggable memory regions if needed */
                if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
                        continue;

                /* if we want mirror memory skip non-mirror memory regions */
                if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
                        continue;

                /* skip nomap memory unless we were asked for it explicitly */
                if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
                        continue;

                if (!type_b) {
                        if (out_start)
                                *out_start = m_start;
                        if (out_end)
                                *out_end = m_end;
                        if (out_nid)
                                *out_nid = m_nid;
                        idx_a--;
                        *idx = (u32)idx_a | (u64)idx_b << 32;
                        return;
                }

                /* scan areas before each reservation */
                for (; idx_b >= 0; idx_b--) {
                        struct memblock_region *r;
                        phys_addr_t r_start;
                        phys_addr_t r_end;

                        r = &type_b->regions[idx_b];
                        r_start = idx_b ? r[-1].base + r[-1].size : 0;
                        r_end = idx_b < type_b->cnt ?
                                r->base : PHYS_ADDR_MAX;
                        /*
                         * if idx_b advanced past idx_a,
                         * break out to advance idx_a
                         */
                        if (r_end <= m_start)
                                break;
                        /* if the two regions intersect, we're done */
                        if (m_end > r_start) {
                                if (out_start)
                                        *out_start = max(m_start, r_start);
                                if (out_end)
                                        *out_end = min(m_end, r_end);
                                if (out_nid)
                                        *out_nid = m_nid;
                                if (m_start >= r_start)
                                        idx_a--;
                                else
                                        idx_b--;
                                *idx = (u32)idx_a | (u64)idx_b << 32;
                                return;
                        }
                }
        }
        /* signal end of iteration */
        *idx = ULLONG_MAX;
}
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
                                unsigned long *out_start_pfn,
                                unsigned long *out_end_pfn, int *out_nid)
{
        struct memblock_type *type = &memblock.memory;
        struct memblock_region *r;

        while (++*idx < type->cnt) {
                r = &type->regions[*idx];

                if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
                        continue;
                if (nid == MAX_NUMNODES || nid == r->nid)
                        break;
        }
        if (*idx >= type->cnt) {
                *idx = -1;
                return;
        }

        if (out_start_pfn)
                *out_start_pfn = PFN_UP(r->base);
        if (out_end_pfn)
                *out_end_pfn = PFN_DOWN(r->base + r->size);
        if (out_nid)
                *out_nid = r->nid;
}
/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base,@base+@size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
                                      struct memblock_type *type, int nid)
{
        int start_rgn, end_rgn;
        int i, ret;

        ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
        if (ret)
                return ret;

        for (i = start_rgn; i < end_rgn; i++)
                memblock_set_region_node(&type->regions[i], nid);

        memblock_merge_regions(type);
        return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
                                        phys_addr_t align, phys_addr_t start,
                                        phys_addr_t end, int nid, ulong flags)
{
        phys_addr_t found;

        if (!align)
                align = SMP_CACHE_BYTES;

        found = memblock_find_in_range_node(size, align, start, end, nid,
                                            flags);
        if (found && !memblock_reserve(found, size)) {
                /*
                 * The min_count is set to 0 so that memblock allocations are
                 * never reported as leaks.
                 */
                kmemleak_alloc_phys(found, size, 0, 0);
                return found;
        }
        return 0;
}

phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
                                        phys_addr_t start, phys_addr_t end,
                                        ulong flags)
{
        return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
                                        flags);
}

phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
                                        phys_addr_t align, phys_addr_t max_addr,
                                        int nid, ulong flags)
{
        return memblock_alloc_range_nid(size, align, 0, max_addr, nid, flags);
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
        ulong flags = choose_memblock_flags();
        phys_addr_t ret;

again:
        ret = memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE,
                                      nid, flags);

        if (!ret && (flags & MEMBLOCK_MIRROR)) {
                flags &= ~MEMBLOCK_MIRROR;
                goto again;
        }
        return ret;
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
        return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE,
                                       MEMBLOCK_NONE);
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
        phys_addr_t alloc;

        alloc = __memblock_alloc_base(size, align, max_addr);

        if (alloc == 0)
                panic("ERROR: Failed to allocate %pa bytes below %pa.\n",
                      &size, &max_addr);

        return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
        return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
        phys_addr_t res = memblock_alloc_nid(size, align, nid);

        if (res)
                return res;
        return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
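/*
 * Call-chain overview of the physical allocators above:
 * memblock_alloc() -> memblock_alloc_base() (panics on failure) ->
 * __memblock_alloc_base() -> memblock_alloc_base_nid() ->
 * memblock_alloc_range_nid(), which does the actual find-and-reserve.
 * memblock_alloc_try_nid() first tries the requested node and then
 * falls back to any node.
 */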
/**
 * memblock_virt_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate (phys address)
 * @max_addr: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * The @min_addr limit is dropped if it cannot be satisfied and the allocation
 * will fall back to memory below @min_addr. Also, allocation may fall back
 * to any node in the system if the specified node cannot
 * hold the requested memory.
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE.
 *
 * The memory block is aligned on SMP_CACHE_BYTES if @align == 0.
 *
 * The phys address of allocated boot memory block is converted to virtual and
 * allocated memory is reset to 0.
 *
 * In addition, function sets the min_count to 0 using kmemleak_alloc for
 * allocated boot memory block, so that it is never reported as leaks.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
static void * __init memblock_virt_alloc_internal(
                                phys_addr_t size, phys_addr_t align,
                                phys_addr_t min_addr, phys_addr_t max_addr,
                                int nid)
{
        phys_addr_t alloc;
        void *ptr;
        ulong flags = choose_memblock_flags();

        if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
                nid = NUMA_NO_NODE;

        /*
         * Detect any accidental use of these APIs after slab is ready, as at
         * this moment memblock may be deinitialized already and its
         * internal data may be destroyed (after execution of free_all_bootmem)
         */
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, nid);

        if (!align)
                align = SMP_CACHE_BYTES;

        if (max_addr > memblock.current_limit)
                max_addr = memblock.current_limit;
again:
        alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
                                            nid, flags);
        if (alloc && !memblock_reserve(alloc, size))
                goto done;

        if (nid != NUMA_NO_NODE) {
                alloc = memblock_find_in_range_node(size, align, min_addr,
                                                    max_addr, NUMA_NO_NODE,
                                                    flags);
                if (alloc && !memblock_reserve(alloc, size))
                        goto done;
        }

        if (min_addr) {
                min_addr = 0;
                goto again;
        }

        if (flags & MEMBLOCK_MIRROR) {
                flags &= ~MEMBLOCK_MIRROR;
                pr_warn("Could not allocate %pap bytes of mirrored memory\n",
                        &size);
                goto again;
        }

        return NULL;
done:
        ptr = phys_to_virt(alloc);

        /*
         * The min_count is set to 0 so that bootmem allocated blocks
         * are never reported as leaks. This is because many of these blocks
         * are only referred to via the physical address which is not
         * looked up by kmemleak.
         */
        kmemleak_alloc(ptr, size, 0, 0);

        return ptr;
}
/**
 * memblock_virt_alloc_try_nid_raw - allocate boot memory block without zeroing
 * memory and without panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *        is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *            is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *            allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. Does not zero allocated memory, does not panic if request
 * cannot be satisfied.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid_raw(
                        phys_addr_t size, phys_addr_t align,
                        phys_addr_t min_addr, phys_addr_t max_addr,
                        int nid)
{
        void *ptr;

        memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
                     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
                     (u64)max_addr, (void *)_RET_IP_);

        ptr = memblock_virt_alloc_internal(size, align,
                                           min_addr, max_addr, nid);
#ifdef CONFIG_DEBUG_VM
        if (ptr && size > 0)
                memset(ptr, PAGE_POISON_PATTERN, size);
#endif
        return ptr;
}
/**
 * memblock_virt_alloc_try_nid_nopanic - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *        is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *            is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *            allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. This function zeroes the allocated memory.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid_nopanic(
                        phys_addr_t size, phys_addr_t align,
                        phys_addr_t min_addr, phys_addr_t max_addr,
                        int nid)
{
        void *ptr;

        memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
                     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
                     (u64)max_addr, (void *)_RET_IP_);

        ptr = memblock_virt_alloc_internal(size, align,
                                           min_addr, max_addr, nid);
        if (ptr)
                memset(ptr, 0, size);
        return ptr;
}
/**
 * memblock_virt_alloc_try_nid - allocate boot memory block with panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *        is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *            is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *            allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public panicking version of memblock_virt_alloc_try_nid_nopanic()
 * which provides debug information (including caller info), if enabled,
 * and panics if the request cannot be satisfied.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success; panics and does
 * not return on failure.
 */
void * __init memblock_virt_alloc_try_nid(
                        phys_addr_t size, phys_addr_t align,
                        phys_addr_t min_addr, phys_addr_t max_addr,
                        int nid)
{
        void *ptr;

        memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
                     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
                     (u64)max_addr, (void *)_RET_IP_);
        ptr = memblock_virt_alloc_internal(size, align,
                                           min_addr, max_addr, nid);
        if (ptr) {
                memset(ptr, 0, size);
                return ptr;
        }

        panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx\n",
              __func__, (u64)size, (u64)align, nid, (u64)min_addr,
              (u64)max_addr);
        return NULL;
}
/**
 * __memblock_free_early - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_virt_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
 */
void __init __memblock_free_early(phys_addr_t base, phys_addr_t size)
{
        memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
                     __func__, (u64)base, (u64)base + size - 1,
                     (void *)_RET_IP_);
        kmemleak_free_part_phys(base, size);
        memblock_remove_range(&memblock.reserved, base, size);
}

/**
 * __memblock_free_late - free bootmem block pages directly to buddy allocator
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are released directly
 * to the buddy allocator, no bootmem metadata is updated because it is gone.
 */
void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
{
        u64 cursor, end;

        memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
                     __func__, (u64)base, (u64)base + size - 1,
                     (void *)_RET_IP_);
        kmemleak_free_part_phys(base, size);
        cursor = PFN_UP(base);
        end = PFN_DOWN(base + size);

        for (; cursor < end; cursor++) {
                __free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
                totalram_pages++;
        }
}
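/*
 * PFN_UP()/PFN_DOWN() above round the start up and the end down, so only
 * pages fully contained in [base, base + size) are handed to the buddy
 * allocator; partial pages at either end are simply not freed.
 */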
  1297. /*
  1298. * Remaining API functions
  1299. */
  1300. phys_addr_t __init_memblock memblock_phys_mem_size(void)
  1301. {
  1302. return memblock.memory.total_size;
  1303. }
  1304. phys_addr_t __init_memblock memblock_reserved_size(void)
  1305. {
  1306. return memblock.reserved.total_size;
  1307. }

/* compute the amount of memory residing below @limit_pfn, in bytes */
phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
{
	unsigned long pages = 0;
	struct memblock_region *r;
	unsigned long start_pfn, end_pfn;

	for_each_memblock(memory, r) {
		start_pfn = memblock_region_memory_base_pfn(r);
		end_pfn = memblock_region_memory_end_pfn(r);
		/* clamp both ends of the region to @limit_pfn */
		start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
		end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
		pages += end_pfn - start_pfn;
	}

	return PFN_PHYS(pages);
}
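
/*
 * Usage sketch (editorial illustration): how much RAM sits below 4 GiB.
 * example_report_lowmem() is hypothetical; SZ_4G is assumed from
 * <linux/sizes.h> and only fits phys_addr_t on 64-bit configurations.
 */
#if 0	/* illustration only */
static void __init example_report_lowmem(void)
{
	phys_addr_t lowmem = memblock_mem_size(PFN_DOWN(SZ_4G));

	pr_info("memory below 4G: %pa bytes\n", &lowmem);
}
#endif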

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	/* regions are kept sorted, so the last one ends highest */
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}
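
/*
 * Usage sketch (editorial illustration): the physical span of RAM. Holes
 * between regions mean the span can exceed memblock_phys_mem_size().
 * example_report_span() is a hypothetical helper.
 */
#if 0	/* illustration only */
static void __init example_report_span(void)
{
	phys_addr_t span = memblock_end_of_DRAM() - memblock_start_of_DRAM();

	pr_info("DRAM spans %pa bytes\n", &span);
}
#endif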

static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
{
	phys_addr_t max_addr = PHYS_ADDR_MAX;
	struct memblock_region *r;

	/*
	 * translate the memory @limit size into the max address within one of
	 * the memory memblock regions; if @limit exceeds the total size of
	 * those regions, max_addr keeps its original value, PHYS_ADDR_MAX
	 */
	for_each_memblock(memory, r) {
		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	return max_addr;
}
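
/*
 * Worked example (editorial): with regions [0x0, 0x1000) and
 * [0x2000, 0x3000) and limit = 0x1800, the first region consumes 0x1000
 * of the limit, leaving 0x800 for the second, so the function returns
 * max_addr = 0x2000 + 0x800 = 0x2800.
 */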

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	phys_addr_t max_addr = PHYS_ADDR_MAX;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == PHYS_ADDR_MAX)
		return;

	/* truncate both memory and reserved regions */
	memblock_remove_range(&memblock.memory, max_addr,
			      PHYS_ADDR_MAX);
	memblock_remove_range(&memblock.reserved, max_addr,
			      PHYS_ADDR_MAX);
}
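
/*
 * Usage sketch (editorial illustration): architecture setup code
 * typically calls this while honouring a mem= style command line limit,
 * e.g. capping usable memory at 512 MiB. example_cap_memory() is
 * hypothetical; SZ_512M is assumed from <linux/sizes.h>.
 */
#if 0	/* illustration only */
static void __init example_cap_memory(void)
{
	memblock_enforce_memory_limit(SZ_512M);
}
#endif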

void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	if (!size)
		return;

	ret = memblock_isolate_range(&memblock.memory, base, size,
				     &start_rgn, &end_rgn);
	if (ret)
		return;

	/* remove all the MAP regions outside [@base, @base + @size) */
	for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	for (i = start_rgn - 1; i >= 0; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	/* truncate the reserved regions */
	memblock_remove_range(&memblock.reserved, 0, base);
	memblock_remove_range(&memblock.reserved,
			      base + size, PHYS_ADDR_MAX);
}

void __init memblock_mem_limit_remove_map(phys_addr_t limit)
{
	phys_addr_t max_addr;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == PHYS_ADDR_MAX)
		return;

	/* unlike memblock_enforce_memory_limit(), NOMAP regions are kept */
	memblock_cap_memory_range(0, max_addr);
}

/* binary-search @type for the region containing @addr; -1 if none */
static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);

	return -1;
}

bool __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

bool __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

bool __init_memblock memblock_is_map_memory(phys_addr_t addr)
{
	int i = memblock_search(&memblock.memory, addr);

	if (i == -1)
		return false;
	return !memblock_is_nomap(&memblock.memory.regions[i]);
}
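
/*
 * Usage sketch (editorial illustration): the kind of check an arch
 * mapping path might make before touching a physical address.
 * example_may_ioremap() is a hypothetical helper.
 */
#if 0	/* illustration only */
static bool example_may_ioremap(phys_addr_t phys)
{
	/* refuse to remap RAM that the kernel already maps and uses */
	return !memblock_is_map_memory(phys);
}
#endif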

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
			 unsigned long *start_pfn, unsigned long *end_pfn)
{
	struct memblock_type *type = &memblock.memory;
	int mid = memblock_search(type, PFN_PHYS(pfn));

	if (mid == -1)
		return -1;

	*start_pfn = PFN_DOWN(type->regions[mid].base);
	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);

	return type->regions[mid].nid;
}
#endif

/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base + @size) is a subset of a memory block.
 *
 * RETURNS:
 * True if the region is fully contained in a memory block, false otherwise.
 */
bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return false;
	return (memblock.memory.regions[idx].base +
		memblock.memory.regions[idx].size) >= end;
}

/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base + @size) intersects a reserved memory block.
 *
 * RETURNS:
 * True if they intersect, false if not.
 */
bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	memblock_cap_size(base, &size);
	return memblock_overlaps_region(&memblock.reserved, base, size);
}
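
/*
 * Usage sketch (editorial illustration): note the asymmetry -- the first
 * predicate tests full containment in memory, the second tests any
 * overlap with reservations. example_range_is_free() is hypothetical.
 */
#if 0	/* illustration only */
static bool __init example_range_is_free(phys_addr_t base, phys_addr_t size)
{
	return memblock_is_region_memory(base, size) &&
	       !memblock_is_region_reserved(base, size);
}
#endif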

void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_region *r;

	for_each_memblock(memory, r) {
		orig_start = r->base;
		orig_end = r->base + r->size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			r->base = start;
			r->size = end - start;
		} else {
			/*
			 * the region vanished after alignment; removal
			 * shifts the array down, so revisit this index
			 */
			memblock_remove_region(&memblock.memory,
					       r - memblock.memory.regions);
			r--;
		}
	}
}
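
/*
 * Usage sketch (editorial illustration): dropping RAM slivers that do
 * not cover a whole page, so every remaining memory region is
 * page-aligned. example_trim_to_pages() is a hypothetical caller.
 */
#if 0	/* illustration only */
static void __init example_trim_to_pages(void)
{
	memblock_trim_memory(PAGE_SIZE);
}
#endif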

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

phys_addr_t __init_memblock memblock_get_current_limit(void)
{
	return memblock.current_limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type)
{
	phys_addr_t base, end, size;
	unsigned long flags;
	int idx;
	struct memblock_region *rgn;

	pr_info(" %s.cnt = 0x%lx\n", type->name, type->cnt);

	for_each_memblock_type(idx, type, rgn) {
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
		end = base + size - 1;
		flags = rgn->flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#lx\n",
			type->name, idx, &base, &end, &size, nid_buf, flags);
	}
}

void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %pa reserved size = %pa\n",
		&memblock.memory.total_size,
		&memblock.reserved.total_size);

	memblock_dump(&memblock.memory);
	memblock_dump(&memblock.reserved);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	memblock_dump(&memblock.physmem);
#endif
}
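
/*
 * Note (editorial): callers normally go through the memblock_dump_all()
 * wrapper in <linux/memblock.h>, which invokes __memblock_dump_all()
 * only when memblock debugging is enabled.
 */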

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

/* enable verbose logging with the "memblock=debug" kernel parameter */
static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);

#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;
	phys_addr_t end;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		end = reg->base + reg->size - 1;

		seq_printf(m, "%4d: ", i);
		seq_printf(m, "%pa..%pa\n", &reg->base, &end);
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(memblock_debug);

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);

	if (!root)
		return -ENXIO;

	debugfs_create_file("memory", 0444, root,
			    &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", 0444, root,
			    &memblock.reserved, &memblock_debug_fops);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	debugfs_create_file("physmem", 0444, root,
			    &memblock.physmem, &memblock_debug_fops);
#endif

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */
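
/*
 * Note (editorial): with debugfs mounted at the conventional
 * /sys/kernel/debug, the ranges registered above appear as
 * /sys/kernel/debug/memblock/memory and
 * /sys/kernel/debug/memblock/reserved.
 */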