memblock.h

#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__

#ifdef CONFIG_HAVE_MEMBLOCK
/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/init.h>
#include <linux/mm.h>

#define INIT_MEMBLOCK_REGIONS	128
#define INIT_PHYSMEM_REGIONS	4

/* Definition of memblock flags. */
enum memblock_flags {
	MEMBLOCK_NONE		= 0x0,	/* No special request */
	MEMBLOCK_HOTPLUG	= 0x1,	/* hotpluggable region */
	MEMBLOCK_MIRROR		= 0x2,	/* mirrored region */
	MEMBLOCK_NOMAP		= 0x4,	/* don't add to kernel direct mapping */
};
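
/*
 * Illustrative note (not part of the upstream header): the flags form a
 * bitmask, so a single region may carry several attributes at once, e.g.
 *
 *	enum memblock_flags f = MEMBLOCK_HOTPLUG | MEMBLOCK_MIRROR;
 *
 * Individual attributes are then tested with a bitwise AND, exactly as
 * the memblock_is_*() helpers further down do.
 */
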
struct memblock_region {
	phys_addr_t base;
	phys_addr_t size;
	enum memblock_flags flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	int nid;
#endif
};

struct memblock_type {
	unsigned long cnt;	/* number of regions */
	unsigned long max;	/* size of the allocated array */
	phys_addr_t total_size;	/* size of all regions */
	struct memblock_region *regions;
	char *name;
};

struct memblock {
	bool bottom_up;  /* is bottom up direction? */
	phys_addr_t current_limit;
	struct memblock_type memory;
	struct memblock_type reserved;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	struct memblock_type physmem;
#endif
};
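
/*
 * Illustrative sketch (not in the upstream header): the global "memblock"
 * instance declared below ties these three types together.  Peeking at the
 * first registered RAM range by hand might look like:
 *
 *	struct memblock_region *r = &memblock.memory.regions[0];
 *	pr_info("first range: base %pa, %pa bytes\n", &r->base, &r->size);
 *
 * though real code should prefer the iterator macros further down.
 */
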
extern struct memblock memblock;
extern int memblock_debug;

#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
#endif

#define memblock_dbg(fmt, ...) \
	if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)

phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					int nid, enum memblock_flags flags);
phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
				   phys_addr_t size, phys_addr_t align);
void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
void memblock_trim_memory(phys_addr_t align);
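
/*
 * Illustrative sketch (not in the upstream header): early arch setup code
 * typically registers RAM with memblock_add() and then carves out ranges
 * that must not be handed to the allocator with memblock_reserve().
 * Assuming hypothetical firmware-provided values ram_base/ram_size:
 *
 *	memblock_add(ram_base, ram_size);
 *	memblock_reserve(__pa_symbol(_text), (phys_addr_t)(_end - _text));
 */
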
bool memblock_overlaps_region(struct memblock_type *type,
			      phys_addr_t base, phys_addr_t size);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
enum memblock_flags choose_memblock_flags(void);
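
/*
 * Illustrative sketch (not in the upstream header): a platform parsing a
 * firmware table of hotpluggable memory might tag each range as it goes
 * and later pick allocation flags accordingly.  Assuming hypothetical
 * values hp_base/hp_size:
 *
 *	memblock_mark_hotplug(hp_base, hp_size);
 *	...
 *	enum memblock_flags flags = choose_memblock_flags();
 */
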
/* Low level functions */
int memblock_add_range(struct memblock_type *type,
		       phys_addr_t base, phys_addr_t size,
		       int nid, enum memblock_flags flags);

void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid);

void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
			  struct memblock_type *type_a,
			  struct memblock_type *type_b, phys_addr_t *out_start,
			  phys_addr_t *out_end, int *out_nid);

void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
				phys_addr_t *out_end);

void __memblock_free_early(phys_addr_t base, phys_addr_t size);
void __memblock_free_late(phys_addr_t base, phys_addr_t size);

/**
 * for_each_mem_range - iterate through memblock areas from type_a that are
 * not included in type_b, or through all of type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range(i, type_a, type_b, nid, flags,		\
			   p_start, p_end, p_nid)			\
	for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,	\
				     p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range(&i, nid, flags, type_a, type_b,		\
			      p_start, p_end, p_nid))

/**
 * for_each_mem_range_rev - iterate in reverse through memblock areas from
 * type_a that are not included in type_b, or through all of type_a if
 * type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range_rev(i, type_a, type_b, nid, flags,		\
			       p_start, p_end, p_nid)			\
	for (i = (u64)ULLONG_MAX,					\
		__next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				     p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				  p_start, p_end, p_nid))

/**
 * for_each_reserved_mem_region - iterate over all reserved memblock areas
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over reserved areas of memblock. Available as soon as memblock
 * is initialized.
 */
#define for_each_reserved_mem_region(i, p_start, p_end)			\
	for (i = 0UL, __next_reserved_mem_region(&i, p_start, p_end);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_reserved_mem_region(&i, p_start, p_end))
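
/*
 * Illustrative sketch (not in the upstream header): printing every
 * reserved range during boot might look like:
 *
 *	u64 i;
 *	phys_addr_t start, end;
 *
 *	for_each_reserved_mem_region(i, &start, &end)
 *		pr_info("reserved: %pa..%pa\n", &start, &end);
 */
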
static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_HOTPLUG;
}

static inline bool memblock_is_mirror(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_MIRROR;
}

static inline bool memblock_is_nomap(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_NOMAP;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
			    unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
			  unsigned long *out_end_pfn, int *out_nid);

/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
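
/*
 * Illustrative sketch (not in the upstream header): summing the pages
 * registered for one NUMA node, here hypothetically node 0:
 *
 *	int i;
 *	unsigned long start_pfn, end_pfn, nr_pages = 0;
 *
 *	for_each_mem_pfn_range(i, 0, &start_pfn, &end_pfn, NULL)
 *		nr_pages += end_pfn - start_pfn;
 */
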
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock. Available as
 * soon as memblock is initialized.
 */
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)	\
	for_each_mem_range(i, &memblock.memory, &memblock.reserved,	\
			   nid, flags, p_start, p_end, p_nid)
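
/*
 * Illustrative sketch (not in the upstream header): walking every range
 * that is RAM but not yet reserved, across all nodes:
 *
 *	u64 i;
 *	phys_addr_t start, end;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		pr_info("free: %pa..%pa\n", &start, &end);
 */
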
/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order. Available as soon as memblock is initialized.
 */
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,	\
					p_nid)				\
	for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved,	\
			       nid, flags, p_start, p_end, p_nid)

/**
 * for_each_resv_unavail_range - iterate through reserved and unavailable memory
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over unavailable but reserved (reserved && !memory) areas of memblock.
 * Available as soon as memblock is initialized.
 * Note: because this memory does not belong to any physical node, the flags
 * and nid arguments do not make sense and thus are not exposed as parameters.
 */
#define for_each_resv_unavail_range(i, p_start, p_end)			\
	for_each_mem_range(i, &memblock.reserved, &memblock.memory,	\
			   NUMA_NO_NODE, MEMBLOCK_NONE, p_start, p_end, NULL)
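
/*
 * Illustrative sketch (not in the upstream header): such ranges are
 * reserved but backed by no registered memory, e.g. firmware holes.
 * Totalling them might look like:
 *
 *	u64 i;
 *	phys_addr_t start, end, total = 0;
 *
 *	for_each_resv_unavail_range(i, &start, &end)
 *		total += end - start;
 */
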
static inline void memblock_set_region_flags(struct memblock_region *r,
					     enum memblock_flags flags)
{
	r->flags |= flags;
}

static inline void memblock_clear_region_flags(struct memblock_region *r,
					       enum memblock_flags flags)
{
	r->flags &= ~flags;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_set_node(phys_addr_t base, phys_addr_t size,
		      struct memblock_type *type, int nid);

static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
	r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return r->nid;
}
#else
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);
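
/*
 * Illustrative sketch (not in the upstream header): in this generation of
 * the API, memblock_alloc() returns a physical address (see the prototype
 * above) and ultimately panics on failure via memblock_alloc_base(), so a
 * caller needing a kernel virtual pointer converts the result itself:
 *
 *	phys_addr_t phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 *	void *p = __va(phys);
 */
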
/*
 * Set the allocation direction to bottom-up or top-down.
 */
static inline void __init memblock_set_bottom_up(bool enable)
{
	memblock.bottom_up = enable;
}

/*
 * Check whether the allocation direction is bottom-up. If this returns
 * true, memblock will allocate memory in the bottom-up direction;
 * otherwise it allocates top-down.
 */
static inline bool memblock_bottom_up(void)
{
	return memblock.bottom_up;
}
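
/*
 * Illustrative sketch (not in the upstream header): some early-boot code
 * temporarily flips the direction so allocations land in low memory, then
 * restores the previous setting:
 *
 *	bool saved = memblock_bottom_up();
 *	memblock_set_bottom_up(true);
 *	... perform allocations ...
 *	memblock_set_bottom_up(saved);
 */
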
/* Flags for memblock_alloc_base() and __memblock_alloc_base() */
#define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE	0

phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					enum memblock_flags flags);
phys_addr_t memblock_alloc_base_nid(phys_addr_t size,
				    phys_addr_t align, phys_addr_t max_addr,
				    int nid, enum memblock_flags flags);
phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
				phys_addr_t max_addr);
phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
				  phys_addr_t max_addr);

phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
phys_addr_t memblock_mem_size(unsigned long limit_pfn);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
bool memblock_is_map_memory(phys_addr_t addr);
bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);

extern void __memblock_dump_all(void);

static inline void memblock_dump_all(void)
{
	if (memblock_debug)
		__memblock_dump_all();
}

/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                              limiting allocations to what is currently
 *                              accessible during boot
 * @limit: New limit value (physical address)
 */
void memblock_set_current_limit(phys_addr_t limit);

phys_addr_t memblock_get_current_limit(void);

/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */

/**
 * memblock_region_memory_base_pfn - Return the lowest pfn intersecting with the memory region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base);
}

/**
 * memblock_region_memory_end_pfn - Return the end pfn of this region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base + reg->size);
}

/**
 * memblock_region_reserved_base_pfn - Return the lowest pfn intersecting with the reserved region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base);
}

/**
 * memblock_region_reserved_end_pfn - Return the end pfn of this region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base + reg->size);
}
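
/*
 * Illustrative worked example (not in the upstream header): with a 4 KiB
 * page size and a region at base = 0x2800, size = 0x2000 (ending at
 * 0x4800):
 *
 *	memblock_region_memory_base_pfn()   = PFN_UP(0x2800)   = 3
 *	memblock_region_memory_end_pfn()    = PFN_DOWN(0x4800) = 4
 *	memblock_region_reserved_base_pfn() = PFN_DOWN(0x2800) = 2
 *	memblock_region_reserved_end_pfn()  = PFN_UP(0x4800)   = 5
 *
 * i.e. the memory variants round inward (only fully covered pages) while
 * the reserved variants round outward (every touched page).
 */
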
#define for_each_memblock(memblock_type, region)			\
	for (region = memblock.memblock_type.regions;			\
	     region < (memblock.memblock_type.regions + memblock.memblock_type.cnt); \
	     region++)

#define for_each_memblock_type(i, memblock_type, rgn)			\
	for (i = 0, rgn = &memblock_type->regions[0];			\
	     i < memblock_type->cnt;					\
	     i++, rgn = &memblock_type->regions[i])
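
/*
 * Illustrative sketch (not in the upstream header): for_each_memblock()
 * takes the field name (memory or reserved), not a pointer:
 *
 *	struct memblock_region *region;
 *
 *	for_each_memblock(memory, region)
 *		pr_info("node %d: base %pa, %pa bytes\n",
 *			memblock_get_region_node(region),
 *			&region->base, &region->size);
 */
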
#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
#else
static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif

#else
static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK */

#endif /* __KERNEL__ */

#endif /* _LINUX_MEMBLOCK_H */