sparse.c
// SPDX-License-Identifier: GPL-2.0
/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section **mem_section;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);
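
/*
 * With CONFIG_SPARSEMEM_EXTREME the table above is two-level: a section
 * number is split into a root index (SECTION_NR_TO_ROOT(nr), i.e.
 * nr / SECTIONS_PER_ROOT) and an offset within that root, and each root of
 * SECTIONS_PER_ROOT entries is allocated on demand by sparse_index_init()
 * below.  Without it, the flat two-dimensional array is sized statically
 * for every possible section.
 */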

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(const struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static noinline struct mem_section __ref *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available())
		section = kzalloc_node(array_size, GFP_KERNEL, nid);
	else
		section = memblock_virt_alloc_node(array_size, nid);

	return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;

	if (mem_section[root])
		return -EEXIST;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;

	mem_section[root] = section;

	return 0;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
int __section_nr(struct mem_section *ms)
{
	unsigned long root_nr;
	struct mem_section *root = NULL;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	VM_BUG_ON(!root);

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
#else
int __section_nr(struct mem_section *ms)
{
	return (int)(ms - mem_section[0]);
}
#endif

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node. This keeps us from having to use another data structure. The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}
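
/*
 * The node id sits above SECTION_NID_SHIFT so the low flag bits
 * (SECTION_MARKED_PRESENT and friends) stay usable while the section is in
 * this early state: memory_present() below ORs the flags on top of the
 * encoded nid, and sparse_init_one_section() later clears the high part
 * (where the nid lived) and stores the encoded mem_map there instead,
 * keeping the flag bits.
 */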

/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
						unsigned long *end_pfn)
{
	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (*start_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*start_pfn = max_sparsemem_pfn;
		*end_pfn = max_sparsemem_pfn;
	} else if (*end_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*end_pfn = max_sparsemem_pfn;
	}
}
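
/*
 * For example, an architecture with MAX_PHYSMEM_BITS == 46 and a 4K page
 * size (PAGE_SHIFT == 12) yields max_sparsemem_pfn = 1UL << 34: any pfn at
 * or above 2^34 cannot be described by this memory model and is clamped
 * away by the checks above.
 */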

/*
 * There are a number of times that we loop over NR_MEM_SECTIONS,
 * looking for section_present() on each. But, when we have very
 * large physical address spaces, NR_MEM_SECTIONS can also be
 * very large which makes the loops quite long.
 *
 * Keeping track of this gives us an easy way to break out of
 * those loops early.
 */
int __highest_present_section_nr;
static void section_mark_present(struct mem_section *ms)
{
	int section_nr = __section_nr(ms);

	if (section_nr > __highest_present_section_nr)
		__highest_present_section_nr = section_nr;

	ms->section_mem_map |= SECTION_MARKED_PRESENT;
}

static inline int next_present_section_nr(int section_nr)
{
	do {
		section_nr++;
		if (present_section_nr(section_nr))
			return section_nr;
	} while ((section_nr < NR_MEM_SECTIONS) &&
		 (section_nr <= __highest_present_section_nr));

	return -1;
}
#define for_each_present_section_nr(start, section_nr)		\
	for (section_nr = next_present_section_nr(start-1);	\
	     ((section_nr >= 0) &&				\
	      (section_nr < NR_MEM_SECTIONS) &&			\
	      (section_nr <= __highest_present_section_nr));	\
	     section_nr = next_present_section_nr(section_nr))
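
/*
 * alloc_usemap_and_memmap() and sparse_init() below use this iterator to
 * touch only the sections that memory_present() actually marked, instead
 * of scanning all NR_MEM_SECTIONS entries on every pass.
 */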

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

#ifdef CONFIG_SPARSEMEM_EXTREME
	if (unlikely(!mem_section)) {
		unsigned long size, align;

		size = sizeof(struct mem_section*) * NR_SECTION_ROOTS;
		align = 1 << (INTERNODE_CACHE_SHIFT);
		mem_section = memblock_virt_alloc(size, align);
	}
#endif

	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map) {
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_IS_ONLINE;
			section_mark_present(ms);
		}
	}
}

/*
 * Subtle, we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	unsigned long coded_mem_map =
		(unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
	BUILD_BUG_ON(SECTION_MAP_LAST_BIT > (1UL<<PFN_SECTION_SHIFT));
	BUG_ON(coded_mem_map & ~SECTION_MAP_MASK);
	return coded_mem_map;
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
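
/*
 * In other words, the stored value is the address the section's mem_map
 * would have if it started at pfn 0, so for any pfn inside the section the
 * struct page is simply "decoded mem_map + pfn".  The BUG_ON above
 * guarantees that the bits below SECTION_MAP_LAST_BIT are clear in the
 * coded value, leaving them free to carry the SECTION_* flags.
 */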

static int __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		unsigned long *pageblock_bitmap)
{
	if (!present_section(ms))
		return -EINVAL;

	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
							SECTION_HAS_MEM_MAP;
	ms->pageblock_flags = pageblock_bitmap;

	return 1;
}

unsigned long usemap_size(void)
{
	return BITS_TO_LONGS(SECTION_BLOCKFLAGS_BITS) * sizeof(unsigned long);
}
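
/*
 * usemap_size() is the per-section pageblock bitmap size in bytes:
 * SECTION_BLOCKFLAGS_BITS flag bits (a few bits of migratetype state per
 * pageblock in the section) rounded up to whole unsigned longs.
 */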

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
	return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MEMORY_HOTREMOVE
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	unsigned long goal, limit;
	unsigned long *p;
	int nid;
	/*
	 * A page may contain usemaps for other sections preventing the
	 * page being freed and making a section unremovable while
	 * other sections referencing the usemap remain active. Similarly,
	 * a pgdat can prevent a section being removed. If section A
	 * contains a pgdat and section B contains the usemap, both
	 * sections become inter-dependent. This allocates usemaps
	 * from the same section as the pgdat where possible to avoid
	 * this problem.
	 */
	goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
	limit = goal + (1UL << PA_SECTION_SHIFT);
	nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
	p = memblock_virt_alloc_try_nid_nopanic(size,
						SMP_CACHE_BYTES, goal, limit,
						nid);
	if (!p && limit) {
		limit = 0;
		goto again;
	}
	return p;
}
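
/*
 * Concretely: goal is the physical address of the pgdat rounded down to a
 * section boundary and limit is one full section above that, so the first
 * attempt keeps the usemap inside the pgdat's own section.  Only if that
 * fails is limit dropped to 0 (no upper bound) and the allocation allowed
 * to land anywhere on the node.
 */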

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
	unsigned long usemap_snr, pgdat_snr;
	static unsigned long old_usemap_snr;
	static unsigned long old_pgdat_snr;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int usemap_nid;

	/* First call */
	if (!old_usemap_snr) {
		old_usemap_snr = NR_MEM_SECTIONS;
		old_pgdat_snr = NR_MEM_SECTIONS;
	}

	usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
	pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
	if (usemap_snr == pgdat_snr)
		return;

	if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
		/* skip redundant message */
		return;

	old_usemap_snr = usemap_snr;
	old_pgdat_snr = pgdat_snr;

	usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
	if (usemap_nid != nid) {
		pr_info("node %d must be removed before remove section %ld\n",
			nid, usemap_snr);
		return;
	}
	/*
	 * There is a circular dependency.
	 * Some platforms allow un-removable sections because they will just
	 * gather other removable sections for dynamic partitioning.
	 * Just report the un-removable section's number here.
	 */
	pr_info("Section %ld and %ld (node %d) have a circular dependency on usemap and pgdat allocations\n",
		usemap_snr, pgdat_snr, nid);
}
#else
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	return memblock_virt_alloc_node_nopanic(size, pgdat->node_id);
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

static void __init sparse_early_usemaps_alloc_node(void *data,
				 unsigned long pnum_begin,
				 unsigned long pnum_end,
				 unsigned long usemap_count, int nodeid)
{
	void *usemap;
	unsigned long pnum;
	unsigned long **usemap_map = (unsigned long **)data;
	int size = usemap_size();

	usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
							  size * usemap_count);
	if (!usemap) {
		pr_warn("%s: allocation failed\n", __func__);
		return;
	}

	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		if (!present_section_nr(pnum))
			continue;
		usemap_map[pnum] = usemap;
		usemap += size;
		check_usemap_section_nr(nodeid, usemap_map[pnum]);
	}
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid,
		struct vmem_altmap *altmap)
{
	struct page *map;
	unsigned long size;

	size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
	map = memblock_virt_alloc_try_nid(size,
					  PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
					  BOOTMEM_ALLOC_ACCESSIBLE, nid);
	return map;
}

void __init sparse_mem_maps_populate_node(struct page **map_map,
					  unsigned long pnum_begin,
					  unsigned long pnum_end,
					  unsigned long map_count, int nodeid)
{
	void *map;
	unsigned long pnum;
	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;

	size = PAGE_ALIGN(size);
	map = memblock_virt_alloc_try_nid_raw(size * map_count,
					      PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
					      BOOTMEM_ALLOC_ACCESSIBLE, nodeid);
	if (map) {
		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
			if (!present_section_nr(pnum))
				continue;
			map_map[pnum] = map;
			map += size;
		}
		return;
	}

	/* fallback */
	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;
		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid, NULL);
		if (map_map[pnum])
			continue;
		ms = __nr_to_section(pnum);
		pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
		       __func__);
		ms->section_mem_map = 0;
	}
}
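
/*
 * The single memblock allocation of size * map_count above keeps one
 * node's mem_maps physically contiguous.  Only when that large allocation
 * fails does the code fall back to one allocation per present section, and
 * a section whose mem_map still cannot be allocated is dropped by clearing
 * its section_mem_map.
 */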
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
static void __init sparse_early_mem_maps_alloc_node(void *data,
				 unsigned long pnum_begin,
				 unsigned long pnum_end,
				 unsigned long map_count, int nodeid)
{
	struct page **map_map = (struct page **)data;

	sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
					 map_count, nodeid);
}
#else
static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
	struct page *map;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	map = sparse_mem_map_populate(pnum, nid, NULL);
	if (map)
		return map;

	pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
	       __func__);
	ms->section_mem_map = 0;
	return NULL;
}
#endif

void __weak __meminit vmemmap_populate_print_last(void)
{
}

/**
 * alloc_usemap_and_memmap - memory allocation for pageblock flags and vmemmap
 * @map: usemap_map for pageblock flags or map_map for vmemmap
 */
static void __init alloc_usemap_and_memmap(void (*alloc_func)
					(void *, unsigned long, unsigned long,
					unsigned long, int), void *data)
{
	unsigned long pnum;
	unsigned long map_count;
	int nodeid_begin = 0;
	unsigned long pnum_begin = 0;

	for_each_present_section_nr(0, pnum) {
		struct mem_section *ms;

		ms = __nr_to_section(pnum);
		nodeid_begin = sparse_early_nid(ms);
		pnum_begin = pnum;
		break;
	}
	map_count = 1;
	for_each_present_section_nr(pnum_begin + 1, pnum) {
		struct mem_section *ms;
		int nodeid;

		ms = __nr_to_section(pnum);
		nodeid = sparse_early_nid(ms);
		if (nodeid == nodeid_begin) {
			map_count++;
			continue;
		}
		/* ok, we need to take care of sections from pnum_begin to pnum - 1 */
		alloc_func(data, pnum_begin, pnum,
						map_count, nodeid_begin);
		/* new start, update count etc. */
		nodeid_begin = nodeid;
		pnum_begin = pnum;
		map_count = 1;
	}
	/* ok, last chunk */
	alloc_func(data, pnum_begin, NR_MEM_SECTIONS,
						map_count, nodeid_begin);
}
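
/*
 * The walk above batches consecutive present sections that live on the
 * same NUMA node: each time the node id changes (and once more at the
 * end), alloc_func() is handed the half-open range [pnum_begin, pnum)
 * together with the number of present sections in it, so all usemaps or
 * mem_maps for one node can be allocated in a single call.
 */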

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum;
	struct page *map;
	unsigned long *usemap;
	unsigned long **usemap_map;
	int size;
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	int size2;
	struct page **map_map;
#endif

	/* see include/linux/mmzone.h 'struct mem_section' definition */
	BUILD_BUG_ON(!is_power_of_2(sizeof(struct mem_section)));

	/* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
	set_pageblock_order();

	/*
	 * Each mem_map is allocated with big pages (e.g. 2M on 64-bit x86),
	 * while a usemap is much smaller than a page (e.g. 24 bytes).
	 * Allocating a 2M-aligned mem_map and then a tiny usemap in turn
	 * pushes the next mem_map to yet another 2M block, so on a big
	 * system the memory ends up with a lot of holes.  Instead, try to
	 * keep the 2M mem_map allocations contiguous by allocating all the
	 * usemaps first.
	 *
	 * powerpc needs to call sparse_init_one_section right after each
	 * sparse_early_mem_map_alloc, so allocate usemap_map at first.
	 */
	size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
	usemap_map = memblock_virt_alloc(size, 0);
	if (!usemap_map)
		panic("can not allocate usemap_map\n");
	alloc_usemap_and_memmap(sparse_early_usemaps_alloc_node,
							(void *)usemap_map);

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
	map_map = memblock_virt_alloc(size2, 0);
	if (!map_map)
		panic("can not allocate map_map\n");
	alloc_usemap_and_memmap(sparse_early_mem_maps_alloc_node,
							(void *)map_map);
#endif

	for_each_present_section_nr(0, pnum) {
		usemap = usemap_map[pnum];
		if (!usemap)
			continue;

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
		map = map_map[pnum];
#else
		map = sparse_early_mem_map_alloc(pnum);
#endif
		if (!map)
			continue;

		sparse_init_one_section(__nr_to_section(pnum), pnum, map,
								usemap);
	}

	vmemmap_populate_print_last();

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	memblock_free_early(__pa(map_map), size2);
#endif
	memblock_free_early(__pa(usemap_map), size);
}
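
/*
 * Boot-time ordering, in short: memory_present() has already marked the
 * present sections, sparse_init() then allocates every usemap (and, with
 * CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER, every mem_map) in per-node
 * batches, wires each present section up via sparse_init_one_section(),
 * and finally frees the temporary usemap_map/map_map pointer arrays.
 */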

#ifdef CONFIG_MEMORY_HOTPLUG

/* Mark all memory sections within the pfn range as online */
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		/* onlining code should never touch invalid ranges */
		if (WARN_ON(!valid_section_nr(section_nr)))
			continue;

		ms = __nr_to_section(section_nr);
		ms->section_mem_map |= SECTION_IS_ONLINE;
	}
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/* Mark all memory sections within the pfn range as offline */
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		/*
		 * TODO this needs some double checking. Offlining code makes
		 * sure to check pfn_valid but those checks might be just bogus
		 */
		if (WARN_ON(!valid_section_nr(section_nr)))
			continue;

		ms = __nr_to_section(section_nr);
		ms->section_mem_map &= ~SECTION_IS_ONLINE;
	}
}
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
		struct vmem_altmap *altmap)
{
	/* This will make the necessary allocations eventually. */
	return sparse_mem_map_populate(pnum, nid, altmap);
}

static void __kfree_section_memmap(struct page *memmap,
		struct vmem_altmap *altmap)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

	vmemmap_free(start, end, altmap);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_map_bootmem(struct page *memmap)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

	vmemmap_free(start, end, NULL);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#else
static struct page *__kmalloc_section_memmap(void)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION;

	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
	if (page)
		goto got_map_page;

	ret = vmalloc(memmap_size);
	if (ret)
		goto got_map_ptr;

	return NULL;
got_map_page:
	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:
	return ret;
}

static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
		struct vmem_altmap *altmap)
{
	return __kmalloc_section_memmap();
}

static void __kfree_section_memmap(struct page *memmap,
		struct vmem_altmap *altmap)
{
	if (is_vmalloc_addr(memmap))
		vfree(memmap);
	else
		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * PAGES_PER_SECTION));
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_map_bootmem(struct page *memmap)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	unsigned long magic, nr_pages;
	struct page *page = virt_to_page(memmap);

	nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
		>> PAGE_SHIFT;

	for (i = 0; i < nr_pages; i++, page++) {
		magic = (unsigned long) page->freelist;

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page_private(page);

		/*
		 * When this function is called, the removing section is in a
		 * logically offlined state, meaning all of its pages are
		 * isolated from the page allocator.  If the removing
		 * section's memmap is placed on that same section, it must
		 * not be freed: the page allocator could otherwise hand it
		 * out again even though it is about to be removed physically.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}
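
/*
 * For these bootmem-allocated memmap pages, page->freelist carries the
 * bootmem info type recorded when the section was registered (a memmap
 * page is a SECTION_INFO/MIX_SECTION_INFO page, never NODE_INFO, hence the
 * BUG_ON above) and page_private() carries the number of the section that
 * the page backs.
 */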
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * Returns the number of sections whose mem_maps were properly set.
 * If this is <= 0, the passed-in map was not consumed and must be freed.
 */
int __meminit sparse_add_one_section(struct pglist_data *pgdat,
		unsigned long start_pfn, struct vmem_altmap *altmap)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct mem_section *ms;
	struct page *memmap;
	unsigned long *usemap;
	unsigned long flags;
	int ret;

	/*
	 * No locking for this, because it does its own locking;
	 * plus, it does a kmalloc.
	 */
	ret = sparse_index_init(section_nr, pgdat->node_id);
	if (ret < 0 && ret != -EEXIST)
		return ret;
	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, altmap);
	if (!memmap)
		return -ENOMEM;
	usemap = __kmalloc_section_usemap();
	if (!usemap) {
		__kfree_section_memmap(memmap, altmap);
		return -ENOMEM;
	}

	pgdat_resize_lock(pgdat, &flags);

	ms = __pfn_to_section(start_pfn);
	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
		ret = -EEXIST;
		goto out;
	}

#ifdef CONFIG_DEBUG_VM
	/*
	 * Poison uninitialized struct pages in order to catch invalid flags
	 * combinations.
	 */
	memset(memmap, PAGE_POISON_PATTERN, sizeof(struct page) * PAGES_PER_SECTION);
#endif

	section_mark_present(ms);

	ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
	pgdat_resize_unlock(pgdat, &flags);
	if (ret <= 0) {
		kfree(usemap);
		__kfree_section_memmap(memmap, altmap);
	}
	return ret;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
#ifdef CONFIG_MEMORY_FAILURE
static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
	int i;

	if (!memmap)
		return;

	for (i = 0; i < nr_pages; i++) {
		if (PageHWPoison(&memmap[i])) {
			atomic_long_sub(1, &num_poisoned_pages);
			ClearPageHWPoison(&memmap[i]);
		}
	}
}
#else
static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
}
#endif

static void free_section_usemap(struct page *memmap, unsigned long *usemap,
		struct vmem_altmap *altmap)
{
	struct page *usemap_page;

	if (!usemap)
		return;

	usemap_page = virt_to_page(usemap);
	/*
	 * Check to see if allocation came from hot-plug-add
	 */
	if (PageSlab(usemap_page) || PageCompound(usemap_page)) {
		kfree(usemap);
		if (memmap)
			__kfree_section_memmap(memmap, altmap);
		return;
	}

	/*
	 * The usemap came from bootmem. This is packed with other usemaps
	 * on the section which has pgdat at boot time. Just keep it as is now.
	 */
	if (memmap)
		free_map_bootmem(memmap);
}

void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
		unsigned long map_offset, struct vmem_altmap *altmap)
{
	struct page *memmap = NULL;
	unsigned long *usemap = NULL, flags;
	struct pglist_data *pgdat = zone->zone_pgdat;

	pgdat_resize_lock(pgdat, &flags);
	if (ms->section_mem_map) {
		usemap = ms->pageblock_flags;
		memmap = sparse_decode_mem_map(ms->section_mem_map,
						__section_nr(ms));
		ms->section_mem_map = 0;
		ms->pageblock_flags = NULL;
	}
	pgdat_resize_unlock(pgdat, &flags);

	clear_hwpoisoned_pages(memmap + map_offset,
			       PAGES_PER_SECTION - map_offset);
	free_section_usemap(memmap, usemap, altmap);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_MEMORY_HOTPLUG */