numa.c

/* Common code for 32 and 64-bit NUMA */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <linux/topology.h>

#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/acpi.h>
#include <asm/amd_nb.h>

#include "numa_internal.h"
int __initdata numa_off;
nodemask_t numa_nodes_parsed __initdata;

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

static struct numa_meminfo numa_meminfo
#ifndef CONFIG_MEMORY_HOTPLUG
__initdata
#endif
;

static int numa_distance_cnt;
static u8 *numa_distance;
static __init int numa_setup(char *opt)
{
        if (!opt)
                return -EINVAL;
        if (!strncmp(opt, "off", 3))
                numa_off = 1;
#ifdef CONFIG_NUMA_EMU
        if (!strncmp(opt, "fake=", 5))
                numa_emu_cmdline(opt + 5);
#endif
#ifdef CONFIG_ACPI_NUMA
        if (!strncmp(opt, "noacpi", 6))
                acpi_numa = -1;
#endif
        return 0;
}
early_param("numa", numa_setup);
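
/*
 * Example boot-time usage of the "numa=" parameter handled above:
 *
 *   numa=off      - disable NUMA; x86_numa_init() goes straight to
 *                   dummy_numa_init()
 *   numa=fake=4   - ask the emulation code to split memory into 4 nodes
 *                   (only with CONFIG_NUMA_EMU)
 *   numa=noacpi   - ignore the ACPI SRAT (only with CONFIG_ACPI_NUMA)
 */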
/*
 * apicid, cpu, node mappings
 */
s16 __apicid_to_node[MAX_LOCAL_APIC] = {
        [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};

int numa_cpu_node(int cpu)
{
        int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);

        if (apicid != BAD_APICID)
                return __apicid_to_node[apicid];
        return NUMA_NO_NODE;
}
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
void numa_set_node(int cpu, int node)
{
        int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

        /* early setting, no percpu area yet */
        if (cpu_to_node_map) {
                cpu_to_node_map[cpu] = node;
                return;
        }

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
        if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
                printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
                dump_stack();
                return;
        }
#endif
        per_cpu(x86_cpu_to_node_map, cpu) = node;

        set_cpu_numa_node(cpu, node);
}

void numa_clear_node(int cpu)
{
        numa_set_node(cpu, NUMA_NO_NODE);
}
/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
void __init setup_node_to_cpumask_map(void)
{
        unsigned int node;

        /* setup nr_node_ids if not done yet */
        if (nr_node_ids == MAX_NUMNODES)
                setup_nr_node_ids();

        /* allocate the map */
        for (node = 0; node < nr_node_ids; node++)
                alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

        /* cpumask_of_node() will now work */
        pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
}
static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
                                     struct numa_meminfo *mi)
{
        /* ignore zero length blks */
        if (start == end)
                return 0;

        /* whine about and ignore invalid blks */
        if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
                pr_warning("NUMA: Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
                           nid, start, end - 1);
                return 0;
        }

        if (mi->nr_blks >= NR_NODE_MEMBLKS) {
                pr_err("NUMA: too many memblk ranges\n");
                return -EINVAL;
        }

        mi->blk[mi->nr_blks].start = start;
        mi->blk[mi->nr_blks].end = end;
        mi->blk[mi->nr_blks].nid = nid;
        mi->nr_blks++;
        return 0;
}
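
/*
 * Note on the return convention above: zero-length and malformed blocks
 * are dropped but still return 0, so a sloppy firmware table does not
 * abort NUMA init; only overflowing NR_NODE_MEMBLKS is a hard -EINVAL.
 */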
/**
 * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo
 * @idx: Index of memblk to remove
 * @mi: numa_meminfo to remove memblk from
 *
 * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and
 * decrementing @mi->nr_blks.
 */
void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
{
        mi->nr_blks--;
        memmove(&mi->blk[idx], &mi->blk[idx + 1],
                (mi->nr_blks - idx) * sizeof(mi->blk[0]));
}
/**
 * numa_add_memblk - Add one numa_memblk to numa_meminfo
 * @nid: NUMA node ID of the new memblk
 * @start: Start address of the new memblk
 * @end: End address of the new memblk
 *
 * Add a new memblk to the default numa_meminfo.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_add_memblk(int nid, u64 start, u64 end)
{
        return numa_add_memblk_to(nid, start, end, &numa_meminfo);
}
/* Allocate NODE_DATA for a node on the local memory */
static void __init alloc_node_data(int nid)
{
        const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
        u64 nd_pa;
        void *nd;
        int tnid;

        /*
         * Allocate node data.  Try node-local memory and then any node.
         * Never allocate in DMA zone.
         */
        nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
        if (!nd_pa) {
                nd_pa = __memblock_alloc_base(nd_size, SMP_CACHE_BYTES,
                                              MEMBLOCK_ALLOC_ACCESSIBLE);
                if (!nd_pa) {
                        pr_err("Cannot find %zu bytes in node %d\n",
                               nd_size, nid);
                        return;
                }
        }
        nd = __va(nd_pa);

        /* report and initialize */
        printk(KERN_INFO "NODE_DATA(%d) allocated [mem %#010Lx-%#010Lx]\n", nid,
               nd_pa, nd_pa + nd_size - 1);
        tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
        if (tnid != nid)
                printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nid, tnid);

        node_data[nid] = nd;
        memset(NODE_DATA(nid), 0, sizeof(pg_data_t));

        node_set_online(nid);
}
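
/*
 * Note: if both allocations above fail, alloc_node_data() returns without
 * setting node_data[nid] or onlining the node, so the node is effectively
 * dropped from the final configuration.
 */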
/**
 * numa_cleanup_meminfo - Cleanup a numa_meminfo
 * @mi: numa_meminfo to clean up
 *
 * Sanitize @mi by merging and removing unnecessary memblks.  Also check for
 * conflicts and clear unused memblks.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
{
        const u64 low = 0;
        const u64 high = PFN_PHYS(max_pfn);
        int i, j, k;

        /* first, trim all entries */
        for (i = 0; i < mi->nr_blks; i++) {
                struct numa_memblk *bi = &mi->blk[i];

                /* make sure all blocks are inside the limits */
                bi->start = max(bi->start, low);
                bi->end = min(bi->end, high);

                /* and there's no empty block */
                if (bi->start >= bi->end)
                        numa_remove_memblk_from(i--, mi);
        }

        /* merge neighboring / overlapping entries */
        for (i = 0; i < mi->nr_blks; i++) {
                struct numa_memblk *bi = &mi->blk[i];

                for (j = i + 1; j < mi->nr_blks; j++) {
                        struct numa_memblk *bj = &mi->blk[j];
                        u64 start, end;

                        /*
                         * See whether there are overlapping blocks.  Whine
                         * about but allow overlaps of the same nid.  They
                         * will be merged below.
                         */
                        if (bi->end > bj->start && bi->start < bj->end) {
                                if (bi->nid != bj->nid) {
                                        pr_err("NUMA: node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n",
                                               bi->nid, bi->start, bi->end - 1,
                                               bj->nid, bj->start, bj->end - 1);
                                        return -EINVAL;
                                }
                                pr_warning("NUMA: Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n",
                                           bi->nid, bi->start, bi->end - 1,
                                           bj->start, bj->end - 1);
                        }

                        /*
                         * Join together blocks on the same node, holes
                         * between which don't overlap with memory on other
                         * nodes.
                         */
                        if (bi->nid != bj->nid)
                                continue;
                        start = min(bi->start, bj->start);
                        end = max(bi->end, bj->end);
                        for (k = 0; k < mi->nr_blks; k++) {
                                struct numa_memblk *bk = &mi->blk[k];

                                if (bi->nid == bk->nid)
                                        continue;
                                if (start < bk->end && end > bk->start)
                                        break;
                        }
                        if (k < mi->nr_blks)
                                continue;
                        printk(KERN_INFO "NUMA: Node %d [mem %#010Lx-%#010Lx] + [mem %#010Lx-%#010Lx] -> [mem %#010Lx-%#010Lx]\n",
                               bi->nid, bi->start, bi->end - 1, bj->start,
                               bj->end - 1, start, end - 1);
                        bi->start = start;
                        bi->end = end;
                        numa_remove_memblk_from(j--, mi);
                }
        }

        /* clear unused ones */
        for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
                mi->blk[i].start = mi->blk[i].end = 0;
                mi->blk[i].nid = NUMA_NO_NODE;
        }

        return 0;
}
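
/*
 * Worked example for the merge pass above (illustrative addresses): node 0
 * blocks [0, 1G) and [1G, 2G) merge into [0, 2G) because no other node
 * owns memory in between, whereas node 0 blocks [0, 1G) and [3G, 4G) stay
 * separate if node 1 has a block overlapping (1G, 3G).
 */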
/*
 * Set nodes, which have memory in @mi, in *@nodemask.
 */
static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask,
                                              const struct numa_meminfo *mi)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(mi->blk); i++)
                if (mi->blk[i].start != mi->blk[i].end &&
                    mi->blk[i].nid != NUMA_NO_NODE)
                        node_set(mi->blk[i].nid, *nodemask);
}
/**
 * numa_reset_distance - Reset NUMA distance table
 *
 * The current table is freed.  The next numa_set_distance() call will
 * create a new one.
 */
void __init numa_reset_distance(void)
{
        size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]);

        /* numa_distance could be 1LU marking allocation failure, test cnt */
        if (numa_distance_cnt)
                memblock_free(__pa(numa_distance), size);
        numa_distance_cnt = 0;
        numa_distance = NULL;        /* enable table creation */
}
static int __init numa_alloc_distance(void)
{
        nodemask_t nodes_parsed;
        size_t size;
        int i, j, cnt = 0;
        u64 phys;

        /* size the new table and allocate it */
        nodes_parsed = numa_nodes_parsed;
        numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo);

        for_each_node_mask(i, nodes_parsed)
                cnt = i;
        cnt++;
        size = cnt * cnt * sizeof(numa_distance[0]);

        phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
                                      size, PAGE_SIZE);
        if (!phys) {
                pr_warning("NUMA: Warning: can't allocate distance table!\n");
                /* don't retry until explicitly reset */
                numa_distance = (void *)1LU;
                return -ENOMEM;
        }
        memblock_reserve(phys, size);

        numa_distance = __va(phys);
        numa_distance_cnt = cnt;

        /* fill with the default distances */
        for (i = 0; i < cnt; i++)
                for (j = 0; j < cnt; j++)
                        numa_distance[i * cnt + j] = i == j ?
                                LOCAL_DISTANCE : REMOTE_DISTANCE;
        printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);

        return 0;
}
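
/*
 * Sizing example for the table above: if the parsed nodes are {0, 2}, the
 * highest node id is 2, so cnt = 3 and a 3x3 byte matrix is reserved.  It
 * is pre-filled with LOCAL_DISTANCE (10) on the diagonal and
 * REMOTE_DISTANCE (20) everywhere else, per the usual ACPI SLIT
 * convention.
 */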
/**
 * numa_set_distance - Set the NUMA distance from one node to another
 * @from: the 'from' node to set distance
 * @to: the 'to' node to set distance
 * @distance: NUMA distance
 *
 * Set the distance from node @from to @to to @distance.  If the distance
 * table doesn't exist, one which is large enough to accommodate all the
 * currently known nodes will be created.
 *
 * If such a table cannot be allocated, a warning is printed and further
 * calls are ignored until the distance table is reset with
 * numa_reset_distance().
 *
 * If @from or @to is higher than the highest known node or lower than zero
 * at the time of table creation, or @distance doesn't make sense, the call
 * is ignored.
 * This is to allow simplification of specific NUMA config implementations.
 */
void __init numa_set_distance(int from, int to, int distance)
{
        if (!numa_distance && numa_alloc_distance() < 0)
                return;

        if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
            from < 0 || to < 0) {
                pr_warn_once("NUMA: Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
                             from, to, distance);
                return;
        }

        if ((u8)distance != distance ||
            (from == to && distance != LOCAL_DISTANCE)) {
                pr_warn_once("NUMA: Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
                             from, to, distance);
                return;
        }

        numa_distance[from * numa_distance_cnt + to] = distance;
}
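
/*
 * Note: the (u8)distance != distance check above rejects any value that
 * does not fit in a byte (negative or > 255), matching the u8 storage of
 * the table; a node's distance to itself must stay LOCAL_DISTANCE.
 */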
int __node_distance(int from, int to)
{
        if (from >= numa_distance_cnt || to >= numa_distance_cnt)
                return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
        return numa_distance[from * numa_distance_cnt + to];
}
EXPORT_SYMBOL(__node_distance);
/*
 * Sanity check to catch more bad NUMA configurations (they are amazingly
 * common).  Make sure the nodes cover all memory.
 */
static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
{
        u64 numaram, e820ram;
        int i;

        numaram = 0;
        for (i = 0; i < mi->nr_blks; i++) {
                u64 s = mi->blk[i].start >> PAGE_SHIFT;
                u64 e = mi->blk[i].end >> PAGE_SHIFT;
                numaram += e - s;
                numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
                if ((s64)numaram < 0)
                        numaram = 0;
        }

        e820ram = max_pfn - absent_pages_in_range(0, max_pfn);

        /* We seem to lose 3 pages somewhere. Allow 1M of slack. */
        if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
                printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n",
                       (numaram << PAGE_SHIFT) >> 20,
                       (e820ram << PAGE_SHIFT) >> 20);
                return false;
        }
        return true;
}
static void __init numa_clear_kernel_node_hotplug(void)
{
        int i, nid;
        nodemask_t numa_kernel_nodes = NODE_MASK_NONE;
        unsigned long start, end;
        struct memblock_region *r;

        /*
         * At this time, all memory regions reserved by memblock are
         * used by the kernel.  Setting the nid in memblock.reserved
         * will mark out all the nodes the kernel resides in.
         */
        for (i = 0; i < numa_meminfo.nr_blks; i++) {
                struct numa_memblk *mb = &numa_meminfo.blk[i];

                memblock_set_node(mb->start, mb->end - mb->start,
                                  &memblock.reserved, mb->nid);
        }

        /*
         * Mark all kernel nodes.
         *
         * When booting with mem=nn[kMG] or in a kdump kernel, numa_meminfo
         * may not include all the memblock.reserved memory ranges because
         * trim_snb_memory() reserves specific pages for Sandy Bridge graphics.
         */
        for_each_memblock(reserved, r)
                if (r->nid != MAX_NUMNODES)
                        node_set(r->nid, numa_kernel_nodes);

        /* Clear MEMBLOCK_HOTPLUG flag for memory in kernel nodes. */
        for (i = 0; i < numa_meminfo.nr_blks; i++) {
                nid = numa_meminfo.blk[i].nid;
                if (!node_isset(nid, numa_kernel_nodes))
                        continue;

                start = numa_meminfo.blk[i].start;
                end = numa_meminfo.blk[i].end;

                memblock_clear_hotplug(start, end - start);
        }
}
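
/*
 * After the pass above, every node that holds kernel text, data or early
 * reservations has had MEMBLOCK_HOTPLUG cleared on its memory, so such
 * nodes are never treated as hot-removable.
 */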
static int __init numa_register_memblks(struct numa_meminfo *mi)
{
        unsigned long uninitialized_var(pfn_align);
        int i, nid;

        /* Account for nodes with cpus and no memory */
        node_possible_map = numa_nodes_parsed;
        numa_nodemask_from_meminfo(&node_possible_map, mi);
        if (WARN_ON(nodes_empty(node_possible_map)))
                return -EINVAL;

        for (i = 0; i < mi->nr_blks; i++) {
                struct numa_memblk *mb = &mi->blk[i];

                memblock_set_node(mb->start, mb->end - mb->start,
                                  &memblock.memory, mb->nid);
        }

        /*
         * Very early on, the kernel has to use some memory, e.g. to load
         * the kernel image.  We cannot prevent this anyway, so any node
         * the kernel resides in must be un-hotpluggable.
         *
         * And by the time we get here, allocating node data won't fail.
         */
        numa_clear_kernel_node_hotplug();

        /*
         * If the sections array is going to be used for pfn -> nid mapping,
         * check whether its granularity is fine enough.
         */
#ifdef NODE_NOT_IN_PAGE_FLAGS
        pfn_align = node_map_pfn_alignment();
        if (pfn_align && pfn_align < PAGES_PER_SECTION) {
                printk(KERN_WARNING "Node alignment %LuMB < min %LuMB, rejecting NUMA config\n",
                       PFN_PHYS(pfn_align) >> 20,
                       PFN_PHYS(PAGES_PER_SECTION) >> 20);
                return -EINVAL;
        }
#endif
        if (!numa_meminfo_cover_memory(mi))
                return -EINVAL;

        /* Finally register nodes. */
        for_each_node_mask(nid, node_possible_map) {
                u64 start = PFN_PHYS(max_pfn);
                u64 end = 0;

                for (i = 0; i < mi->nr_blks; i++) {
                        if (nid != mi->blk[i].nid)
                                continue;
                        start = min(mi->blk[i].start, start);
                        end = max(mi->blk[i].end, end);
                }

                if (start >= end)
                        continue;

                /*
                 * Don't confuse VM with a node that doesn't have the
                 * minimum amount of memory:
                 */
                if (end && (end - start) < NODE_MIN_SIZE)
                        continue;

                alloc_node_data(nid);
        }

        /* Dump memblock with node info and return. */
        memblock_dump_all();
        return 0;
}
/*
 * There are unfortunately some poorly designed mainboards around that
 * only connect memory to a single CPU.  This breaks the 1:1 cpu->node
 * mapping.  To avoid this, fill in the mapping for all possible CPUs,
 * as the number of CPUs is not known yet.  We round robin the existing
 * nodes.
 */
static void __init numa_init_array(void)
{
        int rr, i;

        rr = first_node(node_online_map);
        for (i = 0; i < nr_cpu_ids; i++) {
                if (early_cpu_to_node(i) != NUMA_NO_NODE)
                        continue;
                numa_set_node(i, rr);
                rr = next_node(rr, node_online_map);
                if (rr == MAX_NUMNODES)
                        rr = first_node(node_online_map);
        }
}
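
/*
 * Round-robin example: with nodes {0, 1} online and four CPUs still at
 * NUMA_NO_NODE, the loop above assigns cpu0->node0, cpu1->node1,
 * cpu2->node0, cpu3->node1, wrapping back via first_node() when
 * next_node() runs past the mask.
 */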
static int __init numa_init(int (*init_func)(void))
{
        int i;
        int ret;

        for (i = 0; i < MAX_LOCAL_APIC; i++)
                set_apicid_to_node(i, NUMA_NO_NODE);

        nodes_clear(numa_nodes_parsed);
        nodes_clear(node_possible_map);
        nodes_clear(node_online_map);
        memset(&numa_meminfo, 0, sizeof(numa_meminfo));
        WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.memory,
                                  MAX_NUMNODES));
        WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.reserved,
                                  MAX_NUMNODES));
        /* In case that parsing SRAT failed. */
        WARN_ON(memblock_clear_hotplug(0, ULLONG_MAX));
        numa_reset_distance();

        ret = init_func();
        if (ret < 0)
                return ret;

        /*
         * We reset memblock back to the top-down direction here because
         * if we configured ACPI_NUMA, we have parsed SRAT in init_func().
         * It is OK to reset this even if we didn't configure ACPI_NUMA,
         * or if the ACPI NUMA init fails and falls back to dummy NUMA
         * init.
         */
        memblock_set_bottom_up(false);

        ret = numa_cleanup_meminfo(&numa_meminfo);
        if (ret < 0)
                return ret;

        numa_emulation(&numa_meminfo, numa_distance_cnt);

        ret = numa_register_memblks(&numa_meminfo);
        if (ret < 0)
                return ret;

        for (i = 0; i < nr_cpu_ids; i++) {
                int nid = early_cpu_to_node(i);

                if (nid == NUMA_NO_NODE)
                        continue;
                if (!node_online(nid))
                        numa_clear_node(i);
        }
        numa_init_array();

        return 0;
}
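
/*
 * Overall flow of numa_init(): reset all state, run the platform's
 * init_func() to parse the topology, sanitize the result with
 * numa_cleanup_meminfo(), optionally apply NUMA emulation, register the
 * nodes, then clear CPUs whose parsed node never came online so that
 * numa_init_array() can round-robin them.
 */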
/**
 * dummy_numa_init - Fallback dummy NUMA init
 *
 * Used if there's no underlying NUMA architecture, NUMA initialization
 * fails, or NUMA is disabled on the command line.
 *
 * Must online at least one node and add memory blocks that cover all
 * allowed memory.  This function must not fail.
 */
static int __init dummy_numa_init(void)
{
        printk(KERN_INFO "%s\n",
               numa_off ? "NUMA turned off" : "No NUMA configuration found");
        printk(KERN_INFO "Faking a node at [mem %#018Lx-%#018Lx]\n",
               0LLU, PFN_PHYS(max_pfn) - 1);

        node_set(0, numa_nodes_parsed);
        numa_add_memblk(0, 0, PFN_PHYS(max_pfn));

        return 0;
}
/**
 * x86_numa_init - Initialize NUMA
 *
 * Try each configured NUMA initialization method until one succeeds.  The
 * last fallback is a dummy single-node config encompassing all memory,
 * which never fails.
 */
void __init x86_numa_init(void)
{
        if (!numa_off) {
#ifdef CONFIG_ACPI_NUMA
                if (!numa_init(x86_acpi_numa_init))
                        return;
#endif
#ifdef CONFIG_AMD_NUMA
                if (!numa_init(amd_numa_init))
                        return;
#endif
        }

        numa_init(dummy_numa_init);
}
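
/*
 * The probe order above is fixed: ACPI SRAT first (if CONFIG_ACPI_NUMA),
 * then the AMD northbridge tables (if CONFIG_AMD_NUMA), and finally the
 * single-node dummy config, which by contract cannot fail.
 */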
static __init int find_near_online_node(int node)
{
        int n, val;
        int min_val = INT_MAX;
        int best_node = -1;

        for_each_online_node(n) {
                val = node_distance(node, n);

                if (val < min_val) {
                        min_val = val;
                        best_node = n;
                }
        }

        return best_node;
}
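
/*
 * Note: this is a linear scan over the online nodes using the distance
 * table; it returns -1 only if no node is online, which should not happen
 * once x86_numa_init() has run, since even the dummy config onlines
 * node 0.
 */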
/*
 * Setup early cpu_to_node.
 *
 * Populate cpu_to_node[] only if x86_cpu_to_apicid[] and apicid_to_node[]
 * tables have valid entries for a CPU.  This means we skip cpu_to_node[]
 * initialisation for the NUMA emulation and fake-node cases (when running
 * a kernel compiled for NUMA on a non-NUMA box), which is OK as
 * cpu_to_node[] is already initialized in a round robin manner in
 * numa_init_array(), prior to this call, and this initialization is good
 * enough for the fake NUMA cases.
 *
 * Called before the per_cpu areas are setup.
 */
void __init init_cpu_to_node(void)
{
        int cpu;
        u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);

        BUG_ON(cpu_to_apicid == NULL);

        for_each_possible_cpu(cpu) {
                int node = numa_cpu_node(cpu);

                if (node == NUMA_NO_NODE)
                        continue;

                if (!node_online(node))
                        node = find_near_online_node(node);

                numa_set_node(cpu, node);
        }
}
#ifndef CONFIG_DEBUG_PER_CPU_MAPS

# ifndef CONFIG_NUMA_EMU
void numa_add_cpu(int cpu)
{
        cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void numa_remove_cpu(int cpu)
{
        cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
# endif /* !CONFIG_NUMA_EMU */

#else /* !CONFIG_DEBUG_PER_CPU_MAPS */
int __cpu_to_node(int cpu)
{
        if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
                printk(KERN_WARNING
                       "cpu_to_node(%d): usage too early!\n", cpu);
                dump_stack();
                return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
        }
        return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(__cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */
int early_cpu_to_node(int cpu)
{
        if (early_per_cpu_ptr(x86_cpu_to_node_map))
                return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

        if (!cpu_possible(cpu)) {
                printk(KERN_WARNING
                       "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
                dump_stack();
                return NUMA_NO_NODE;
        }
        return per_cpu(x86_cpu_to_node_map, cpu);
}
void debug_cpumask_set_cpu(int cpu, int node, bool enable)
{
        struct cpumask *mask;

        if (node == NUMA_NO_NODE) {
                /* early_cpu_to_node() already emits a warning and trace */
                return;
        }
        mask = node_to_cpumask_map[node];
        if (!mask) {
                pr_err("node_to_cpumask_map[%i] NULL\n", node);
                dump_stack();
                return;
        }

        if (enable)
                cpumask_set_cpu(cpu, mask);
        else
                cpumask_clear_cpu(cpu, mask);

        printk(KERN_DEBUG "%s cpu %d node %d: mask now %*pbl\n",
               enable ? "numa_add_cpu" : "numa_remove_cpu",
               cpu, node, cpumask_pr_args(mask));
}
# ifndef CONFIG_NUMA_EMU
static void numa_set_cpumask(int cpu, bool enable)
{
        debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
}

void numa_add_cpu(int cpu)
{
        numa_set_cpumask(cpu, true);
}

void numa_remove_cpu(int cpu)
{
        numa_set_cpumask(cpu, false);
}
# endif /* !CONFIG_NUMA_EMU */
/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const struct cpumask *cpumask_of_node(int node)
{
        if (node >= nr_node_ids) {
                printk(KERN_WARNING
                       "cpumask_of_node(%d): node > nr_node_ids(%d)\n",
                       node, nr_node_ids);
                dump_stack();
                return cpu_none_mask;
        }
        if (node_to_cpumask_map[node] == NULL) {
                printk(KERN_WARNING
                       "cpumask_of_node(%d): no node_to_cpumask_map!\n",
                       node);
                dump_stack();
                return cpu_online_mask;
        }
        return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);

#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
#ifdef CONFIG_MEMORY_HOTPLUG
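/*
 * Map a physical address being hot-added to the node that owns it.  Falls
 * back to the nid of the first recorded memblk when no range matches,
 * which at least yields a valid node.
 */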
int memory_add_physaddr_to_nid(u64 start)
{
        struct numa_meminfo *mi = &numa_meminfo;
        int nid = mi->blk[0].nid;
        int i;

        for (i = 0; i < mi->nr_blks; i++)
                if (mi->blk[i].start <= start && mi->blk[i].end > start)
                        nid = mi->blk[i].nid;
        return nid;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif