numa.c

/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) "numa: " fmt

#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <linux/cpuset.h>
#include <linux/node.h>
#include <linux/stop_machine.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <asm/cputhreads.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/topology.h>
#include <asm/firmware.h>
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/setup.h>
#include <asm/vdso.h>
#include <asm/drmem.h>

static int numa_enabled = 1;

static char *cmdline __initdata;

static int numa_debug;
#define dbg(args...) do { if (numa_debug) printk(KERN_INFO args); } while (0)
int numa_cpu_lookup_table[NR_CPUS];
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);

static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;
static int form1_affinity;

#define MAX_DISTANCE_REF_POINTS 4
static int distance_ref_points_depth;
static const __be32 *distance_ref_points;
static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES)
		setup_nr_node_ids();

	/* allocate the map */
	for_each_node(node)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
}

static int __init fake_numa_create_new_node(unsigned long end_pfn,
						unsigned int *nid)
{
	unsigned long long mem;
	char *p = cmdline;
	static unsigned int fake_nid;
	static unsigned long long curr_boundary;

	/*
	 * Modify node id, iff we started creating NUMA nodes
	 * We want to continue from where we left off the last time
	 */
	if (fake_nid)
		*nid = fake_nid;
	/*
	 * In case there are no more arguments to parse, the
	 * node_id should be the same as the last fake node id
	 * (we've handled this above).
	 */
	if (!p)
		return 0;

	mem = memparse(p, &p);
	if (!mem)
		return 0;

	if (mem < curr_boundary)
		return 0;

	curr_boundary = mem;

	if ((end_pfn << PAGE_SHIFT) > mem) {
		/*
		 * Skip commas and spaces
		 */
		while (*p == ',' || *p == ' ' || *p == '\t')
			p++;

		cmdline = p;
		fake_nid++;
		*nid = fake_nid;
		dbg("created new fake_node with id %d\n", fake_nid);
		return 1;
	}
	return 0;
}
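
/*
 * Illustrative example: booting with "numa=fake=1G,4G" makes the
 * function above put memory below 1GB in fake node 0 and memory from
 * 1GB up to 4GB in fake node 1; each comma-separated value accepted
 * by memparse() is the upper boundary of one fake node.
 */
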
static void reset_numa_cpu_lookup_table(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		numa_cpu_lookup_table[cpu] = -1;
}

static void update_numa_cpu_lookup_table(unsigned int cpu, int node)
{
	numa_cpu_lookup_table[cpu] = node;
}

static void map_cpu_to_node(int cpu, int node)
{
	update_numa_cpu_lookup_table(cpu, node);

	dbg("adding cpu %d to node %d\n", cpu, node);

	if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
}

#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
static void unmap_cpu_from_node(unsigned long cpu)
{
	int node = numa_cpu_lookup_table[cpu];

	dbg("removing cpu %lu from node %d\n", cpu, node);

	if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
	} else {
		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
		       cpu, node);
	}
}
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */

/* must hold reference to node during call */
static const __be32 *of_get_associativity(struct device_node *dev)
{
	return of_get_property(dev, "ibm,associativity", NULL);
}

int __node_distance(int a, int b)
{
	int i;
	int distance = LOCAL_DISTANCE;

	if (!form1_affinity)
		return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);

	for (i = 0; i < distance_ref_points_depth; i++) {
		if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
			break;

		/* Double the distance for each NUMA level */
		distance *= 2;
	}

	return distance;
}
EXPORT_SYMBOL(__node_distance);
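
/*
 * Worked example for __node_distance() above: with
 * distance_ref_points_depth == 2, nodes whose lookup table entries
 * match at level 0 are LOCAL_DISTANCE (10) apart; nodes that differ
 * at level 0 but match at level 1 are 20 apart; nodes that differ at
 * both levels are 40 apart, since the distance doubles at each
 * non-matching level.
 */
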
  167. EXPORT_SYMBOL(__node_distance);
  168. static void initialize_distance_lookup_table(int nid,
  169. const __be32 *associativity)
  170. {
  171. int i;
  172. if (!form1_affinity)
  173. return;
  174. for (i = 0; i < distance_ref_points_depth; i++) {
  175. const __be32 *entry;
  176. entry = &associativity[be32_to_cpu(distance_ref_points[i]) - 1];
  177. distance_lookup_table[nid][i] = of_read_number(entry, 1);
  178. }
  179. }
  180. /* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
  181. * info is found.
  182. */
  183. static int associativity_to_nid(const __be32 *associativity)
  184. {
  185. int nid = -1;
  186. if (min_common_depth == -1)
  187. goto out;
  188. if (of_read_number(associativity, 1) >= min_common_depth)
  189. nid = of_read_number(&associativity[min_common_depth], 1);
  190. /* POWER4 LPAR uses 0xffff as invalid node */
  191. if (nid == 0xffff || nid >= MAX_NUMNODES)
  192. nid = -1;
  193. if (nid > 0 &&
  194. of_read_number(associativity, 1) >= distance_ref_points_depth) {
  195. /*
  196. * Skip the length field and send start of associativity array
  197. */
  198. initialize_distance_lookup_table(nid, associativity + 1);
  199. }
  200. out:
  201. return nid;
  202. }
  203. /* Returns the nid associated with the given device tree node,
  204. * or -1 if not found.
  205. */
  206. static int of_node_to_nid_single(struct device_node *device)
  207. {
  208. int nid = -1;
  209. const __be32 *tmp;
  210. tmp = of_get_associativity(device);
  211. if (tmp)
  212. nid = associativity_to_nid(tmp);
  213. return nid;
  214. }
  215. /* Walk the device tree upwards, looking for an associativity id */
  216. int of_node_to_nid(struct device_node *device)
  217. {
  218. int nid = -1;
  219. of_node_get(device);
  220. while (device) {
  221. nid = of_node_to_nid_single(device);
  222. if (nid != -1)
  223. break;
  224. device = of_get_next_parent(device);
  225. }
  226. of_node_put(device);
  227. return nid;
  228. }
  229. EXPORT_SYMBOL(of_node_to_nid);
  230. static int __init find_min_common_depth(void)
  231. {
  232. int depth;
  233. struct device_node *root;
  234. if (firmware_has_feature(FW_FEATURE_OPAL))
  235. root = of_find_node_by_path("/ibm,opal");
  236. else
  237. root = of_find_node_by_path("/rtas");
  238. if (!root)
  239. root = of_find_node_by_path("/");
  240. /*
  241. * This property is a set of 32-bit integers, each representing
  242. * an index into the ibm,associativity nodes.
  243. *
  244. * With form 0 affinity the first integer is for an SMP configuration
  245. * (should be all 0's) and the second is for a normal NUMA
  246. * configuration. We have only one level of NUMA.
  247. *
  248. * With form 1 affinity the first integer is the most significant
  249. * NUMA boundary and the following are progressively less significant
  250. * boundaries. There can be more than one level of NUMA.
  251. */
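	/*
	 * Illustrative value: a form 1 pseries machine might expose
	 * ibm,associativity-reference-points = <4 4>, i.e. the fourth
	 * entry of each ibm,associativity array defines the significant
	 * NUMA boundary.
	 */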
	distance_ref_points = of_get_property(root,
					"ibm,associativity-reference-points",
					&distance_ref_points_depth);

	if (!distance_ref_points) {
		dbg("NUMA: ibm,associativity-reference-points not found.\n");
		goto err;
	}

	distance_ref_points_depth /= sizeof(int);

	if (firmware_has_feature(FW_FEATURE_OPAL) ||
	    firmware_has_feature(FW_FEATURE_TYPE1_AFFINITY)) {
		dbg("Using form 1 affinity\n");
		form1_affinity = 1;
	}

	if (form1_affinity) {
		depth = of_read_number(distance_ref_points, 1);
	} else {
		if (distance_ref_points_depth < 2) {
			printk(KERN_WARNING "NUMA: "
				"short ibm,associativity-reference-points\n");
			goto err;
		}

		depth = of_read_number(&distance_ref_points[1], 1);
	}

	/*
	 * Warn and cap if the hardware supports more than
	 * MAX_DISTANCE_REF_POINTS domains.
	 */
	if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
		printk(KERN_WARNING "NUMA: distance array capped at "
			"%d entries\n", MAX_DISTANCE_REF_POINTS);
		distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
	}

	of_node_put(root);
	return depth;

err:
	of_node_put(root);
	return -1;
}

static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
	struct device_node *memory = NULL;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		panic("numa.c: No memory nodes found!");

	*n_addr_cells = of_n_addr_cells(memory);
	*n_size_cells = of_n_size_cells(memory);
	of_node_put(memory);
}

static unsigned long read_n_cells(int n, const __be32 **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | of_read_number(*buf, 1);
		(*buf)++;
	}
	return result;
}
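
/*
 * Example for read_n_cells() above: with n == 2 and cells
 * {0x00000001, 0x00000000}, the result is 0x100000000 -- the 32-bit
 * big-endian cells are concatenated most significant cell first.
 */
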
struct assoc_arrays {
	u32	n_arrays;
	u32	array_sz;
	const __be32 *arrays;
};

/*
 * Retrieve and validate the list of associativity arrays for drconf
 * memory from the ibm,associativity-lookup-arrays property of the
 * device tree.
 *
 * The layout of the ibm,associativity-lookup-arrays property is a number N
 * indicating the number of associativity arrays, followed by a number M
 * indicating the size of each associativity array, followed by a list
 * of N associativity arrays.
 */
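
/*
 * Illustrative layout: for N = 2 arrays of M = 4 entries each, the
 * property cells read <2 4 a0 a1 a2 a3 b0 b1 b2 b3>, where a0..a3
 * and b0..b3 are the two associativity arrays.
 */
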
static int of_get_assoc_arrays(struct assoc_arrays *aa)
{
	struct device_node *memory;
	const __be32 *prop;
	u32 len;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!memory)
		return -1;

	prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
	if (!prop || len < 2 * sizeof(unsigned int)) {
		of_node_put(memory);
		return -1;
	}

	aa->n_arrays = of_read_number(prop++, 1);
	aa->array_sz = of_read_number(prop++, 1);

	of_node_put(memory);

	/* Now that we know the number of arrays and size of each array,
	 * revalidate the size of the property read in.
	 */
	if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
		return -1;

	aa->arrays = prop;
	return 0;
}

/*
 * This is like of_node_to_nid_single() for memory represented in the
 * ibm,dynamic-reconfiguration-memory node.
 */
static int of_drconf_to_nid_single(struct drmem_lmb *lmb)
{
	struct assoc_arrays aa = { .arrays = NULL };
	int default_nid = 0;
	int nid = default_nid;
	int rc, index;

	rc = of_get_assoc_arrays(&aa);
	if (rc)
		return default_nid;

	if (min_common_depth > 0 && min_common_depth <= aa.array_sz &&
	    !(lmb->flags & DRCONF_MEM_AI_INVALID) &&
	    lmb->aa_index < aa.n_arrays) {
		index = lmb->aa_index * aa.array_sz + min_common_depth - 1;
		nid = of_read_number(&aa.arrays[index], 1);

		if (nid == 0xffff || nid >= MAX_NUMNODES)
			nid = default_nid;

		if (nid > 0) {
			index = lmb->aa_index * aa.array_sz;
			initialize_distance_lookup_table(nid,
							&aa.arrays[index]);
		}
	}

	return nid;
}

/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int numa_setup_cpu(unsigned long lcpu)
{
	int nid = -1;
	struct device_node *cpu;

	/*
	 * If a valid cpu-to-node mapping is already available, use it
	 * directly instead of querying the firmware, since it represents
	 * the most recent mapping notified to us by the platform (eg: VPHN).
	 */
	if ((nid = numa_cpu_lookup_table[lcpu]) >= 0) {
		map_cpu_to_node(lcpu, nid);
		return nid;
	}

	cpu = of_get_cpu_node(lcpu, NULL);

	if (!cpu) {
		WARN_ON(1);
		if (cpu_present(lcpu))
			goto out_present;
		else
			goto out;
	}

	nid = of_node_to_nid_single(cpu);

out_present:
	if (nid < 0 || !node_online(nid))
		nid = first_online_node;

	map_cpu_to_node(lcpu, nid);
	of_node_put(cpu);
out:
	return nid;
}

static void verify_cpu_node_mapping(int cpu, int node)
{
	int base, sibling, i;

	/* Verify that all the threads in the core belong to the same node */
	base = cpu_first_thread_sibling(cpu);

	for (i = 0; i < threads_per_core; i++) {
		sibling = base + i;

		if (sibling == cpu || cpu_is_offline(sibling))
			continue;

		if (cpu_to_node(sibling) != node) {
			WARN(1, "CPU thread siblings %d and %d don't belong"
				" to the same node!\n", cpu, sibling);
			break;
		}
	}
}

/* Must run before sched domains notifier. */
static int ppc_numa_cpu_prepare(unsigned int cpu)
{
	int nid;

	nid = numa_setup_cpu(cpu);
	verify_cpu_node_mapping(cpu, nid);
	return 0;
}

static int ppc_numa_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	unmap_cpu_from_node(cpu);
#endif
	return 0;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
{
	/*
	 * We use memblock_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit. Also, in the case of
	 * iommu_is_off, memory_limit is not set but is implicitly enforced.
	 */
	if (start + size <= memblock_end_of_DRAM())
		return size;

	if (start >= memblock_end_of_DRAM())
		return 0;

	return memblock_end_of_DRAM() - start;
}
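
/*
 * Example: if memblock_end_of_DRAM() is 4GB, a region starting at 3GB
 * with size 2GB is truncated to 1GB, and a region starting at or above
 * 4GB is discarded (0 is returned).
 */
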
/*
 * Reads the counter for a given entry in
 * linux,drconf-usable-memory property
 */
static inline int __init read_usm_ranges(const __be32 **usm)
{
	/*
	 * For each lmb in ibm,dynamic-memory a corresponding
	 * entry in linux,drconf-usable-memory property contains
	 * a counter followed by that many (base, size) pairs.
	 * Read the counter from linux,drconf-usable-memory.
	 */
	return read_n_cells(n_mem_size_cells, usm);
}
/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node. This assumes n_mem_{addr,size}_cells have been set.
 */
static void __init numa_setup_drmem_lmb(struct drmem_lmb *lmb,
					const __be32 **usm)
{
	unsigned int ranges, is_kexec_kdump = 0;
	unsigned long base, size, sz;
	int nid;

	/*
	 * Skip this block if the reserved bit is set in flags (0x80)
	 * or if the block is not assigned to this partition (0x8)
	 */
	if ((lmb->flags & DRCONF_MEM_RESERVED)
	    || !(lmb->flags & DRCONF_MEM_ASSIGNED))
		return;

	if (*usm)
		is_kexec_kdump = 1;

	base = lmb->base_addr;
	size = drmem_lmb_size();
	ranges = 1;

	if (is_kexec_kdump) {
		ranges = read_usm_ranges(usm);
		if (!ranges) /* there are no (base, size) pairs */
			return;
	}

	do {
		if (is_kexec_kdump) {
			base = read_n_cells(n_mem_addr_cells, usm);
			size = read_n_cells(n_mem_size_cells, usm);
		}

		nid = of_drconf_to_nid_single(lmb);
		fake_numa_create_new_node(((base + size) >> PAGE_SHIFT),
					  &nid);
		node_set_online(nid);
		sz = numa_enforce_memory_limit(base, size);
		if (sz)
			memblock_set_node(base, sz, &memblock.memory, nid);
	} while (--ranges);
}

static int __init parse_numa_properties(void)
{
	struct device_node *memory;
	int default_nid = 0;
	unsigned long i;

	if (numa_enabled == 0) {
		printk(KERN_WARNING "NUMA disabled by user\n");
		return -1;
	}

	min_common_depth = find_min_common_depth();

	if (min_common_depth < 0)
		return min_common_depth;

	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

	/*
	 * Even though we connect cpus to numa domains later in SMP
	 * init, we need to know the node ids now. This is because
	 * each node to be onlined must have NODE_DATA etc backing it.
	 */
	for_each_present_cpu(i) {
		struct device_node *cpu;
		int nid;

		cpu = of_get_cpu_node(i, NULL);
		BUG_ON(!cpu);
		nid = of_node_to_nid_single(cpu);
		of_node_put(cpu);

		/*
		 * Don't fall back to default_nid yet -- we will plug
		 * cpus into nodes once the memory scan has discovered
		 * the topology.
		 */
		if (nid < 0)
			continue;
		node_set_online(nid);
	}

	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);

	for_each_node_by_type(memory, "memory") {
		unsigned long start;
		unsigned long size;
		int nid;
		int ranges;
		const __be32 *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory,
			"linux,usable-memory", &len);
		if (!memcell_buf || len <= 0)
			memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);

		/*
		 * Assumption: either all memory nodes or none will
		 * have associativity properties. If none, then
		 * everything goes to default_nid.
		 */
		nid = of_node_to_nid_single(memory);
		if (nid < 0)
			nid = default_nid;

		fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
		node_set_online(nid);

		size = numa_enforce_memory_limit(start, size);
		if (size)
			memblock_set_node(start, size, &memblock.memory, nid);

		if (--ranges)
			goto new_range;
	}

	/*
	 * Now do the same thing for each MEMBLOCK listed in the
	 * ibm,dynamic-memory property in the
	 * ibm,dynamic-reconfiguration-memory node.
	 */
	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		walk_drmem_lmbs(memory, numa_setup_drmem_lmb);
		of_node_put(memory);
	}

	return 0;
}

static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = memblock_end_of_DRAM();
	unsigned long total_ram = memblock_phys_mem_size();
	unsigned long start_pfn, end_pfn;
	unsigned int nid = 0;
	struct memblock_region *reg;

	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	for_each_memblock(memory, reg) {
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);

		fake_numa_create_new_node(end_pfn, &nid);
		memblock_set_node(PFN_PHYS(start_pfn),
				  PFN_PHYS(end_pfn - start_pfn),
				  &memblock.memory, nid);
		node_set_online(nid);
	}
}

void __init dump_numa_cpu_topology(void)
{
	unsigned int node;
	unsigned int cpu, count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		pr_info("Node %d CPUs:", node);

		count = 0;
		/*
		 * If we used a CPU iterator here we would miss printing
		 * the holes in the cpumap.
		 */
		for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
			if (cpumask_test_cpu(cpu,
					node_to_cpumask_map[node])) {
				if (count == 0)
					pr_cont(" %u", cpu);
				++count;
			} else {
				if (count > 1)
					pr_cont("-%u", cpu - 1);
				count = 0;
			}
		}

		if (count > 1)
			pr_cont("-%u", nr_cpu_ids - 1);
		pr_cont("\n");
	}
}

/* Initialize NODE_DATA for a node on the local memory */
static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
{
	u64 spanned_pages = end_pfn - start_pfn;
	const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
	u64 nd_pa;
	void *nd;
	int tnid;

	nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
	nd = __va(nd_pa);

	/* report and initialize */
	pr_info(" NODE_DATA [mem %#010Lx-%#010Lx]\n",
		nd_pa, nd_pa + nd_size - 1);
	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
	if (tnid != nid)
		pr_info(" NODE_DATA(%d) on node %d\n", nid, tnid);

	node_data[nid] = nd;
	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
	NODE_DATA(nid)->node_id = nid;
	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = spanned_pages;
}

void __init initmem_init(void)
{
	int nid, cpu;

	max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

	if (parse_numa_properties())
		setup_nonnuma();

	memblock_dump_all();

	/*
	 * Reduce the possible NUMA nodes to the online NUMA nodes,
	 * since we do not support node hotplug. This ensures that we
	 * lower the maximum NUMA node ID to what is actually present.
	 */
	nodes_and(node_possible_map, node_possible_map, node_online_map);

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
		setup_node_data(nid, start_pfn, end_pfn);
		sparse_memory_present_with_active_regions(nid);
	}

	sparse_init();

	setup_node_to_cpumask_map();

	reset_numa_cpu_lookup_table();

	/*
	 * We need the numa_cpu_lookup_table to be accurate for all CPUs,
	 * even before we online them, so that we can use cpu_to_{node,mem}
	 * early in boot, cf. smp_prepare_cpus().
	 * _nocalls() + manual invocation is used because cpuhp is not yet
	 * initialized for the boot CPU.
	 */
	cpuhp_setup_state_nocalls(CPUHP_POWER_NUMA_PREPARE, "powerpc/numa:prepare",
				  ppc_numa_cpu_prepare, ppc_numa_cpu_dead);
	for_each_present_cpu(cpu)
		numa_setup_cpu(cpu);
}

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	p = strstr(p, "fake=");
	if (p)
		cmdline = p + strlen("fake=");

	return 0;
}
early_param("numa", early_numa);
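
/*
 * Summary of the accepted forms parsed above: "numa=off" disables
 * NUMA, "numa=debug" enables the dbg() messages, and "numa=fake=..."
 * hands the rest of the string to fake_numa_create_new_node().
 */
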
static bool topology_updates_enabled = true;

static int __init early_topology_updates(char *p)
{
	if (!p)
		return 0;

	if (!strcmp(p, "off")) {
		pr_info("Disabling topology updates\n");
		topology_updates_enabled = false;
	}

	return 0;
}
early_param("topology_updates", early_topology_updates);

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section for
 * memory represented in the device tree by the property
 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
 */
static int hot_add_drconf_scn_to_nid(unsigned long scn_addr)
{
	struct drmem_lmb *lmb;
	unsigned long lmb_size;
	int nid = -1;

	lmb_size = drmem_lmb_size();

	for_each_drmem_lmb(lmb) {
		/* skip this block if it is reserved or not assigned to
		 * this partition */
		if ((lmb->flags & DRCONF_MEM_RESERVED)
		    || !(lmb->flags & DRCONF_MEM_ASSIGNED))
			continue;

		if ((scn_addr < lmb->base_addr)
		    || (scn_addr >= (lmb->base_addr + lmb_size)))
			continue;

		nid = of_drconf_to_nid_single(lmb);
		break;
	}

	return nid;
}

/*
 * Find the node associated with a hot added memory section for memory
 * represented in the device tree as a node (i.e. memory@XXXX) for
 * each memblock.
 */
static int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory;
	int nid = -1;

	for_each_node_by_type(memory, "memory") {
		unsigned long start, size;
		int ranges;
		const __be32 *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

		while (ranges--) {
			start = read_n_cells(n_mem_addr_cells, &memcell_buf);
			size = read_n_cells(n_mem_size_cells, &memcell_buf);

			if ((scn_addr < start) || (scn_addr >= (start + size)))
				continue;

			nid = of_node_to_nid_single(memory);
			break;
		}

		if (nid >= 0)
			break;
	}

	of_node_put(memory);

	return nid;
}

/*
 * Find the node associated with a hot added memory section. Section
 * corresponds to a SPARSEMEM section, not a MEMBLOCK. It is assumed that
 * sections are fully contained within a single MEMBLOCK.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	int nid;

	if (!numa_enabled || (min_common_depth < 0))
		return first_online_node;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		nid = hot_add_drconf_scn_to_nid(scn_addr);
		of_node_put(memory);
	} else {
		nid = hot_add_node_scn_to_nid(scn_addr);
	}

	if (nid < 0 || !node_possible(nid))
		nid = first_online_node;

	return nid;
}

static u64 hot_add_drconf_memory_max(void)
{
	struct device_node *memory = NULL;
	struct device_node *dn = NULL;
	const __be64 *lrdr = NULL;

	dn = of_find_node_by_path("/rtas");
	if (dn) {
		lrdr = of_get_property(dn, "ibm,lrdr-capacity", NULL);
		of_node_put(dn);
		if (lrdr)
			return be64_to_cpup(lrdr);
	}

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		of_node_put(memory);
		return drmem_lmb_memory_max();
	}
	return 0;
}

/*
 * memory_hotplug_max - return max address of memory that may be added
 *
 * This is currently only used on systems that support drconfig memory
 * hotplug.
 */
u64 memory_hotplug_max(void)
{
	return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
}
#endif /* CONFIG_MEMORY_HOTPLUG */

/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR

#include "vphn.h"

struct topology_update_data {
	struct topology_update_data *next;
	unsigned int cpu;
	int old_nid;
	int new_nid;
};

#define TOPOLOGY_DEF_TIMER_SECS	60

static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
static cpumask_t cpu_associativity_changes_mask;
static int vphn_enabled;
static int prrn_enabled;
static void reset_topology_timer(void);
static int topology_timer_secs = 1;
static int topology_inited;
static int topology_update_needed;

/*
 * Change polling interval for associativity changes.
 */
int timed_topology_update(int nsecs)
{
	if (vphn_enabled) {
		if (nsecs > 0)
			topology_timer_secs = nsecs;
		else
			topology_timer_secs = TOPOLOGY_DEF_TIMER_SECS;

		reset_topology_timer();
	}
	return 0;
}

/*
 * Store the current values of the hypervisor's associativity change
 * counters.
 */
static void setup_cpu_associativity_change_counters(void)
{
	int cpu;

	/* The VPHN feature supports a maximum of 8 reference points */
	BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);

	for_each_possible_cpu(cpu) {
		int i;
		u8 *counts = vphn_cpu_change_counts[cpu];
		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

		for (i = 0; i < distance_ref_points_depth; i++)
			counts[i] = hypervisor_counts[i];
	}
}

/*
 * The hypervisor maintains a set of 8 associativity change counters in
 * the VPA of each cpu that correspond to the associativity levels in the
 * ibm,associativity-reference-points property. When an associativity
 * level changes, the corresponding counter is incremented.
 *
 * Set a bit in cpu_associativity_changes_mask for each cpu whose home
 * node associativity levels have changed.
 *
 * Returns the number of cpus with unhandled associativity changes.
 */
static int update_cpu_associativity_changes_mask(void)
{
	int cpu;
	cpumask_t *changes = &cpu_associativity_changes_mask;

	for_each_possible_cpu(cpu) {
		int i, changed = 0;
		u8 *counts = vphn_cpu_change_counts[cpu];
		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

		for (i = 0; i < distance_ref_points_depth; i++) {
			if (hypervisor_counts[i] != counts[i]) {
				counts[i] = hypervisor_counts[i];
				changed = 1;
			}
		}
		if (changed) {
			cpumask_or(changes, changes, cpu_sibling_mask(cpu));
			cpu = cpu_last_thread_sibling(cpu);
		}
	}

	return cpumask_weight(changes);
}
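
/*
 * Example for the function above: if the cached counters for a cpu
 * were {2, 1, 0, 0} and the hypervisor now reports {3, 1, 0, 0}, the
 * level 0 associativity has changed, so the cpu's whole sibling mask
 * is flagged in cpu_associativity_changes_mask.
 */
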
/*
 * Retrieve the new associativity information for a virtual processor's
 * home node.
 */
static long hcall_vphn(unsigned long cpu, __be32 *associativity)
{
	long rc;
	long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
	u64 flags = 1;
	int hwcpu = get_hard_smp_processor_id(cpu);

	rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
	vphn_unpack_associativity(retbuf, associativity);

	return rc;
}

static long vphn_get_associativity(unsigned long cpu,
					__be32 *associativity)
{
	long rc;

	rc = hcall_vphn(cpu, associativity);

	switch (rc) {
	case H_FUNCTION:
		printk(KERN_INFO
			"VPHN is not supported. Disabling polling...\n");
		stop_topology_update();
		break;
	case H_HARDWARE:
		printk(KERN_ERR
			"hcall_vphn() experienced a hardware fault "
			"preventing VPHN. Disabling polling...\n");
		stop_topology_update();
		break;
	case H_SUCCESS:
		dbg("VPHN hcall succeeded. Reset polling...\n");
		timed_topology_update(0);
		break;
	}

	return rc;
}

/*
 * Update the CPU maps and sysfs entries for a single CPU when its NUMA
 * characteristics change. This function doesn't perform any locking and is
 * only safe to call from stop_machine().
 */
static int update_cpu_topology(void *data)
{
	struct topology_update_data *update;
	unsigned long cpu;

	if (!data)
		return -EINVAL;

	cpu = smp_processor_id();

	for (update = data; update; update = update->next) {
		int new_nid = update->new_nid;
		if (cpu != update->cpu)
			continue;

		unmap_cpu_from_node(cpu);
		map_cpu_to_node(cpu, new_nid);
		set_cpu_numa_node(cpu, new_nid);
		set_cpu_numa_mem(cpu, local_memory_node(new_nid));
		vdso_getcpu_init();
	}

	return 0;
}

static int update_lookup_table(void *data)
{
	struct topology_update_data *update;

	if (!data)
		return -EINVAL;

	/*
	 * Upon topology update, the numa-cpu lookup table needs to be updated
	 * for all threads in the core, including offline CPUs, to ensure that
	 * future hotplug operations respect the cpu-to-node associativity
	 * properly.
	 */
	for (update = data; update; update = update->next) {
		int nid, base, j;

		nid = update->new_nid;
		base = cpu_first_thread_sibling(update->cpu);

		for (j = 0; j < threads_per_core; j++) {
			update_numa_cpu_lookup_table(base + j, nid);
		}
	}

	return 0;
}

/*
 * Update the node maps and sysfs entries for each cpu whose home node
 * has changed. Returns 1 when the topology has changed, and 0 otherwise.
 *
 * cpus_locked says whether we already hold cpu_hotplug_lock.
 */
int numa_update_cpu_topology(bool cpus_locked)
{
	unsigned int cpu, sibling, changed = 0;
	struct topology_update_data *updates, *ud;
	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
	cpumask_t updated_cpus;
	struct device *dev;
	int weight, new_nid, i = 0;

	if (!prrn_enabled && !vphn_enabled) {
		if (!topology_inited)
			topology_update_needed = 1;
		return 0;
	}

	weight = cpumask_weight(&cpu_associativity_changes_mask);
	if (!weight)
		return 0;

	updates = kzalloc(weight * (sizeof(*updates)), GFP_KERNEL);
	if (!updates)
		return 0;

	cpumask_clear(&updated_cpus);

	for_each_cpu(cpu, &cpu_associativity_changes_mask) {
		/*
		 * If siblings aren't flagged for changes, updates list
		 * will be too short. Skip on this update and set for next
		 * update.
		 */
		if (!cpumask_subset(cpu_sibling_mask(cpu),
					&cpu_associativity_changes_mask)) {
			pr_info("Sibling bits not set for associativity "
					"change, cpu%d\n", cpu);
			cpumask_or(&cpu_associativity_changes_mask,
					&cpu_associativity_changes_mask,
					cpu_sibling_mask(cpu));
			cpu = cpu_last_thread_sibling(cpu);
			continue;
		}

		/* Use associativity from first thread for all siblings */
		vphn_get_associativity(cpu, associativity);
		new_nid = associativity_to_nid(associativity);
		if (new_nid < 0 || !node_online(new_nid))
			new_nid = first_online_node;

		if (new_nid == numa_cpu_lookup_table[cpu]) {
			cpumask_andnot(&cpu_associativity_changes_mask,
					&cpu_associativity_changes_mask,
					cpu_sibling_mask(cpu));
			dbg("Assoc chg gives same node %d for cpu%d\n",
					new_nid, cpu);
			cpu = cpu_last_thread_sibling(cpu);
			continue;
		}

		for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
			ud = &updates[i++];
			ud->next = &updates[i];
			ud->cpu = sibling;
			ud->new_nid = new_nid;
			ud->old_nid = numa_cpu_lookup_table[sibling];
			cpumask_set_cpu(sibling, &updated_cpus);
		}
		cpu = cpu_last_thread_sibling(cpu);
	}

	/*
	 * Prevent processing of 'updates' from overflowing array
	 * where last entry filled in a 'next' pointer.
	 */
	if (i)
		updates[i-1].next = NULL;

	pr_debug("Topology update for the following CPUs:\n");
	if (cpumask_weight(&updated_cpus)) {
		for (ud = &updates[0]; ud; ud = ud->next) {
			pr_debug("cpu %d moving from node %d "
					  "to %d\n", ud->cpu,
					  ud->old_nid, ud->new_nid);
		}
	}

	/*
	 * In cases where we have nothing to update (because the updates list
	 * is too short or because the new topology is the same as the old
	 * one), skip invoking update_cpu_topology() via stop-machine(). This
	 * is necessary (and not just a fast-path optimization) since
	 * stop-machine can end up electing a random CPU to run
	 * update_cpu_topology(), and thus trick us into setting up incorrect
	 * cpu-node mappings (since 'updates' is kzalloc()'ed).
	 *
	 * And for the similar reason, we will skip all the following updating.
	 */
	if (!cpumask_weight(&updated_cpus))
		goto out;

	if (cpus_locked)
		stop_machine_cpuslocked(update_cpu_topology, &updates[0],
					&updated_cpus);
	else
		stop_machine(update_cpu_topology, &updates[0], &updated_cpus);

	/*
	 * Update the numa-cpu lookup table with the new mappings, even for
	 * offline CPUs. It is best to perform this update from the stop-
	 * machine context.
	 */
	if (cpus_locked)
		stop_machine_cpuslocked(update_lookup_table, &updates[0],
					cpumask_of(raw_smp_processor_id()));
	else
		stop_machine(update_lookup_table, &updates[0],
			     cpumask_of(raw_smp_processor_id()));

	for (ud = &updates[0]; ud; ud = ud->next) {
		unregister_cpu_under_node(ud->cpu, ud->old_nid);
		register_cpu_under_node(ud->cpu, ud->new_nid);

		dev = get_cpu_device(ud->cpu);
		if (dev)
			kobject_uevent(&dev->kobj, KOBJ_CHANGE);
		cpumask_clear_cpu(ud->cpu, &cpu_associativity_changes_mask);
		changed = 1;
	}

out:
	kfree(updates);
	topology_update_needed = 0;
	return changed;
}

int arch_update_cpu_topology(void)
{
	return numa_update_cpu_topology(true);
}

static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}
static DECLARE_WORK(topology_work, topology_work_fn);

static void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}

static void topology_timer_fn(struct timer_list *unused)
{
	if (prrn_enabled && cpumask_weight(&cpu_associativity_changes_mask))
		topology_schedule_update();
	else if (vphn_enabled) {
		if (update_cpu_associativity_changes_mask() > 0)
			topology_schedule_update();
		reset_topology_timer();
	}
}
static struct timer_list topology_timer;

static void reset_topology_timer(void)
{
	mod_timer(&topology_timer, jiffies + topology_timer_secs * HZ);
}

#ifdef CONFIG_SMP

static void stage_topology_update(int core_id)
{
	cpumask_or(&cpu_associativity_changes_mask,
		&cpu_associativity_changes_mask, cpu_sibling_mask(core_id));
	reset_topology_timer();
}

static int dt_update_callback(struct notifier_block *nb,
				unsigned long action, void *data)
{
	struct of_reconfig_data *update = data;
	int rc = NOTIFY_DONE;

	switch (action) {
	case OF_RECONFIG_UPDATE_PROPERTY:
		if (!of_prop_cmp(update->dn->type, "cpu") &&
		    !of_prop_cmp(update->prop->name, "ibm,associativity")) {
			u32 core_id;
			of_property_read_u32(update->dn, "reg", &core_id);
			stage_topology_update(core_id);
			rc = NOTIFY_OK;
		}
		break;
	}

	return rc;
}

static struct notifier_block dt_update_nb = {
	.notifier_call = dt_update_callback,
};

#endif

/*
 * Start polling for associativity changes.
 */
int start_topology_update(void)
{
	int rc = 0;

	if (firmware_has_feature(FW_FEATURE_PRRN)) {
		if (!prrn_enabled) {
			prrn_enabled = 1;
#ifdef CONFIG_SMP
			rc = of_reconfig_notifier_register(&dt_update_nb);
#endif
		}
	}
	if (firmware_has_feature(FW_FEATURE_VPHN) &&
		   lppaca_shared_proc(get_lppaca())) {
		if (!vphn_enabled) {
			vphn_enabled = 1;
			setup_cpu_associativity_change_counters();
			timer_setup(&topology_timer, topology_timer_fn,
				    TIMER_DEFERRABLE);
			reset_topology_timer();
		}
	}

	return rc;
}

/*
 * Disable polling for associativity changes.
 */
int stop_topology_update(void)
{
	int rc = 0;

	if (prrn_enabled) {
		prrn_enabled = 0;
#ifdef CONFIG_SMP
		rc = of_reconfig_notifier_unregister(&dt_update_nb);
#endif
	}
	if (vphn_enabled) {
		vphn_enabled = 0;
		rc = del_timer_sync(&topology_timer);
	}

	return rc;
}

int prrn_is_enabled(void)
{
	return prrn_enabled;
}

static int topology_read(struct seq_file *file, void *v)
{
	if (vphn_enabled || prrn_enabled)
		seq_puts(file, "on\n");
	else
		seq_puts(file, "off\n");

	return 0;
}

static int topology_open(struct inode *inode, struct file *file)
{
	return single_open(file, topology_read, NULL);
}

static ssize_t topology_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *off)
{
	char kbuf[4]; /* "on" or "off" plus null. */
	int read_len;

	read_len = count < 3 ? count : 3;
	if (copy_from_user(kbuf, buf, read_len))
		return -EINVAL;

	kbuf[read_len] = '\0';

	if (!strncmp(kbuf, "on", 2))
		start_topology_update();
	else if (!strncmp(kbuf, "off", 3))
		stop_topology_update();
	else
		return -EINVAL;

	return count;
}
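
/*
 * Usage example: polling can be toggled at runtime with
 * "echo on > /proc/powerpc/topology_updates" (or "off"); reading the
 * file back reports the current state via topology_read().
 */
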
static const struct file_operations topology_ops = {
	.read = seq_read,
	.write = topology_write,
	.open = topology_open,
	.release = single_release
};

static int topology_update_init(void)
{
	/* Do not poll for changes if disabled at boot */
	if (topology_updates_enabled)
		start_topology_update();

	if (vphn_enabled)
		topology_schedule_update();

	if (!proc_create("powerpc/topology_updates", 0644, NULL, &topology_ops))
		return -ENOMEM;

	topology_inited = 1;
	if (topology_update_needed)
		bitmap_fill(cpumask_bits(&cpu_associativity_changes_mask),
					nr_cpumask_bits);

	return 0;
}
device_initcall(topology_update_init);
#endif /* CONFIG_PPC_SPLPAR */