topology.c 5.8 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283
  1. /*
  2. * arch/arm64/kernel/topology.c
  3. *
  4. * Copyright (C) 2011,2013,2014 Linaro Limited.
  5. *
  6. * Based on the arm32 version written by Vincent Guittot in turn based on
  7. * arch/sh/kernel/topology.c
  8. *
  9. * This file is subject to the terms and conditions of the GNU General Public
  10. * License. See the file "COPYING" in the main directory of this archive
  11. * for more details.
  12. */
  13. #include <linux/cpu.h>
  14. #include <linux/cpumask.h>
  15. #include <linux/init.h>
  16. #include <linux/percpu.h>
  17. #include <linux/node.h>
  18. #include <linux/nodemask.h>
  19. #include <linux/of.h>
  20. #include <linux/sched.h>
  21. #include <asm/topology.h>
  22. static int __init get_cpu_for_node(struct device_node *node)
  23. {
  24. struct device_node *cpu_node;
  25. int cpu;
  26. cpu_node = of_parse_phandle(node, "cpu", 0);
  27. if (!cpu_node)
  28. return -1;
  29. for_each_possible_cpu(cpu) {
  30. if (of_get_cpu_node(cpu, NULL) == cpu_node) {
  31. of_node_put(cpu_node);
  32. return cpu;
  33. }
  34. }
  35. pr_crit("Unable to find CPU node for %s\n", cpu_node->full_name);
  36. of_node_put(cpu_node);
  37. return -1;
  38. }
  39. static int __init parse_core(struct device_node *core, int cluster_id,
  40. int core_id)
  41. {
  42. char name[10];
  43. bool leaf = true;
  44. int i = 0;
  45. int cpu;
  46. struct device_node *t;
  47. do {
  48. snprintf(name, sizeof(name), "thread%d", i);
  49. t = of_get_child_by_name(core, name);
  50. if (t) {
  51. leaf = false;
  52. cpu = get_cpu_for_node(t);
  53. if (cpu >= 0) {
  54. cpu_topology[cpu].cluster_id = cluster_id;
  55. cpu_topology[cpu].core_id = core_id;
  56. cpu_topology[cpu].thread_id = i;
  57. } else {
  58. pr_err("%s: Can't get CPU for thread\n",
  59. t->full_name);
  60. of_node_put(t);
  61. return -EINVAL;
  62. }
  63. of_node_put(t);
  64. }
  65. i++;
  66. } while (t);
  67. cpu = get_cpu_for_node(core);
  68. if (cpu >= 0) {
  69. if (!leaf) {
  70. pr_err("%s: Core has both threads and CPU\n",
  71. core->full_name);
  72. return -EINVAL;
  73. }
  74. cpu_topology[cpu].cluster_id = cluster_id;
  75. cpu_topology[cpu].core_id = core_id;
  76. } else if (leaf) {
  77. pr_err("%s: Can't get CPU for leaf core\n", core->full_name);
  78. return -EINVAL;
  79. }
  80. return 0;
  81. }
  82. static int __init parse_cluster(struct device_node *cluster, int depth)
  83. {
  84. char name[10];
  85. bool leaf = true;
  86. bool has_cores = false;
  87. struct device_node *c;
  88. static int cluster_id __initdata;
  89. int core_id = 0;
  90. int i, ret;
  91. /*
  92. * First check for child clusters; we currently ignore any
  93. * information about the nesting of clusters and present the
  94. * scheduler with a flat list of them.
  95. */
  96. i = 0;
  97. do {
  98. snprintf(name, sizeof(name), "cluster%d", i);
  99. c = of_get_child_by_name(cluster, name);
  100. if (c) {
  101. leaf = false;
  102. ret = parse_cluster(c, depth + 1);
  103. of_node_put(c);
  104. if (ret != 0)
  105. return ret;
  106. }
  107. i++;
  108. } while (c);
  109. /* Now check for cores */
  110. i = 0;
  111. do {
  112. snprintf(name, sizeof(name), "core%d", i);
  113. c = of_get_child_by_name(cluster, name);
  114. if (c) {
  115. has_cores = true;
  116. if (depth == 0) {
  117. pr_err("%s: cpu-map children should be clusters\n",
  118. c->full_name);
  119. of_node_put(c);
  120. return -EINVAL;
  121. }
  122. if (leaf) {
  123. ret = parse_core(c, cluster_id, core_id++);
  124. } else {
  125. pr_err("%s: Non-leaf cluster with core %s\n",
  126. cluster->full_name, name);
  127. ret = -EINVAL;
  128. }
  129. of_node_put(c);
  130. if (ret != 0)
  131. return ret;
  132. }
  133. i++;
  134. } while (c);
  135. if (leaf && !has_cores)
  136. pr_warn("%s: empty cluster\n", cluster->full_name);
  137. if (leaf)
  138. cluster_id++;
  139. return 0;
  140. }
  141. static int __init parse_dt_topology(void)
  142. {
  143. struct device_node *cn, *map;
  144. int ret = 0;
  145. int cpu;
  146. cn = of_find_node_by_path("/cpus");
  147. if (!cn) {
  148. pr_err("No CPU information found in DT\n");
  149. return 0;
  150. }
  151. /*
  152. * When topology is provided cpu-map is essentially a root
  153. * cluster with restricted subnodes.
  154. */
  155. map = of_get_child_by_name(cn, "cpu-map");
  156. if (!map)
  157. goto out;
  158. ret = parse_cluster(map, 0);
  159. if (ret != 0)
  160. goto out_map;
  161. /*
  162. * Check that all cores are in the topology; the SMP code will
  163. * only mark cores described in the DT as possible.
  164. */
  165. for_each_possible_cpu(cpu) {
  166. if (cpu_topology[cpu].cluster_id == -1) {
  167. pr_err("CPU%d: No topology information specified\n",
  168. cpu);
  169. ret = -EINVAL;
  170. }
  171. }
  172. out_map:
  173. of_node_put(map);
  174. out:
  175. of_node_put(cn);
  176. return ret;
  177. }
/*
 * cpu topology table
 *
 * Indexed by logical CPU number. Filled in from the device tree by
 * parse_dt_topology() and reset via reset_cpu_topology(); sibling
 * masks are maintained by update_siblings_masks().
 */
struct cpu_topology cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);
  183. const struct cpumask *cpu_coregroup_mask(int cpu)
  184. {
  185. return &cpu_topology[cpu].core_sibling;
  186. }
  187. static void update_siblings_masks(unsigned int cpuid)
  188. {
  189. struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
  190. int cpu;
  191. if (cpuid_topo->cluster_id == -1) {
  192. /*
  193. * DT does not contain topology information for this cpu.
  194. */
  195. pr_debug("CPU%u: No topology information configured\n", cpuid);
  196. return;
  197. }
  198. /* update core and thread sibling masks */
  199. for_each_possible_cpu(cpu) {
  200. cpu_topo = &cpu_topology[cpu];
  201. if (cpuid_topo->cluster_id != cpu_topo->cluster_id)
  202. continue;
  203. cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
  204. if (cpu != cpuid)
  205. cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
  206. if (cpuid_topo->core_id != cpu_topo->core_id)
  207. continue;
  208. cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
  209. if (cpu != cpuid)
  210. cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
  211. }
  212. }
/*
 * Record @cpuid's place in the topology during CPU bring-up by
 * updating the core/thread sibling masks from the ids already parsed
 * out of the device tree.
 */
void store_cpu_topology(unsigned int cpuid)
{
	update_siblings_masks(cpuid);
}
  217. static void __init reset_cpu_topology(void)
  218. {
  219. unsigned int cpu;
  220. for_each_possible_cpu(cpu) {
  221. struct cpu_topology *cpu_topo = &cpu_topology[cpu];
  222. cpu_topo->thread_id = -1;
  223. cpu_topo->core_id = 0;
  224. cpu_topo->cluster_id = -1;
  225. cpumask_clear(&cpu_topo->core_sibling);
  226. cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
  227. cpumask_clear(&cpu_topo->thread_sibling);
  228. cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
  229. }
  230. }
  231. void __init init_cpu_topology(void)
  232. {
  233. reset_cpu_topology();
  234. /*
  235. * Discard anything that was parsed if we hit an error so we
  236. * don't use partial information.
  237. */
  238. if (parse_dt_topology())
  239. reset_cpu_topology();
  240. }