/*
 * arch/arm/kernel/topology.c
 *
 * Copyright (C) 2011 Linaro Limited.
 * Written by: Vincent Guittot
 *
 * based on arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/topology.h>

/*
 * cpu capacity scale management
 */

/*
 * cpu capacity table
 * This per cpu data structure describes the relative capacity of each core.
 * On a heterogeneous system, cores don't have the same computation capacity
 * and we reflect that difference in the cpu_capacity field so the scheduler
 * can take this difference into account during load balance. A per cpu
 * structure is preferred because each CPU updates its own cpu_capacity field
 * during the load balance except for idle cores. One idle core is selected
 * to run the rebalance_domains for all idle cores and the cpu_capacity can be
 * updated during this sequence.
 */
static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
static DEFINE_MUTEX(cpu_scale_mutex);

unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
	return per_cpu(cpu_scale, cpu);
}

static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
{
	per_cpu(cpu_scale, cpu) = capacity;
}

#ifdef CONFIG_PROC_SYSCTL
static ssize_t cpu_capacity_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return sprintf(buf, "%lu\n",
		       arch_scale_cpu_capacity(NULL, cpu->dev.id));
}

static ssize_t cpu_capacity_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t count)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	int this_cpu = cpu->dev.id, i;
	unsigned long new_capacity;
	ssize_t ret;

	if (count) {
		ret = kstrtoul(buf, 0, &new_capacity);
		if (ret)
			return ret;
		if (new_capacity > SCHED_CAPACITY_SCALE)
			return -EINVAL;

		mutex_lock(&cpu_scale_mutex);
		for_each_cpu(i, &cpu_topology[this_cpu].core_sibling)
			set_capacity_scale(i, new_capacity);
		mutex_unlock(&cpu_scale_mutex);
	}

	return count;
}

static DEVICE_ATTR_RW(cpu_capacity);

static int register_cpu_capacity_sysctl(void)
{
	int i;
	struct device *cpu;

	for_each_possible_cpu(i) {
		cpu = get_cpu_device(i);
		if (!cpu) {
			pr_err("%s: too early to get CPU%d device!\n",
			       __func__, i);
			continue;
		}
		device_create_file(cpu, &dev_attr_cpu_capacity);
	}

	return 0;
}
subsys_initcall(register_cpu_capacity_sysctl);
#endif
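
/*
 * Illustrative sysfs usage for the attribute created above (values are
 * examples, not guaranteed defaults): the file is expected to appear as
 * /sys/devices/system/cpu/cpuN/cpu_capacity, and a write is propagated to
 * every CPU in the writer's core_sibling mask, e.g.:
 *
 *	# cat /sys/devices/system/cpu/cpu0/cpu_capacity
 *	1024
 *	# echo 512 > /sys/devices/system/cpu/cpu0/cpu_capacity
 *	# cat /sys/devices/system/cpu/cpu1/cpu_capacity
 *	512		(assuming cpu1 is in cpu0's core_sibling mask)
 */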
#ifdef CONFIG_OF
struct cpu_efficiency {
	const char *compatible;
	unsigned long efficiency;
};

/*
 * Table of the relative efficiency of each processor.
 * The efficiency value must fit in 20 bits and the final
 * cpu_scale value must be in the range
 *   0 < cpu_scale < 3*SCHED_CAPACITY_SCALE/2
 * in order to return at most 1 when DIV_ROUND_CLOSEST
 * is used to compute the capacity of a CPU.
 * Processors that are not defined in the table use the default
 * SCHED_CAPACITY_SCALE value for cpu_scale.
 */
static const struct cpu_efficiency table_efficiency[] = {
	{"arm,cortex-a15", 3891},
	{"arm,cortex-a7",  2048},
	{NULL, },
};

static unsigned long *__cpu_capacity;
#define cpu_capacity(cpu)	__cpu_capacity[cpu]

static unsigned long middle_capacity = 1;
static bool cap_from_dt = true;
static u32 *raw_capacity;
static bool cap_parsing_failed;
static u32 capacity_scale;

static int __init parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
	int ret = 1;
	u32 cpu_capacity;

	if (cap_parsing_failed)
		return !ret;

	ret = of_property_read_u32(cpu_node,
				   "capacity-dmips-mhz",
				   &cpu_capacity);
	if (!ret) {
		if (!raw_capacity) {
			raw_capacity = kcalloc(num_possible_cpus(),
					       sizeof(*raw_capacity),
					       GFP_KERNEL);
			if (!raw_capacity) {
				pr_err("cpu_capacity: failed to allocate memory for raw capacities\n");
				cap_parsing_failed = true;
				return !ret;
			}
		}
		capacity_scale = max(cpu_capacity, capacity_scale);
		raw_capacity[cpu] = cpu_capacity;
		pr_debug("cpu_capacity: %s cpu_capacity=%u (raw)\n",
			 cpu_node->full_name, raw_capacity[cpu]);
	} else {
		if (raw_capacity) {
			pr_err("cpu_capacity: missing %s raw capacity\n",
			       cpu_node->full_name);
			pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
		}
		cap_parsing_failed = true;
		kfree(raw_capacity);
	}

	return !ret;
}
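
/*
 * For reference, a minimal (hypothetical) device-tree fragment that the
 * parser above would accept; the capacity-dmips-mhz values are example
 * numbers only:
 *
 *	cpu0: cpu@0 {
 *		compatible = "arm,cortex-a15";
 *		reg = <0>;
 *		capacity-dmips-mhz = <1024>;
 *	};
 *	cpu1: cpu@100 {
 *		compatible = "arm,cortex-a7";
 *		reg = <0x100>;
 *		capacity-dmips-mhz = <446>;
 *	};
 */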
static void normalize_cpu_capacity(void)
{
	u64 capacity;
	int cpu;

	if (!raw_capacity || cap_parsing_failed)
		return;

	pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale);
	mutex_lock(&cpu_scale_mutex);
	for_each_possible_cpu(cpu) {
		capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
			/ capacity_scale;
		set_capacity_scale(cpu, capacity);
		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
			 cpu, arch_scale_cpu_capacity(NULL, cpu));
	}
	mutex_unlock(&cpu_scale_mutex);
}
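
/*
 * Worked example (using the illustrative DT values above): with raw
 * capacities of 1024 and 446, capacity_scale ends up as 1024, so
 *
 *	big:    (1024 << SCHED_CAPACITY_SHIFT) / 1024 = 1024
 *	LITTLE: ( 446 << SCHED_CAPACITY_SHIFT) / 1024 =  446
 *
 * i.e. the largest CPU is normalized to SCHED_CAPACITY_SCALE (1024) and
 * the others are scaled relative to it.
 */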
#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
static bool cap_parsing_done;
static void parsing_done_workfn(struct work_struct *work);
static DECLARE_WORK(parsing_done_work, parsing_done_workfn);

static int
init_cpu_capacity_callback(struct notifier_block *nb,
			   unsigned long val,
			   void *data)
{
	struct cpufreq_policy *policy = data;
	int cpu;

	if (cap_parsing_failed || cap_parsing_done)
		return 0;

	switch (val) {
	case CPUFREQ_NOTIFY:
		pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
			 cpumask_pr_args(policy->related_cpus),
			 cpumask_pr_args(cpus_to_visit));
		cpumask_andnot(cpus_to_visit,
			       cpus_to_visit,
			       policy->related_cpus);
		for_each_cpu(cpu, policy->related_cpus) {
			raw_capacity[cpu] = arch_scale_cpu_capacity(NULL, cpu) *
					    policy->cpuinfo.max_freq / 1000UL;
			capacity_scale = max(raw_capacity[cpu], capacity_scale);
		}
		if (cpumask_empty(cpus_to_visit)) {
			normalize_cpu_capacity();
			kfree(raw_capacity);
			pr_debug("cpu_capacity: parsing done\n");
			cap_parsing_done = true;
			schedule_work(&parsing_done_work);
		}
	}
	return 0;
}
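
/*
 * Worked example for the cpufreq adjustment above (frequencies are
 * illustrative only): if the LITTLE cluster reports max_freq = 1400000 kHz
 * and the big cluster 1800000 kHz, with already-normalized capacities of
 * 446 and 1024:
 *
 *	LITTLE:  446 * 1400000 / 1000 =  624400
 *	big:    1024 * 1800000 / 1000 = 1843200   (new capacity_scale)
 *
 * and the subsequent normalize_cpu_capacity() gives
 * (624400 << 10) / 1843200 = 346 for the LITTLE CPUs and 1024 for the big
 * CPUs, i.e. the DMIPS/MHz ratio weighted by the achievable frequency.
 */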
static struct notifier_block init_cpu_capacity_notifier = {
	.notifier_call = init_cpu_capacity_callback,
};

static int __init register_cpufreq_notifier(void)
{
	if (cap_parsing_failed)
		return -EINVAL;

	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) {
		pr_err("cpu_capacity: failed to allocate memory for cpus_to_visit\n");
		return -ENOMEM;
	}
	cpumask_copy(cpus_to_visit, cpu_possible_mask);

	return cpufreq_register_notifier(&init_cpu_capacity_notifier,
					 CPUFREQ_POLICY_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

static void parsing_done_workfn(struct work_struct *work)
{
	cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
				    CPUFREQ_POLICY_NOTIFIER);
}

#else
static int __init free_raw_capacity(void)
{
	kfree(raw_capacity);
	return 0;
}
core_initcall(free_raw_capacity);
#endif
/*
 * Iterate all CPUs' descriptor in DT and compute the efficiency
 * (as per table_efficiency). Also calculate a middle efficiency
 * as close as possible to (max{eff_i} + min{eff_i}) / 2.
 * This is later used to scale the cpu_capacity field such that an
 * 'average' CPU is of middle capacity. Also see the comments near
 * table_efficiency[] and update_cpu_capacity().
 */
static void __init parse_dt_topology(void)
{
	const struct cpu_efficiency *cpu_eff;
	struct device_node *cn = NULL;
	unsigned long min_capacity = ULONG_MAX;
	unsigned long max_capacity = 0;
	unsigned long capacity = 0;
	int cpu = 0;

	__cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity),
				 GFP_NOWAIT);

	cn = of_find_node_by_path("/cpus");
	if (!cn) {
		pr_err("No CPU information found in DT\n");
		return;
	}

	for_each_possible_cpu(cpu) {
		const u32 *rate;
		int len;

		/* too early to use cpu->of_node */
		cn = of_get_cpu_node(cpu, NULL);
		if (!cn) {
			pr_err("missing device node for CPU %d\n", cpu);
			continue;
		}

		if (parse_cpu_capacity(cn, cpu)) {
			of_node_put(cn);
			continue;
		}

		cap_from_dt = false;

		for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++)
			if (of_device_is_compatible(cn, cpu_eff->compatible))
				break;

		if (cpu_eff->compatible == NULL)
			continue;

		rate = of_get_property(cn, "clock-frequency", &len);
		if (!rate || len != 4) {
			pr_err("%s missing clock-frequency property\n",
			       cn->full_name);
			continue;
		}

		capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency;

		/* Save min capacity of the system */
		if (capacity < min_capacity)
			min_capacity = capacity;

		/* Save max capacity of the system */
		if (capacity > max_capacity)
			max_capacity = capacity;

		cpu_capacity(cpu) = capacity;
	}

	/* If min and max capacities are equal, we bypass the update of the
	 * cpu_scale because all CPUs have the same capacity. Otherwise, we
	 * compute a middle_capacity factor that will ensure that the capacity
	 * of an 'average' CPU of the system will be as close as possible to
	 * SCHED_CAPACITY_SCALE, which is the default value, but with the
	 * constraint explained near table_efficiency[].
	 */
	if (4*max_capacity < (3*(max_capacity + min_capacity)))
		middle_capacity = (min_capacity + max_capacity)
				>> (SCHED_CAPACITY_SHIFT+1);
	else
		middle_capacity = ((max_capacity / 3)
				>> (SCHED_CAPACITY_SHIFT-1)) + 1;

	if (cap_from_dt && !cap_parsing_failed)
		normalize_cpu_capacity();
}

/*
 * Look for a custom capacity of a CPU in the cpu_capacity table during
 * boot. The update of all CPUs is in O(n^2) for heterogeneous systems but
 * the function returns directly for SMP systems.
 */
static void update_cpu_capacity(unsigned int cpu)
{
	if (!cpu_capacity(cpu) || cap_from_dt)
		return;

	set_capacity_scale(cpu, cpu_capacity(cpu) / middle_capacity);

	pr_info("CPU%u: update cpu_capacity %lu\n",
		cpu, arch_scale_cpu_capacity(NULL, cpu));
}
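
/*
 * Worked example for the legacy efficiency path above (clock rates are
 * illustrative only): a Cortex-A15 at 2.0 GHz and a Cortex-A7 at 1.0 GHz
 * give
 *
 *	A15: (2000000000 >> 20) * 3891 = 1907 * 3891 = 7420137  (max)
 *	A7:  (1000000000 >> 20) * 2048 =  953 * 2048 = 1951744  (min)
 *
 * Here 4*max >= 3*(max + min), so the second branch is taken:
 *	middle_capacity = ((7420137 / 3) >> 9) + 1 = 4831
 * and update_cpu_capacity() then sets
 *	A15: 7420137 / 4831 = 1535
 *	A7:  1951744 / 4831 =  404
 * which respects the 0 < cpu_scale < 3*SCHED_CAPACITY_SCALE/2 (= 1536)
 * constraint documented near table_efficiency[].
 */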
#else
static inline void parse_dt_topology(void) {}
static inline void update_cpu_capacity(unsigned int cpuid) {}
#endif

/*
 * cpu topology table
 */
struct cputopo_arm cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return &cpu_topology[cpu].core_sibling;
}

/*
 * The current assumption is that we can power gate each core independently.
 * This will be superseded by DT binding once available.
 */
const struct cpumask *cpu_corepower_mask(int cpu)
{
	return &cpu_topology[cpu].thread_sibling;
}

static void update_siblings_masks(unsigned int cpuid)
{
	struct cputopo_arm *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
	int cpu;

	/* update core and thread sibling masks */
	for_each_possible_cpu(cpu) {
		cpu_topo = &cpu_topology[cpu];

		if (cpuid_topo->socket_id != cpu_topo->socket_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
		if (cpu != cpuid)
			cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

		if (cpuid_topo->core_id != cpu_topo->core_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
		if (cpu != cpuid)
			cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
	}
	smp_wmb();
}
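
/*
 * Example (hypothetical 4-CPU big.LITTLE layout): if CPUs 0-1 report
 * socket_id 0 and CPUs 2-3 report socket_id 1, then after every CPU has
 * passed through update_siblings_masks():
 *
 *	core_sibling(0) = core_sibling(1) = {0,1}
 *	core_sibling(2) = core_sibling(3) = {2,3}
 *
 * while thread_sibling(N) = {N} on such non-SMT parts, since each core_id
 * is unique within its cluster.
 */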
/*
 * store_cpu_topology is called at boot when only one cpu is running,
 * and with the cpu_hotplug.lock mutex held once several cpus have booted,
 * which prevents simultaneous write access to the cpu_topology array.
 */
void store_cpu_topology(unsigned int cpuid)
{
	struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
	unsigned int mpidr;

	/* If the cpu topology has been already set, just return */
	if (cpuid_topo->core_id != -1)
		return;

	mpidr = read_cpuid_mpidr();

	/* create cpu topology mapping */
	if ((mpidr & MPIDR_SMP_BITMASK) == MPIDR_SMP_VALUE) {
		/*
		 * This is a multiprocessor system:
		 * multiprocessor format & multiprocessor mode field are set
		 */
		if (mpidr & MPIDR_MT_BITMASK) {
			/* core performance interdependency */
			cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
			cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
			cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
		} else {
			/* largely independent cores */
			cpuid_topo->thread_id = -1;
			cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
			cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
		}
	} else {
		/*
		 * This is a uniprocessor system:
		 * we are in multiprocessor format but on a uniprocessor system,
		 * or in the old uniprocessor format
		 */
		cpuid_topo->thread_id = -1;
		cpuid_topo->core_id = 0;
		cpuid_topo->socket_id = -1;
	}

	update_siblings_masks(cpuid);

	update_cpu_capacity(cpuid);

	pr_info("CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
		cpuid, cpu_topology[cpuid].thread_id,
		cpu_topology[cpuid].core_id,
		cpu_topology[cpuid].socket_id, mpidr);
}
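
/*
 * Example MPIDR decoding for the non-MT branch above: a (hypothetical)
 * MPIDR value of 0x80000101 has bit[31] set with the U bit clear
 * (multiprocessor format and mode), the MT bit clear, Aff0 = 0x01 and
 * Aff1 = 0x01, so the CPU is recorded as core 1 of socket (cluster) 1
 * with no thread level. On a typical big.LITTLE part the cluster number
 * lands in Aff1 this way.
 */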
static inline int cpu_corepower_flags(void)
{
	return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN;
}

static struct sched_domain_topology_level arm_topology[] = {
#ifdef CONFIG_SCHED_MC
	{ cpu_corepower_mask, cpu_corepower_flags, SD_INIT_NAME(GMC) },
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

/*
 * init_cpu_topology is called at boot when only one cpu is running,
 * which prevents simultaneous write access to the cpu_topology array.
 */
void __init init_cpu_topology(void)
{
	unsigned int cpu;

	/* init core mask and capacity */
	for_each_possible_cpu(cpu) {
		struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]);

		cpu_topo->thread_id = -1;
		cpu_topo->core_id = -1;
		cpu_topo->socket_id = -1;
		cpumask_clear(&cpu_topo->core_sibling);
		cpumask_clear(&cpu_topo->thread_sibling);
	}
	smp_wmb();

	parse_dt_topology();

	/* Set scheduler topology descriptor */
	set_sched_topology(arm_topology);
}