topology.c

/*
 * arch/arm/kernel/topology.c
 *
 * Copyright (C) 2011 Linaro Limited.
 * Written by: Vincent Guittot
 *
 * based on arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/topology.h>

/*
 * cpu capacity scale management
 */

/*
 * cpu capacity table
 * This per cpu data structure describes the relative capacity of each core.
 * On a heterogeneous system, cores don't have the same computation capacity
 * and we reflect that difference in the cpu_capacity field so the scheduler
 * can take this difference into account during load balance. A per cpu
 * structure is preferred because each CPU updates its own cpu_capacity field
 * during the load balance except for idle cores. One idle core is selected
 * to run the rebalance_domains for all idle cores and the cpu_capacity can be
 * updated during this sequence.
 */
static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
static DEFINE_MUTEX(cpu_scale_mutex);

unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
        return per_cpu(cpu_scale, cpu);
}

static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
{
        per_cpu(cpu_scale, cpu) = capacity;
}
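
/*
 * Illustrative values (assumptions, not taken from this file): on a
 * big.LITTLE system the scheduler might see cpu_scale = 1024 (the default
 * SCHED_CAPACITY_SCALE) for a big core and something around 500 for a
 * LITTLE core, so arch_scale_cpu_capacity() reports the LITTLE cores as
 * roughly half the capacity of the big ones during load balancing.
 */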
#ifdef CONFIG_PROC_SYSCTL
static ssize_t cpu_capacity_show(struct device *dev,
                                 struct device_attribute *attr,
                                 char *buf)
{
        struct cpu *cpu = container_of(dev, struct cpu, dev);

        return sprintf(buf, "%lu\n",
                       arch_scale_cpu_capacity(NULL, cpu->dev.id));
}

static ssize_t cpu_capacity_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf,
                                  size_t count)
{
        struct cpu *cpu = container_of(dev, struct cpu, dev);
        int this_cpu = cpu->dev.id, i;
        unsigned long new_capacity;
        ssize_t ret;

        if (count) {
                ret = kstrtoul(buf, 0, &new_capacity);
                if (ret)
                        return ret;
                if (new_capacity > SCHED_CAPACITY_SCALE)
                        return -EINVAL;

                mutex_lock(&cpu_scale_mutex);
                for_each_cpu(i, &cpu_topology[this_cpu].core_sibling)
                        set_capacity_scale(i, new_capacity);
                mutex_unlock(&cpu_scale_mutex);
        }

        return count;
}

static DEVICE_ATTR_RW(cpu_capacity);

static int register_cpu_capacity_sysctl(void)
{
        int i;
        struct device *cpu;

        for_each_possible_cpu(i) {
                cpu = get_cpu_device(i);
                if (!cpu) {
                        pr_err("%s: too early to get CPU%d device!\n",
                               __func__, i);
                        continue;
                }
                device_create_file(cpu, &dev_attr_cpu_capacity);
        }

        return 0;
}
subsys_initcall(register_cpu_capacity_sysctl);
#endif
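
/*
 * Usage sketch (paths assumed from the standard CPU sysfs layout, not
 * verified against this tree): the attribute created above appears as
 * /sys/devices/system/cpu/cpuN/cpu_capacity. Reading it returns the
 * current cpu_scale; writing a value <= 1024 applies it to every core
 * sibling of cpuN, e.g.:
 *
 *   cat /sys/devices/system/cpu/cpu0/cpu_capacity
 *   echo 512 > /sys/devices/system/cpu/cpu0/cpu_capacity
 */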
#ifdef CONFIG_OF
struct cpu_efficiency {
        const char *compatible;
        unsigned long efficiency;
};

/*
 * Table of relative efficiency of each processor
 * The efficiency value must fit in 20 bits and the final
 * cpu_scale value must be in the range
 *   0 < cpu_scale < 3*SCHED_CAPACITY_SCALE/2
 * in order to return at most 1 when DIV_ROUND_CLOSEST
 * is used to compute the capacity of a CPU.
 * Processors that are not defined in the table
 * use the default SCHED_CAPACITY_SCALE value for cpu_scale.
 */
static const struct cpu_efficiency table_efficiency[] = {
        {"arm,cortex-a15", 3891},
        {"arm,cortex-a7",  2048},
        {NULL, },
};
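
/*
 * For reference (the ratio follows from the table above; the per-MHz
 * interpretation is an assumption): 3891/2048 ~= 1.9, i.e. a Cortex-A15
 * is treated as doing roughly 1.9x the work per MHz of a Cortex-A7.
 * Both values fit comfortably in 20 bits, as required by the comment
 * above.
 */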
static unsigned long *__cpu_capacity;
#define cpu_capacity(cpu)       __cpu_capacity[cpu]

static unsigned long middle_capacity = 1;
static bool cap_from_dt = true;
static u32 *raw_capacity;
static bool cap_parsing_failed;
static u32 capacity_scale;

static int __init parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
        int ret = 1;
        u32 cpu_capacity;

        if (cap_parsing_failed)
                return !ret;

        ret = of_property_read_u32(cpu_node,
                                   "capacity-dmips-mhz",
                                   &cpu_capacity);
        if (!ret) {
                if (!raw_capacity) {
                        raw_capacity = kcalloc(num_possible_cpus(),
                                               sizeof(*raw_capacity),
                                               GFP_KERNEL);
                        if (!raw_capacity) {
                                pr_err("cpu_capacity: failed to allocate memory for raw capacities\n");
                                cap_parsing_failed = true;
                                return !ret;
                        }
                }
                capacity_scale = max(cpu_capacity, capacity_scale);
                raw_capacity[cpu] = cpu_capacity;
                pr_debug("cpu_capacity: %s cpu_capacity=%u (raw)\n",
                         cpu_node->full_name, raw_capacity[cpu]);
        } else {
                if (raw_capacity) {
                        pr_err("cpu_capacity: missing %s raw capacity\n",
                               cpu_node->full_name);
                        pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
                }
                cap_parsing_failed = true;
                kfree(raw_capacity);
        }

        return !ret;
}
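
/*
 * Device tree sketch (values are illustrative, not from a real board
 * file): parse_cpu_capacity() reads the "capacity-dmips-mhz" property
 * from each cpu node, e.g.
 *
 *   cpu0: cpu@0 {
 *           compatible = "arm,cortex-a15";
 *           capacity-dmips-mhz = <1024>;
 *   };
 *   cpu1: cpu@100 {
 *           compatible = "arm,cortex-a7";
 *           capacity-dmips-mhz = <446>;
 *   };
 *
 * The largest value seen becomes capacity_scale, so the biggest CPU is
 * later normalized to SCHED_CAPACITY_SCALE (1024).
 */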
static void normalize_cpu_capacity(void)
{
        u64 capacity;
        int cpu;

        if (!raw_capacity || cap_parsing_failed)
                return;

        pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale);
        mutex_lock(&cpu_scale_mutex);
        for_each_possible_cpu(cpu) {
                capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
                        / capacity_scale;
                set_capacity_scale(cpu, capacity);
                pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
                         cpu, arch_scale_cpu_capacity(NULL, cpu));
        }
        mutex_unlock(&cpu_scale_mutex);
}
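
/*
 * Worked example (using the illustrative DT values above, which are
 * assumptions): with capacity_scale = 1024, an A15 with raw capacity 1024
 * gets cpu_scale = (1024 << 10) / 1024 = 1024, and an A7 with raw
 * capacity 446 gets cpu_scale = (446 << 10) / 1024 = 446.
 */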
#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
static bool cap_parsing_done;
static void parsing_done_workfn(struct work_struct *work);
static DECLARE_WORK(parsing_done_work, parsing_done_workfn);

static int
init_cpu_capacity_callback(struct notifier_block *nb,
                           unsigned long val,
                           void *data)
{
        struct cpufreq_policy *policy = data;
        int cpu;

        if (cap_parsing_failed || cap_parsing_done)
                return 0;

        switch (val) {
        case CPUFREQ_NOTIFY:
                pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
                         cpumask_pr_args(policy->related_cpus),
                         cpumask_pr_args(cpus_to_visit));
                cpumask_andnot(cpus_to_visit,
                               cpus_to_visit,
                               policy->related_cpus);
                for_each_cpu(cpu, policy->related_cpus) {
                        raw_capacity[cpu] = arch_scale_cpu_capacity(NULL, cpu) *
                                            policy->cpuinfo.max_freq / 1000UL;
                        capacity_scale = max(raw_capacity[cpu], capacity_scale);
                }
                if (cpumask_empty(cpus_to_visit)) {
                        normalize_cpu_capacity();
                        kfree(raw_capacity);
                        pr_debug("cpu_capacity: parsing done\n");
                        cap_parsing_done = true;
                        schedule_work(&parsing_done_work);
                }
        }

        return 0;
}
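
/*
 * Worked example (the frequencies are assumptions): continuing the values
 * above, once every cpufreq policy has been seen, an A15 cluster with
 * cpuinfo.max_freq = 2000000 kHz yields raw_capacity = 1024 * 2000000 /
 * 1000 = 2048000, and an A7 cluster at 1000000 kHz yields 446 * 1000000 /
 * 1000 = 446000. normalize_cpu_capacity() then rescales against the new
 * capacity_scale, leaving the A15 at 1024 and the A7 at
 * (446000 << 10) / 2048000 = 223.
 */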
static struct notifier_block init_cpu_capacity_notifier = {
        .notifier_call = init_cpu_capacity_callback,
};

static int __init register_cpufreq_notifier(void)
{
        if (cap_parsing_failed)
                return -EINVAL;

        if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) {
                pr_err("cpu_capacity: failed to allocate memory for cpus_to_visit\n");
                return -ENOMEM;
        }
        cpumask_copy(cpus_to_visit, cpu_possible_mask);

        return cpufreq_register_notifier(&init_cpu_capacity_notifier,
                                         CPUFREQ_POLICY_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

static void parsing_done_workfn(struct work_struct *work)
{
        cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
                                    CPUFREQ_POLICY_NOTIFIER);
}

#else
static int __init free_raw_capacity(void)
{
        kfree(raw_capacity);
        return 0;
}
core_initcall(free_raw_capacity);
#endif
/*
 * Iterate all CPUs' descriptors in DT and compute the efficiency
 * (as per table_efficiency). Also calculate a middle efficiency
 * as close as possible to (max{eff_i} + min{eff_i}) / 2
 * This is later used to scale the cpu_capacity field such that an
 * 'average' CPU is of middle capacity. Also see the comments near
 * table_efficiency[] and update_cpu_capacity().
 */
static void __init parse_dt_topology(void)
{
        const struct cpu_efficiency *cpu_eff;
        struct device_node *cn = NULL;
        unsigned long min_capacity = ULONG_MAX;
        unsigned long max_capacity = 0;
        unsigned long capacity = 0;
        int cpu = 0;

        __cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity),
                                 GFP_NOWAIT);

        cn = of_find_node_by_path("/cpus");
        if (!cn) {
                pr_err("No CPU information found in DT\n");
                return;
        }

        for_each_possible_cpu(cpu) {
                const u32 *rate;
                int len;

                /* too early to use cpu->of_node */
                cn = of_get_cpu_node(cpu, NULL);
                if (!cn) {
                        pr_err("missing device node for CPU %d\n", cpu);
                        continue;
                }

                if (parse_cpu_capacity(cn, cpu)) {
                        of_node_put(cn);
                        continue;
                }

                cap_from_dt = false;

                for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++)
                        if (of_device_is_compatible(cn, cpu_eff->compatible))
                                break;

                if (cpu_eff->compatible == NULL)
                        continue;

                rate = of_get_property(cn, "clock-frequency", &len);
                if (!rate || len != 4) {
                        pr_err("%s missing clock-frequency property\n",
                               cn->full_name);
                        continue;
                }

                capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency;

                /* Save min capacity of the system */
                if (capacity < min_capacity)
                        min_capacity = capacity;

                /* Save max capacity of the system */
                if (capacity > max_capacity)
                        max_capacity = capacity;

                cpu_capacity(cpu) = capacity;
        }

        /* If min and max capacities are equal, we bypass the update of the
         * cpu_scale because all CPUs have the same capacity. Otherwise, we
         * compute a middle_capacity factor that will ensure that the capacity
         * of an 'average' CPU of the system will be as close as possible to
         * SCHED_CAPACITY_SCALE, which is the default value, but with the
         * constraint explained near table_efficiency[].
         */
        if (4*max_capacity < (3*(max_capacity + min_capacity)))
                middle_capacity = (min_capacity + max_capacity)
                                >> (SCHED_CAPACITY_SHIFT+1);
        else
                middle_capacity = ((max_capacity / 3)
                                >> (SCHED_CAPACITY_SHIFT-1)) + 1;

        if (cap_from_dt && !cap_parsing_failed)
                normalize_cpu_capacity();
}
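
/*
 * Worked example (the clock rates are assumptions): with an A15 and an A7
 * both clocked from DT at 1 GHz, 1000000000 >> 20 = 953, so
 * capacity(A15) = 953 * 3891 = 3708123 and capacity(A7) = 953 * 2048 =
 * 1951744. Since 4*max < 3*(max + min) here, middle_capacity =
 * (1951744 + 3708123) >> 11 = 2763, which update_cpu_capacity() below
 * uses to bring the final values back near SCHED_CAPACITY_SCALE.
 */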
/*
 * Look for a custom capacity of a CPU in the cpu_capacity table during the
 * boot. The update of all CPUs is in O(n^2) for heterogeneous systems but the
 * function returns directly for SMP systems.
 */
static void update_cpu_capacity(unsigned int cpu)
{
        if (!cpu_capacity(cpu) || cap_from_dt)
                return;

        set_capacity_scale(cpu, cpu_capacity(cpu) / middle_capacity);

        pr_info("CPU%u: update cpu_capacity %lu\n",
                cpu, arch_scale_cpu_capacity(NULL, cpu));
}
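
/*
 * Continuing the worked example above (still assuming 1 GHz clocks): the
 * A15 ends up with cpu_scale = 3708123 / 2763 = 1342 and the A7 with
 * 1951744 / 2763 = 706, i.e. roughly 1.3x and 0.7x of
 * SCHED_CAPACITY_SCALE, within the 0 < cpu_scale < 3*1024/2 bound noted
 * near table_efficiency[].
 */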
#else
static inline void parse_dt_topology(void) {}
static inline void update_cpu_capacity(unsigned int cpuid) {}
#endif

/*
 * cpu topology table
 */
struct cputopo_arm cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

const struct cpumask *cpu_coregroup_mask(int cpu)
{
        return &cpu_topology[cpu].core_sibling;
}

/*
 * The current assumption is that we can power gate each core independently.
 * This will be superseded by DT binding once available.
 */
const struct cpumask *cpu_corepower_mask(int cpu)
{
        return &cpu_topology[cpu].thread_sibling;
}

static void update_siblings_masks(unsigned int cpuid)
{
        struct cputopo_arm *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
        int cpu;

        /* update core and thread sibling masks */
        for_each_possible_cpu(cpu) {
                cpu_topo = &cpu_topology[cpu];

                if (cpuid_topo->socket_id != cpu_topo->socket_id)
                        continue;

                cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
                if (cpu != cpuid)
                        cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

                if (cpuid_topo->core_id != cpu_topo->core_id)
                        continue;

                cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
                if (cpu != cpuid)
                        cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
        }
        smp_wmb();
}
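
/*
 * Illustrative result (the topology is an assumption): on a 2+2
 * big.LITTLE system where CPUs 0-1 share socket_id 0 and CPUs 2-3 share
 * socket_id 1, core_sibling for CPU0 becomes {0,1} and for CPU2 becomes
 * {2,3}; with no MT threads, each CPU's thread_sibling contains only
 * itself.
 */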
/*
 * store_cpu_topology is called at boot when only one cpu is running, and
 * with the cpu_hotplug.lock mutex held once several cpus have booted,
 * which prevents simultaneous write access to the cpu_topology array.
 */
void store_cpu_topology(unsigned int cpuid)
{
        struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
        unsigned int mpidr;

        /* If the cpu topology has been already set, just return */
        if (cpuid_topo->core_id != -1)
                return;

        mpidr = read_cpuid_mpidr();

        /* create cpu topology mapping */
        if ((mpidr & MPIDR_SMP_BITMASK) == MPIDR_SMP_VALUE) {
                /*
                 * This is a multiprocessor system:
                 * the multiprocessor format & multiprocessor mode field are set.
                 */
                if (mpidr & MPIDR_MT_BITMASK) {
                        /* core performance interdependency */
                        cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
                        cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
                        cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
                } else {
                        /* largely independent cores */
                        cpuid_topo->thread_id = -1;
                        cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
                        cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
                }
        } else {
                /*
                 * This is a uniprocessor system:
                 * we are in multiprocessor format but on a uniprocessor system,
                 * or in the old uniprocessor format.
                 */
                cpuid_topo->thread_id = -1;
                cpuid_topo->core_id = 0;
                cpuid_topo->socket_id = -1;
        }

        update_siblings_masks(cpuid);

        update_cpu_capacity(cpuid);

        pr_info("CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
                cpuid, cpu_topology[cpuid].thread_id,
                cpu_topology[cpuid].core_id,
                cpu_topology[cpuid].socket_id, mpidr);
}
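
/*
 * Decoding example (the MPIDR value is an assumption): with the MT bit
 * clear and MPIDR affinity fields Aff1 = 1 and Aff0 = 2 (e.g. the third
 * core of the second cluster on a big.LITTLE system), the code above
 * records thread_id = -1, core_id = 2 and socket_id = 1.
 */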
static inline int cpu_corepower_flags(void)
{
        return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN;
}

static struct sched_domain_topology_level arm_topology[] = {
#ifdef CONFIG_SCHED_MC
        { cpu_corepower_mask, cpu_corepower_flags, SD_INIT_NAME(GMC) },
        { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
#endif
        { cpu_cpu_mask, SD_INIT_NAME(DIE) },
        { NULL, },
};
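
/*
 * Resulting hierarchy (a sketch of what the scheduler builds from this
 * table, assuming CONFIG_SCHED_MC=y): GMC spans cores that can be power
 * gated together (the thread_sibling mask via cpu_corepower_mask), MC
 * spans the cores of a cluster (core_sibling), and DIE spans all CPUs in
 * the system.
 */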
/*
 * init_cpu_topology is called at boot when only one cpu is running,
 * which prevents simultaneous write access to the cpu_topology array.
 */
void __init init_cpu_topology(void)
{
        unsigned int cpu;

        /* init core mask and capacity */
        for_each_possible_cpu(cpu) {
                struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]);

                cpu_topo->thread_id = -1;
                cpu_topo->core_id = -1;
                cpu_topo->socket_id = -1;
                cpumask_clear(&cpu_topo->core_sibling);
                cpumask_clear(&cpu_topo->thread_sibling);
        }
        smp_wmb();

        parse_dt_topology();

        /* Set scheduler topology descriptor */
        set_sched_topology(arm_topology);
}