cppc_cpufreq.c 8.5 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341
  1. /*
  2. * CPPC (Collaborative Processor Performance Control) driver for
  3. * interfacing with the CPUfreq layer and governors. See
  4. * cppc_acpi.c for CPPC specific methods.
  5. *
  6. * (C) Copyright 2014, 2015 Linaro Ltd.
  7. * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
  8. *
  9. * This program is free software; you can redistribute it and/or
  10. * modify it under the terms of the GNU General Public License
  11. * as published by the Free Software Foundation; version 2
  12. * of the License.
  13. */
  14. #define pr_fmt(fmt) "CPPC Cpufreq:" fmt
  15. #include <linux/kernel.h>
  16. #include <linux/module.h>
  17. #include <linux/delay.h>
  18. #include <linux/cpu.h>
  19. #include <linux/cpufreq.h>
  20. #include <linux/dmi.h>
  21. #include <linux/time.h>
  22. #include <linux/vmalloc.h>
  23. #include <asm/unaligned.h>
  24. #include <acpi/cppc_acpi.h>
  25. /* Minimum struct length needed for the DMI processor entry we want */
  26. #define DMI_ENTRY_PROCESSOR_MIN_LENGTH 48
/* Offset in the DMI processor structure for the max frequency */
  28. #define DMI_PROCESSOR_MAX_SPEED 0x14
  29. /*
  30. * These structs contain information parsed from per CPU
  31. * ACPI _CPC structures.
  32. * e.g. For each CPU the highest, lowest supported
  33. * performance capabilities, desired performance level
  34. * requested etc.
  35. */
  36. static struct cppc_cpudata **all_cpu_data;
  37. /* Callback function used to retrieve the max frequency from DMI */
  38. static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
  39. {
  40. const u8 *dmi_data = (const u8 *)dm;
  41. u16 *mhz = (u16 *)private;
  42. if (dm->type == DMI_ENTRY_PROCESSOR &&
  43. dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) {
  44. u16 val = (u16)get_unaligned((const u16 *)
  45. (dmi_data + DMI_PROCESSOR_MAX_SPEED));
  46. *mhz = val > *mhz ? val : *mhz;
  47. }
  48. }
  49. /* Look up the max frequency in DMI */
  50. static u64 cppc_get_dmi_max_khz(void)
  51. {
  52. u16 mhz = 0;
  53. dmi_walk(cppc_find_dmi_mhz, &mhz);
  54. /*
  55. * Real stupid fallback value, just in case there is no
  56. * actual value set.
  57. */
  58. mhz = mhz ? mhz : 1;
  59. return (1000 * mhz);
  60. }
  61. /*
  62. * If CPPC lowest_freq and nominal_freq registers are exposed then we can
  63. * use them to convert perf to freq and vice versa
  64. *
  65. * If the perf/freq point lies between Nominal and Lowest, we can treat
  66. * (Low perf, Low freq) and (Nom Perf, Nom freq) as 2D co-ordinates of a line
  67. * and extrapolate the rest
  68. * For perf/freq > Nominal, we use the ratio perf:freq at Nominal for conversion
  69. */
  70. static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu,
  71. unsigned int perf)
  72. {
  73. static u64 max_khz;
  74. struct cppc_perf_caps *caps = &cpu->perf_caps;
  75. u64 mul, div;
  76. if (caps->lowest_freq && caps->nominal_freq) {
  77. if (perf >= caps->nominal_perf) {
  78. mul = caps->nominal_freq;
  79. div = caps->nominal_perf;
  80. } else {
  81. mul = caps->nominal_freq - caps->lowest_freq;
  82. div = caps->nominal_perf - caps->lowest_perf;
  83. }
  84. } else {
  85. if (!max_khz)
  86. max_khz = cppc_get_dmi_max_khz();
  87. mul = max_khz;
  88. div = cpu->perf_caps.highest_perf;
  89. }
  90. return (u64)perf * mul / div;
  91. }
  92. static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu,
  93. unsigned int freq)
  94. {
  95. static u64 max_khz;
  96. struct cppc_perf_caps *caps = &cpu->perf_caps;
  97. u64 mul, div;
  98. if (caps->lowest_freq && caps->nominal_freq) {
  99. if (freq >= caps->nominal_freq) {
  100. mul = caps->nominal_perf;
  101. div = caps->nominal_freq;
  102. } else {
  103. mul = caps->lowest_perf;
  104. div = caps->lowest_freq;
  105. }
  106. } else {
  107. if (!max_khz)
  108. max_khz = cppc_get_dmi_max_khz();
  109. mul = cpu->perf_caps.highest_perf;
  110. div = max_khz;
  111. }
  112. return (u64)freq * mul / div;
  113. }
  114. static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
  115. unsigned int target_freq,
  116. unsigned int relation)
  117. {
  118. struct cppc_cpudata *cpu;
  119. struct cpufreq_freqs freqs;
  120. u32 desired_perf;
  121. int ret = 0;
  122. cpu = all_cpu_data[policy->cpu];
  123. desired_perf = cppc_cpufreq_khz_to_perf(cpu, target_freq);
  124. /* Return if it is exactly the same perf */
  125. if (desired_perf == cpu->perf_ctrls.desired_perf)
  126. return ret;
  127. cpu->perf_ctrls.desired_perf = desired_perf;
  128. freqs.old = policy->cur;
  129. freqs.new = target_freq;
  130. cpufreq_freq_transition_begin(policy, &freqs);
  131. ret = cppc_set_perf(cpu->cpu, &cpu->perf_ctrls);
  132. cpufreq_freq_transition_end(policy, &freqs, ret != 0);
  133. if (ret)
  134. pr_debug("Failed to set target on CPU:%d. ret:%d\n",
  135. cpu->cpu, ret);
  136. return ret;
  137. }
  138. static int cppc_verify_policy(struct cpufreq_policy *policy)
  139. {
  140. cpufreq_verify_within_cpu_limits(policy);
  141. return 0;
  142. }
  143. static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy)
  144. {
  145. int cpu_num = policy->cpu;
  146. struct cppc_cpudata *cpu = all_cpu_data[cpu_num];
  147. int ret;
  148. cpu->perf_ctrls.desired_perf = cpu->perf_caps.lowest_perf;
  149. ret = cppc_set_perf(cpu_num, &cpu->perf_ctrls);
  150. if (ret)
  151. pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
  152. cpu->perf_caps.lowest_perf, cpu_num, ret);
  153. }
/*
 * cppc_cpufreq_cpu_init - cpufreq ->init hook: build a policy from _CPC data.
 *
 * Reads the CPU's CPPC performance capabilities, derives the policy and
 * cpuinfo frequency ranges from them, propagates the capabilities to PSD
 * domain siblings (SW_ANY coordination only), and starts the CPU at its
 * highest performance level. Returns 0 on success or a negative errno.
 */
static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	struct cppc_cpudata *cpu;
	unsigned int cpu_num = policy->cpu;
	int ret = 0;

	cpu = all_cpu_data[policy->cpu];
	cpu->cpu = cpu_num;
	/* Pull highest/nominal/lowest perf levels from the _CPC package. */
	ret = cppc_get_perf_caps(policy->cpu, &cpu->perf_caps);
	if (ret) {
		pr_debug("Err reading CPU%d perf capabilities. ret:%d\n",
			 cpu_num, ret);
		return ret;
	}
	/* Convert the lowest and nominal freq from MHz to KHz */
	cpu->perf_caps.lowest_freq *= 1000;
	cpu->perf_caps.nominal_freq *= 1000;
	/*
	 * Set min to lowest nonlinear perf to avoid any efficiency penalty (see
	 * Section 8.4.7.1.1.5 of ACPI 6.1 spec)
	 */
	policy->min = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.lowest_nonlinear_perf);
	policy->max = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.highest_perf);
	/*
	 * Set cpuinfo.min_freq to Lowest to make the full range of performance
	 * available if userspace wants to use any perf between lowest & lowest
	 * nonlinear perf
	 */
	policy->cpuinfo.min_freq = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.lowest_perf);
	policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.highest_perf);
	/* cppc_get_transition_latency() reports ns; cpufreq wants us. */
	policy->transition_delay_us = cppc_get_transition_latency(cpu_num) /
		NSEC_PER_USEC;
	policy->shared_type = cpu->shared_type;
	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
		int i;

		/* One policy covers the whole _PSD coordination domain. */
		cpumask_copy(policy->cpus, cpu->shared_cpu_map);
		for_each_cpu(i, policy->cpus) {
			if (unlikely(i == policy->cpu))
				continue;
			/* Mirror this CPU's capabilities to domain siblings. */
			memcpy(&all_cpu_data[i]->perf_caps, &cpu->perf_caps,
			       sizeof(cpu->perf_caps));
		}
	} else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) {
		/* Support only SW_ANY for now. */
		pr_debug("Unsupported CPU co-ord type\n");
		return -EFAULT;
	}
	cpu->cur_policy = policy;
	/* Set policy->cur to max now. The governors will adjust later. */
	policy->cur = cppc_cpufreq_perf_to_khz(cpu,
					       cpu->perf_caps.highest_perf);
	cpu->perf_ctrls.desired_perf = cpu->perf_caps.highest_perf;
	/* Kick the CPU to its highest perf so it starts in a known state. */
	ret = cppc_set_perf(cpu_num, &cpu->perf_ctrls);
	if (ret)
		pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
			 cpu->perf_caps.highest_perf, cpu_num, ret);
	return ret;
}
  211. static struct cpufreq_driver cppc_cpufreq_driver = {
  212. .flags = CPUFREQ_CONST_LOOPS,
  213. .verify = cppc_verify_policy,
  214. .target = cppc_cpufreq_set_target,
  215. .init = cppc_cpufreq_cpu_init,
  216. .stop_cpu = cppc_cpufreq_stop_cpu,
  217. .name = "cppc_cpufreq",
  218. };
  219. static int __init cppc_cpufreq_init(void)
  220. {
  221. int i, ret = 0;
  222. struct cppc_cpudata *cpu;
  223. if (acpi_disabled)
  224. return -ENODEV;
  225. all_cpu_data = kzalloc(sizeof(void *) * num_possible_cpus(), GFP_KERNEL);
  226. if (!all_cpu_data)
  227. return -ENOMEM;
  228. for_each_possible_cpu(i) {
  229. all_cpu_data[i] = kzalloc(sizeof(struct cppc_cpudata), GFP_KERNEL);
  230. if (!all_cpu_data[i])
  231. goto out;
  232. cpu = all_cpu_data[i];
  233. if (!zalloc_cpumask_var(&cpu->shared_cpu_map, GFP_KERNEL))
  234. goto out;
  235. }
  236. ret = acpi_get_psd_map(all_cpu_data);
  237. if (ret) {
  238. pr_debug("Error parsing PSD data. Aborting cpufreq registration.\n");
  239. goto out;
  240. }
  241. ret = cpufreq_register_driver(&cppc_cpufreq_driver);
  242. if (ret)
  243. goto out;
  244. return ret;
  245. out:
  246. for_each_possible_cpu(i) {
  247. cpu = all_cpu_data[i];
  248. if (!cpu)
  249. break;
  250. free_cpumask_var(cpu->shared_cpu_map);
  251. kfree(cpu);
  252. }
  253. kfree(all_cpu_data);
  254. return -ENODEV;
  255. }
  256. static void __exit cppc_cpufreq_exit(void)
  257. {
  258. struct cppc_cpudata *cpu;
  259. int i;
  260. cpufreq_unregister_driver(&cppc_cpufreq_driver);
  261. for_each_possible_cpu(i) {
  262. cpu = all_cpu_data[i];
  263. free_cpumask_var(cpu->shared_cpu_map);
  264. kfree(cpu);
  265. }
  266. kfree(all_cpu_data);
  267. }
  268. module_exit(cppc_cpufreq_exit);
  269. MODULE_AUTHOR("Ashwin Chaugule");
  270. MODULE_DESCRIPTION("CPUFreq driver based on the ACPI CPPC v5.0+ spec");
  271. MODULE_LICENSE("GPL");
  272. late_initcall(cppc_cpufreq_init);
  273. static const struct acpi_device_id cppc_acpi_ids[] = {
  274. {ACPI_PROCESSOR_DEVICE_HID, },
  275. {}
  276. };
  277. MODULE_DEVICE_TABLE(acpi, cppc_acpi_ids);