cpufreq-dt.c

/*
 * Copyright (C) 2012 Freescale Semiconductor, Inc.
 *
 * Copyright (C) 2014 Linaro.
 * Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpu_cooling.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/thermal.h>

#include "cpufreq-dt.h"
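/* Per-policy driver data, stored in policy->driver_data by cpufreq_init(). */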
struct private_data {
	struct opp_table *opp_table;
	struct device *cpu_dev;
	struct thermal_cooling_device *cdev;
	const char *reg_name;
};

static struct freq_attr *cpufreq_dt_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,	/* Extra space for boost-attr if required */
	NULL,
};
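/*
 * Switch the CPUs covered by this policy to the OPP at the given frequency
 * table index. The OPP layer adjusts both the clock and, if present, the
 * regulator; on success the new frequency scale is reported to the arch code.
 */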
static int set_target(struct cpufreq_policy *policy, unsigned int index)
{
	struct private_data *priv = policy->driver_data;
	unsigned long freq = policy->freq_table[index].frequency;
	int ret;

	ret = dev_pm_opp_set_rate(priv->cpu_dev, freq * 1000);
	if (!ret) {
		arch_set_freq_scale(policy->related_cpus, freq,
				    policy->cpuinfo.max_freq);
	}

	return ret;
}
/*
 * An earlier version of the opp-v1 bindings used to name the regulator
 * "cpu0-supply"; we still need to handle that for backwards compatibility.
 */
static const char *find_supply_name(struct device *dev)
{
	struct device_node *np;
	struct property *pp;
	int cpu = dev->id;
	const char *name = NULL;

	np = of_node_get(dev->of_node);

	/* This must be valid for sure */
	if (WARN_ON(!np))
		return NULL;

	/* Try "cpu0" for older DTs */
	if (!cpu) {
		pp = of_find_property(np, "cpu0-supply", NULL);
		if (pp) {
			name = "cpu0";
			goto node_put;
		}
	}

	pp = of_find_property(np, "cpu-supply", NULL);
	if (pp) {
		name = "cpu";
		goto node_put;
	}

	dev_dbg(dev, "no regulator for cpu%d\n", cpu);
node_put:
	of_node_put(np);
	return name;
}
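/*
 * Check that the clock and (optional) regulator for CPU0 are available, so
 * that probe can be deferred until all required resources are registered.
 */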
static int resources_available(void)
{
	struct device *cpu_dev;
	struct regulator *cpu_reg;
	struct clk *cpu_clk;
	int ret = 0;
	const char *name;

	cpu_dev = get_cpu_device(0);
	if (!cpu_dev) {
		pr_err("failed to get cpu0 device\n");
		return -ENODEV;
	}

	cpu_clk = clk_get(cpu_dev, NULL);
	ret = PTR_ERR_OR_ZERO(cpu_clk);
	if (ret) {
		/*
		 * If the CPU's clk node is present but the clock is not yet
		 * registered, we should try deferring probe.
		 */
		if (ret == -EPROBE_DEFER)
			dev_dbg(cpu_dev, "clock not ready, retry\n");
		else
			dev_err(cpu_dev, "failed to get clock: %d\n", ret);

		return ret;
	}

	clk_put(cpu_clk);

	name = find_supply_name(cpu_dev);
	/* Platform doesn't require a regulator */
	if (!name)
		return 0;

	cpu_reg = regulator_get_optional(cpu_dev, name);
	ret = PTR_ERR_OR_ZERO(cpu_reg);
	if (ret) {
		/*
		 * If the CPU's regulator supply node is present but the
		 * regulator is not yet registered, we should try deferring
		 * probe.
		 */
		if (ret == -EPROBE_DEFER)
			dev_dbg(cpu_dev, "cpu0 regulator not ready, retry\n");
		else
			dev_dbg(cpu_dev, "no regulator for cpu0: %d\n", ret);

		return ret;
	}

	regulator_put(cpu_reg);

	return 0;
}
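/*
 * Per-policy initialization: resolve the CPU device, clock, regulator and
 * OPP table, then populate the policy's frequency table and parameters.
 */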
static int cpufreq_init(struct cpufreq_policy *policy)
{
	struct cpufreq_frequency_table *freq_table;
	struct opp_table *opp_table = NULL;
	struct private_data *priv;
	struct device *cpu_dev;
	struct clk *cpu_clk;
	unsigned int transition_latency;
	bool fallback = false;
	const char *name;
	int ret;

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("failed to get cpu%d device\n", policy->cpu);
		return -ENODEV;
	}

	cpu_clk = clk_get(cpu_dev, NULL);
	if (IS_ERR(cpu_clk)) {
		ret = PTR_ERR(cpu_clk);
		dev_err(cpu_dev, "%s: failed to get clk: %d\n", __func__, ret);
		return ret;
	}

	/* Get OPP-sharing information from "operating-points-v2" bindings */
	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, policy->cpus);
	if (ret) {
		if (ret != -ENOENT)
			goto out_put_clk;

		/*
		 * operating-points-v2 is not supported; fall back to the old
		 * method of finding shared OPPs for backward compatibility if
		 * the platform hasn't set the sharing CPUs.
		 */
		if (dev_pm_opp_get_sharing_cpus(cpu_dev, policy->cpus))
			fallback = true;
	}

	/*
	 * The OPP layer will be taking care of regulators now, but it needs
	 * to know the name of the regulator first.
	 */
	name = find_supply_name(cpu_dev);
	if (name) {
		opp_table = dev_pm_opp_set_regulators(cpu_dev, &name, 1);
		if (IS_ERR(opp_table)) {
			ret = PTR_ERR(opp_table);
			dev_err(cpu_dev, "Failed to set regulator for cpu%d: %d\n",
				policy->cpu, ret);
			goto out_put_clk;
		}
	}

	/*
	 * Initialize OPP tables for all policy->cpus. They will be shared by
	 * all CPUs which have marked their CPUs shared with OPP bindings.
	 *
	 * For platforms not using operating-points-v2 bindings, we do this
	 * before updating policy->cpus. Otherwise, we will end up creating
	 * duplicate OPPs for policy->cpus.
	 *
	 * OPPs might be populated at runtime, so don't check for errors here.
	 */
	dev_pm_opp_of_cpumask_add_table(policy->cpus);

	/*
	 * But we do need an OPP table to function, so if it is not there yet,
	 * give the platform code a chance to provide it for us.
	 */
	ret = dev_pm_opp_get_opp_count(cpu_dev);
	if (ret <= 0) {
		dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
		ret = -EPROBE_DEFER;
		goto out_free_opp;
	}

	if (fallback) {
		cpumask_setall(policy->cpus);

		/*
		 * OPP tables are initialized only for policy->cpu, do it for
		 * the others as well.
		 */
		ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
		if (ret)
			dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
				__func__, ret);
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		goto out_free_opp;
	}

	priv->reg_name = name;
	priv->opp_table = opp_table;

	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
	if (ret) {
		dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
		goto out_free_priv;
	}

	priv->cpu_dev = cpu_dev;
	policy->driver_data = priv;
	policy->clk = cpu_clk;
	policy->freq_table = freq_table;

	policy->suspend_freq = dev_pm_opp_get_suspend_opp_freq(cpu_dev) / 1000;

	/* Support turbo/boost mode */
	if (policy_has_boost_freq(policy)) {
		/* This gets disabled by the core on driver unregister */
		ret = cpufreq_enable_boost_support();
		if (ret)
			goto out_free_cpufreq_table;
		cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
	}

	transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
	if (!transition_latency)
		transition_latency = CPUFREQ_ETERNAL;

	policy->cpuinfo.transition_latency = transition_latency;
	policy->dvfs_possible_from_any_cpu = true;

	return 0;

out_free_cpufreq_table:
	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
out_free_priv:
	kfree(priv);
out_free_opp:
	dev_pm_opp_of_cpumask_remove_table(policy->cpus);
	if (name)
		dev_pm_opp_put_regulators(opp_table);
out_put_clk:
	clk_put(cpu_clk);

	return ret;
}
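/* Per-policy teardown: release everything acquired in cpufreq_init(). */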
static int cpufreq_exit(struct cpufreq_policy *policy)
{
	struct private_data *priv = policy->driver_data;

	cpufreq_cooling_unregister(priv->cdev);
	dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
	dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
	if (priv->reg_name)
		dev_pm_opp_put_regulators(priv->opp_table);

	clk_put(policy->clk);
	kfree(priv);

	return 0;
}
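/*
 * Called by the cpufreq core once the policy is fully initialized; register
 * the policy's CPUs as a cooling device with the thermal framework.
 */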
static void cpufreq_ready(struct cpufreq_policy *policy)
{
	struct private_data *priv = policy->driver_data;

	priv->cdev = of_cpufreq_cooling_register(policy);
}
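/*
 * Generic device-tree based cpufreq driver: frequencies, voltages and OPP
 * sharing information are all described in the device tree and handled via
 * the OPP library.
 */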
static struct cpufreq_driver dt_cpufreq_driver = {
	.flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.verify = cpufreq_generic_frequency_table_verify,
	.target_index = set_target,
	.get = cpufreq_generic_get,
	.init = cpufreq_init,
	.exit = cpufreq_exit,
	.ready = cpufreq_ready,
	.name = "cpufreq-dt",
	.attr = cpufreq_dt_attr,
	.suspend = cpufreq_generic_suspend,
};
static int dt_cpufreq_probe(struct platform_device *pdev)
{
	struct cpufreq_dt_platform_data *data = dev_get_platdata(&pdev->dev);
	int ret;

	/*
	 * All per-cluster (CPUs sharing clock/voltages) initialization is done
	 * from ->init(). In probe(), we just need to make sure that the clk
	 * and regulators are available; otherwise, defer probe and retry.
	 *
	 * FIXME: Is checking this only for CPU0 sufficient?
	 */
	ret = resources_available();
	if (ret)
		return ret;

	if (data) {
		if (data->have_governor_per_policy)
			dt_cpufreq_driver.flags |= CPUFREQ_HAVE_GOVERNOR_PER_POLICY;

		dt_cpufreq_driver.resume = data->resume;
		if (data->suspend)
			dt_cpufreq_driver.suspend = data->suspend;
	}

	ret = cpufreq_register_driver(&dt_cpufreq_driver);
	if (ret)
		dev_err(&pdev->dev, "failed to register driver: %d\n", ret);

	return ret;
}
static int dt_cpufreq_remove(struct platform_device *pdev)
{
	cpufreq_unregister_driver(&dt_cpufreq_driver);
	return 0;
}

static struct platform_driver dt_cpufreq_platdrv = {
	.driver = {
		.name	= "cpufreq-dt",
	},
	.probe		= dt_cpufreq_probe,
	.remove		= dt_cpufreq_remove,
};
module_platform_driver(dt_cpufreq_platdrv);

MODULE_ALIAS("platform:cpufreq-dt");
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
MODULE_DESCRIPTION("Generic cpufreq driver");
MODULE_LICENSE("GPL");