cpufreq-dt.c

/*
 * Copyright (C) 2012 Freescale Semiconductor, Inc.
 *
 * Copyright (C) 2014 Linaro.
 * Viresh Kumar <viresh.kumar@linaro.org>
 *
 * The OPP code in function set_target() is reused from
 * drivers/cpufreq/omap-cpufreq.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpu_cooling.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/thermal.h>
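
/*
 * Per-policy driver data: the CPU device, its (optional) supply regulator,
 * an optional thermal cooling device and the DT "voltage-tolerance" value.
 */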
struct private_data {
	struct device *cpu_dev;
	struct regulator *cpu_reg;
	struct thermal_cooling_device *cdev;
	unsigned int voltage_tolerance; /* in percentage */
};
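
/*
 * DVFS transition for one policy: look up the target OPP voltage (when a
 * regulator is available), raise the voltage before scaling the frequency up
 * or lower it after scaling down, and set the CPU clock rate in between.
 */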
static int set_target(struct cpufreq_policy *policy, unsigned int index)
{
	struct dev_pm_opp *opp;
	struct cpufreq_frequency_table *freq_table = policy->freq_table;
	struct clk *cpu_clk = policy->clk;
	struct private_data *priv = policy->driver_data;
	struct device *cpu_dev = priv->cpu_dev;
	struct regulator *cpu_reg = priv->cpu_reg;
	unsigned long volt = 0, volt_old = 0, tol = 0;
	unsigned int old_freq, new_freq;
	long freq_Hz, freq_exact;
	int ret;

	freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
	if (freq_Hz <= 0)
		freq_Hz = freq_table[index].frequency * 1000;

	freq_exact = freq_Hz;
	new_freq = freq_Hz / 1000;
	old_freq = clk_get_rate(cpu_clk) / 1000;

	if (!IS_ERR(cpu_reg)) {
		rcu_read_lock();
		opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz);
		if (IS_ERR(opp)) {
			rcu_read_unlock();
			dev_err(cpu_dev, "failed to find OPP for %ld\n",
				freq_Hz);
			return PTR_ERR(opp);
		}
		volt = dev_pm_opp_get_voltage(opp);
		rcu_read_unlock();
		tol = volt * priv->voltage_tolerance / 100;
		volt_old = regulator_get_voltage(cpu_reg);
	}

	dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n",
		old_freq / 1000, volt_old ? volt_old / 1000 : -1,
		new_freq / 1000, volt ? volt / 1000 : -1);

	/* scaling up? scale voltage before frequency */
	if (!IS_ERR(cpu_reg) && new_freq > old_freq) {
		ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
		if (ret) {
			dev_err(cpu_dev, "failed to scale voltage up: %d\n",
				ret);
			return ret;
		}
	}

	ret = clk_set_rate(cpu_clk, freq_exact);
	if (ret) {
		dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
		if (!IS_ERR(cpu_reg))
			regulator_set_voltage_tol(cpu_reg, volt_old, tol);
		return ret;
	}

	/* scaling down? scale voltage after frequency */
	if (!IS_ERR(cpu_reg) && new_freq < old_freq) {
		ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
		if (ret) {
			dev_err(cpu_dev, "failed to scale voltage down: %d\n",
				ret);
			clk_set_rate(cpu_clk, old_freq * 1000);
		}
	}

	return ret;
}
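
/*
 * Fetch the CPU device, its optional supply regulator ("cpu0" for older DTs,
 * then "cpu") and its clock. Returns -EPROBE_DEFER when the regulator or the
 * clock is described in DT but not yet registered.
 */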
static int allocate_resources(int cpu, struct device **cdev,
			      struct regulator **creg, struct clk **cclk)
{
	struct device *cpu_dev;
	struct regulator *cpu_reg;
	struct clk *cpu_clk;
	int ret = 0;
	char *reg_cpu0 = "cpu0", *reg_cpu = "cpu", *reg;

	cpu_dev = get_cpu_device(cpu);
	if (!cpu_dev) {
		pr_err("failed to get cpu%d device\n", cpu);
		return -ENODEV;
	}

	/* Try "cpu0" for older DTs */
	if (!cpu)
		reg = reg_cpu0;
	else
		reg = reg_cpu;

try_again:
	cpu_reg = regulator_get_optional(cpu_dev, reg);
	if (IS_ERR(cpu_reg)) {
		/*
		 * If cpu's regulator supply node is present, but regulator is
		 * not yet registered, we should try deferring probe.
		 */
		if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) {
			dev_dbg(cpu_dev, "cpu%d regulator not ready, retry\n",
				cpu);
			return -EPROBE_DEFER;
		}

		/* Try with "cpu-supply" */
		if (reg == reg_cpu0) {
			reg = reg_cpu;
			goto try_again;
		}

		dev_warn(cpu_dev, "failed to get cpu%d regulator: %ld\n",
			 cpu, PTR_ERR(cpu_reg));
	}

	cpu_clk = clk_get(cpu_dev, NULL);
	if (IS_ERR(cpu_clk)) {
		/* put regulator */
		if (!IS_ERR(cpu_reg))
			regulator_put(cpu_reg);

		ret = PTR_ERR(cpu_clk);

		/*
		 * If cpu's clk node is present, but clock is not yet
		 * registered, we should try deferring probe.
		 */
		if (ret == -EPROBE_DEFER)
			dev_dbg(cpu_dev, "cpu%d clock not ready, retry\n", cpu);
		else
			dev_err(cpu_dev, "failed to get cpu%d clock: %d\n", cpu,
				ret);
	} else {
		*cdev = cpu_dev;
		*creg = cpu_reg;
		*cclk = cpu_clk;
	}

	return ret;
}
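
/*
 * Per-policy init: build the frequency table from the OPPs, read
 * "voltage-tolerance" and "clock-latency" from DT, add the regulator ramp
 * time to the transition latency, and register a cooling device when
 * "#cooling-cells" is present.
 */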
static int cpufreq_init(struct cpufreq_policy *policy)
{
	struct cpufreq_frequency_table *freq_table;
	struct thermal_cooling_device *cdev;
	struct device_node *np;
	struct private_data *priv;
	struct device *cpu_dev;
	struct regulator *cpu_reg;
	struct clk *cpu_clk;
	unsigned int transition_latency;
	int ret;

	ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk);
	if (ret) {
		pr_err("%s: Failed to allocate resources: %d\n", __func__, ret);
		return ret;
	}

	np = of_node_get(cpu_dev->of_node);
	if (!np) {
		dev_err(cpu_dev, "failed to find cpu%d node\n", policy->cpu);
		ret = -ENOENT;
		goto out_put_reg_clk;
	}

	/* OPPs might be populated at runtime, don't check for error here */
	of_init_opp_table(cpu_dev);

	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
	if (ret) {
		dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
		goto out_put_node;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		goto out_free_table;
	}

	of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance);

	if (of_property_read_u32(np, "clock-latency", &transition_latency))
		transition_latency = CPUFREQ_ETERNAL;

	if (!IS_ERR(cpu_reg)) {
		struct dev_pm_opp *opp;
		unsigned long min_uV, max_uV;
		int i;

		/*
		 * OPP is maintained in order of increasing frequency, and
		 * freq_table initialised from OPP is therefore sorted in the
		 * same order.
		 */
		for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
			;
		rcu_read_lock();
		opp = dev_pm_opp_find_freq_exact(cpu_dev,
				freq_table[0].frequency * 1000, true);
		min_uV = dev_pm_opp_get_voltage(opp);
		opp = dev_pm_opp_find_freq_exact(cpu_dev,
				freq_table[i-1].frequency * 1000, true);
		max_uV = dev_pm_opp_get_voltage(opp);
		rcu_read_unlock();
		ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
		if (ret > 0)
			transition_latency += ret * 1000;
	}

	/*
	 * For now, just loading the cooling device;
	 * thermal DT code takes care of matching them.
	 */
	if (of_find_property(np, "#cooling-cells", NULL)) {
		cdev = of_cpufreq_cooling_register(np, cpu_present_mask);
		if (IS_ERR(cdev))
			dev_err(cpu_dev,
				"running cpufreq without cooling device: %ld\n",
				PTR_ERR(cdev));
		else
			priv->cdev = cdev;
	}

	priv->cpu_dev = cpu_dev;
	priv->cpu_reg = cpu_reg;
	policy->driver_data = priv;

	policy->clk = cpu_clk;
	ret = cpufreq_generic_init(policy, freq_table, transition_latency);
	if (ret)
		goto out_cooling_unregister;

	of_node_put(np);

	return 0;

out_cooling_unregister:
	cpufreq_cooling_unregister(priv->cdev);
	kfree(priv);
out_free_table:
	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
out_put_node:
	of_node_put(np);
out_put_reg_clk:
	clk_put(cpu_clk);
	if (!IS_ERR(cpu_reg))
		regulator_put(cpu_reg);

	return ret;
}
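
/* Undo cpufreq_init(): release cooling device, freq table, clock and regulator. */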
static int cpufreq_exit(struct cpufreq_policy *policy)
{
	struct private_data *priv = policy->driver_data;

	cpufreq_cooling_unregister(priv->cdev);
	dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
	clk_put(policy->clk);
	if (!IS_ERR(priv->cpu_reg))
		regulator_put(priv->cpu_reg);
	kfree(priv);

	return 0;
}

static struct cpufreq_driver dt_cpufreq_driver = {
	.flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.verify = cpufreq_generic_frequency_table_verify,
	.target_index = set_target,
	.get = cpufreq_generic_get,
	.init = cpufreq_init,
	.exit = cpufreq_exit,
	.name = "cpufreq-dt",
	.attr = cpufreq_generic_attr,
};
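
/*
 * Probe only checks that CPU0's clock and (optional) regulator are available,
 * deferring otherwise; the real per-cluster setup happens in ->init().
 */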
static int dt_cpufreq_probe(struct platform_device *pdev)
{
	struct device *cpu_dev;
	struct regulator *cpu_reg;
	struct clk *cpu_clk;
	int ret;

	/*
	 * All per-cluster (CPUs sharing clock/voltages) initialization is done
	 * from ->init(). In probe(), we just need to make sure that clk and
	 * regulators are available. Else defer probe and retry.
	 *
	 * FIXME: Is checking this only for CPU0 sufficient ?
	 */
	ret = allocate_resources(0, &cpu_dev, &cpu_reg, &cpu_clk);
	if (ret)
		return ret;

	clk_put(cpu_clk);
	if (!IS_ERR(cpu_reg))
		regulator_put(cpu_reg);

	ret = cpufreq_register_driver(&dt_cpufreq_driver);
	if (ret)
		dev_err(cpu_dev, "failed to register driver: %d\n", ret);

	return ret;
}

static int dt_cpufreq_remove(struct platform_device *pdev)
{
	cpufreq_unregister_driver(&dt_cpufreq_driver);
	return 0;
}

static struct platform_driver dt_cpufreq_platdrv = {
	.driver = {
		.name = "cpufreq-dt",
		.owner = THIS_MODULE,
	},
	.probe = dt_cpufreq_probe,
	.remove = dt_cpufreq_remove,
};
module_platform_driver(dt_cpufreq_platdrv);

MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
MODULE_DESCRIPTION("Generic cpufreq driver");
MODULE_LICENSE("GPL");