/* drivers/cpufreq/mt8173-cpufreq.c */
/*
 * Copyright (c) 2015 Linaro Ltd.
 * Author: Pi-Cheng Chen <pi-cheng.chen@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
  14. #include <linux/clk.h>
  15. #include <linux/cpu.h>
  16. #include <linux/cpu_cooling.h>
  17. #include <linux/cpufreq.h>
  18. #include <linux/cpumask.h>
  19. #include <linux/module.h>
  20. #include <linux/of.h>
  21. #include <linux/platform_device.h>
  22. #include <linux/pm_opp.h>
  23. #include <linux/regulator/consumer.h>
  24. #include <linux/slab.h>
  25. #include <linux/thermal.h>
  26. #define MIN_VOLT_SHIFT (100000)
  27. #define MAX_VOLT_SHIFT (200000)
  28. #define MAX_VOLT_LIMIT (1150000)
  29. #define VOLT_TOL (10000)
  30. /*
  31. * The struct mtk_cpu_dvfs_info holds necessary information for doing CPU DVFS
  32. * on each CPU power/clock domain of Mediatek SoCs. Each CPU cluster in
  33. * Mediatek SoCs has two voltage inputs, Vproc and Vsram. In some cases the two
  34. * voltage inputs need to be controlled under a hardware limitation:
  35. * 100mV < Vsram - Vproc < 200mV
  36. *
  37. * When scaling the clock frequency of a CPU clock domain, the clock source
  38. * needs to be switched to another stable PLL clock temporarily until
  39. * the original PLL becomes stable at target frequency.
  40. */
  41. struct mtk_cpu_dvfs_info {
  42. struct cpumask cpus;
  43. struct device *cpu_dev;
  44. struct regulator *proc_reg;
  45. struct regulator *sram_reg;
  46. struct clk *cpu_clk;
  47. struct clk *inter_clk;
  48. struct thermal_cooling_device *cdev;
  49. struct list_head list_head;
  50. int intermediate_voltage;
  51. bool need_voltage_tracking;
  52. };
  53. static LIST_HEAD(dvfs_info_list);
  54. static struct mtk_cpu_dvfs_info *mtk_cpu_dvfs_info_lookup(int cpu)
  55. {
  56. struct mtk_cpu_dvfs_info *info;
  57. struct list_head *list;
  58. list_for_each(list, &dvfs_info_list) {
  59. info = list_entry(list, struct mtk_cpu_dvfs_info, list_head);
  60. if (cpumask_test_cpu(cpu, &info->cpus))
  61. return info;
  62. }
  63. return NULL;
  64. }
  65. static int mtk_cpufreq_voltage_tracking(struct mtk_cpu_dvfs_info *info,
  66. int new_vproc)
  67. {
  68. struct regulator *proc_reg = info->proc_reg;
  69. struct regulator *sram_reg = info->sram_reg;
  70. int old_vproc, old_vsram, new_vsram, vsram, vproc, ret;
  71. old_vproc = regulator_get_voltage(proc_reg);
  72. if (old_vproc < 0) {
  73. pr_err("%s: invalid Vproc value: %d\n", __func__, old_vproc);
  74. return old_vproc;
  75. }
  76. /* Vsram should not exceed the maximum allowed voltage of SoC. */
  77. new_vsram = min(new_vproc + MIN_VOLT_SHIFT, MAX_VOLT_LIMIT);
  78. if (old_vproc < new_vproc) {
  79. /*
  80. * When scaling up voltages, Vsram and Vproc scale up step
  81. * by step. At each step, set Vsram to (Vproc + 200mV) first,
  82. * then set Vproc to (Vsram - 100mV).
  83. * Keep doing it until Vsram and Vproc hit target voltages.
  84. */
  85. do {
  86. old_vsram = regulator_get_voltage(sram_reg);
  87. if (old_vsram < 0) {
  88. pr_err("%s: invalid Vsram value: %d\n",
  89. __func__, old_vsram);
  90. return old_vsram;
  91. }
  92. old_vproc = regulator_get_voltage(proc_reg);
  93. if (old_vproc < 0) {
  94. pr_err("%s: invalid Vproc value: %d\n",
  95. __func__, old_vproc);
  96. return old_vproc;
  97. }
  98. vsram = min(new_vsram, old_vproc + MAX_VOLT_SHIFT);
  99. if (vsram + VOLT_TOL >= MAX_VOLT_LIMIT) {
  100. vsram = MAX_VOLT_LIMIT;
  101. /*
  102. * If the target Vsram hits the maximum voltage,
  103. * try to set the exact voltage value first.
  104. */
  105. ret = regulator_set_voltage(sram_reg, vsram,
  106. vsram);
  107. if (ret)
  108. ret = regulator_set_voltage(sram_reg,
  109. vsram - VOLT_TOL,
  110. vsram);
  111. vproc = new_vproc;
  112. } else {
  113. ret = regulator_set_voltage(sram_reg, vsram,
  114. vsram + VOLT_TOL);
  115. vproc = vsram - MIN_VOLT_SHIFT;
  116. }
  117. if (ret)
  118. return ret;
  119. ret = regulator_set_voltage(proc_reg, vproc,
  120. vproc + VOLT_TOL);
  121. if (ret) {
  122. regulator_set_voltage(sram_reg, old_vsram,
  123. old_vsram);
  124. return ret;
  125. }
  126. } while (vproc < new_vproc || vsram < new_vsram);
  127. } else if (old_vproc > new_vproc) {
  128. /*
  129. * When scaling down voltages, Vsram and Vproc scale down step
  130. * by step. At each step, set Vproc to (Vsram - 200mV) first,
  131. * then set Vproc to (Vproc + 100mV).
  132. * Keep doing it until Vsram and Vproc hit target voltages.
  133. */
  134. do {
  135. old_vproc = regulator_get_voltage(proc_reg);
  136. if (old_vproc < 0) {
  137. pr_err("%s: invalid Vproc value: %d\n",
  138. __func__, old_vproc);
  139. return old_vproc;
  140. }
  141. old_vsram = regulator_get_voltage(sram_reg);
  142. if (old_vsram < 0) {
  143. pr_err("%s: invalid Vsram value: %d\n",
  144. __func__, old_vsram);
  145. return old_vsram;
  146. }
  147. vproc = max(new_vproc, old_vsram - MAX_VOLT_SHIFT);
  148. ret = regulator_set_voltage(proc_reg, vproc,
  149. vproc + VOLT_TOL);
  150. if (ret)
  151. return ret;
  152. if (vproc == new_vproc)
  153. vsram = new_vsram;
  154. else
  155. vsram = max(new_vsram, vproc + MIN_VOLT_SHIFT);
  156. if (vsram + VOLT_TOL >= MAX_VOLT_LIMIT) {
  157. vsram = MAX_VOLT_LIMIT;
  158. /*
  159. * If the target Vsram hits the maximum voltage,
  160. * try to set the exact voltage value first.
  161. */
  162. ret = regulator_set_voltage(sram_reg, vsram,
  163. vsram);
  164. if (ret)
  165. ret = regulator_set_voltage(sram_reg,
  166. vsram - VOLT_TOL,
  167. vsram);
  168. } else {
  169. ret = regulator_set_voltage(sram_reg, vsram,
  170. vsram + VOLT_TOL);
  171. }
  172. if (ret) {
  173. regulator_set_voltage(proc_reg, old_vproc,
  174. old_vproc);
  175. return ret;
  176. }
  177. } while (vproc > new_vproc + VOLT_TOL ||
  178. vsram > new_vsram + VOLT_TOL);
  179. }
  180. return 0;
  181. }
  182. static int mtk_cpufreq_set_voltage(struct mtk_cpu_dvfs_info *info, int vproc)
  183. {
  184. if (info->need_voltage_tracking)
  185. return mtk_cpufreq_voltage_tracking(info, vproc);
  186. else
  187. return regulator_set_voltage(info->proc_reg, vproc,
  188. vproc + VOLT_TOL);
  189. }
  190. static int mtk_cpufreq_set_target(struct cpufreq_policy *policy,
  191. unsigned int index)
  192. {
  193. struct cpufreq_frequency_table *freq_table = policy->freq_table;
  194. struct clk *cpu_clk = policy->clk;
  195. struct clk *armpll = clk_get_parent(cpu_clk);
  196. struct mtk_cpu_dvfs_info *info = policy->driver_data;
  197. struct device *cpu_dev = info->cpu_dev;
  198. struct dev_pm_opp *opp;
  199. long freq_hz, old_freq_hz;
  200. int vproc, old_vproc, inter_vproc, target_vproc, ret;
  201. inter_vproc = info->intermediate_voltage;
  202. old_freq_hz = clk_get_rate(cpu_clk);
  203. old_vproc = regulator_get_voltage(info->proc_reg);
  204. if (old_vproc < 0) {
  205. pr_err("%s: invalid Vproc value: %d\n", __func__, old_vproc);
  206. return old_vproc;
  207. }
  208. freq_hz = freq_table[index].frequency * 1000;
  209. rcu_read_lock();
  210. opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz);
  211. if (IS_ERR(opp)) {
  212. rcu_read_unlock();
  213. pr_err("cpu%d: failed to find OPP for %ld\n",
  214. policy->cpu, freq_hz);
  215. return PTR_ERR(opp);
  216. }
  217. vproc = dev_pm_opp_get_voltage(opp);
  218. rcu_read_unlock();
  219. /*
  220. * If the new voltage or the intermediate voltage is higher than the
  221. * current voltage, scale up voltage first.
  222. */
  223. target_vproc = (inter_vproc > vproc) ? inter_vproc : vproc;
  224. if (old_vproc < target_vproc) {
  225. ret = mtk_cpufreq_set_voltage(info, target_vproc);
  226. if (ret) {
  227. pr_err("cpu%d: failed to scale up voltage!\n",
  228. policy->cpu);
  229. mtk_cpufreq_set_voltage(info, old_vproc);
  230. return ret;
  231. }
  232. }
  233. /* Reparent the CPU clock to intermediate clock. */
  234. ret = clk_set_parent(cpu_clk, info->inter_clk);
  235. if (ret) {
  236. pr_err("cpu%d: failed to re-parent cpu clock!\n",
  237. policy->cpu);
  238. mtk_cpufreq_set_voltage(info, old_vproc);
  239. WARN_ON(1);
  240. return ret;
  241. }
  242. /* Set the original PLL to target rate. */
  243. ret = clk_set_rate(armpll, freq_hz);
  244. if (ret) {
  245. pr_err("cpu%d: failed to scale cpu clock rate!\n",
  246. policy->cpu);
  247. clk_set_parent(cpu_clk, armpll);
  248. mtk_cpufreq_set_voltage(info, old_vproc);
  249. return ret;
  250. }
  251. /* Set parent of CPU clock back to the original PLL. */
  252. ret = clk_set_parent(cpu_clk, armpll);
  253. if (ret) {
  254. pr_err("cpu%d: failed to re-parent cpu clock!\n",
  255. policy->cpu);
  256. mtk_cpufreq_set_voltage(info, inter_vproc);
  257. WARN_ON(1);
  258. return ret;
  259. }
  260. /*
  261. * If the new voltage is lower than the intermediate voltage or the
  262. * original voltage, scale down to the new voltage.
  263. */
  264. if (vproc < inter_vproc || vproc < old_vproc) {
  265. ret = mtk_cpufreq_set_voltage(info, vproc);
  266. if (ret) {
  267. pr_err("cpu%d: failed to scale down voltage!\n",
  268. policy->cpu);
  269. clk_set_parent(cpu_clk, info->inter_clk);
  270. clk_set_rate(armpll, old_freq_hz);
  271. clk_set_parent(cpu_clk, armpll);
  272. return ret;
  273. }
  274. }
  275. return 0;
  276. }
  277. static void mtk_cpufreq_ready(struct cpufreq_policy *policy)
  278. {
  279. struct mtk_cpu_dvfs_info *info = policy->driver_data;
  280. struct device_node *np = of_node_get(info->cpu_dev->of_node);
  281. if (WARN_ON(!np))
  282. return;
  283. if (of_find_property(np, "#cooling-cells", NULL)) {
  284. info->cdev = of_cpufreq_cooling_register(np,
  285. policy->related_cpus);
  286. if (IS_ERR(info->cdev)) {
  287. dev_err(info->cpu_dev,
  288. "running cpufreq without cooling device: %ld\n",
  289. PTR_ERR(info->cdev));
  290. info->cdev = NULL;
  291. }
  292. }
  293. of_node_put(np);
  294. }
  295. static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
  296. {
  297. struct device *cpu_dev;
  298. struct regulator *proc_reg = ERR_PTR(-ENODEV);
  299. struct regulator *sram_reg = ERR_PTR(-ENODEV);
  300. struct clk *cpu_clk = ERR_PTR(-ENODEV);
  301. struct clk *inter_clk = ERR_PTR(-ENODEV);
  302. struct dev_pm_opp *opp;
  303. unsigned long rate;
  304. int ret;
  305. cpu_dev = get_cpu_device(cpu);
  306. if (!cpu_dev) {
  307. pr_err("failed to get cpu%d device\n", cpu);
  308. return -ENODEV;
  309. }
  310. cpu_clk = clk_get(cpu_dev, "cpu");
  311. if (IS_ERR(cpu_clk)) {
  312. if (PTR_ERR(cpu_clk) == -EPROBE_DEFER)
  313. pr_warn("cpu clk for cpu%d not ready, retry.\n", cpu);
  314. else
  315. pr_err("failed to get cpu clk for cpu%d\n", cpu);
  316. ret = PTR_ERR(cpu_clk);
  317. return ret;
  318. }
  319. inter_clk = clk_get(cpu_dev, "intermediate");
  320. if (IS_ERR(inter_clk)) {
  321. if (PTR_ERR(inter_clk) == -EPROBE_DEFER)
  322. pr_warn("intermediate clk for cpu%d not ready, retry.\n",
  323. cpu);
  324. else
  325. pr_err("failed to get intermediate clk for cpu%d\n",
  326. cpu);
  327. ret = PTR_ERR(inter_clk);
  328. goto out_free_resources;
  329. }
  330. proc_reg = regulator_get_exclusive(cpu_dev, "proc");
  331. if (IS_ERR(proc_reg)) {
  332. if (PTR_ERR(proc_reg) == -EPROBE_DEFER)
  333. pr_warn("proc regulator for cpu%d not ready, retry.\n",
  334. cpu);
  335. else
  336. pr_err("failed to get proc regulator for cpu%d\n",
  337. cpu);
  338. ret = PTR_ERR(proc_reg);
  339. goto out_free_resources;
  340. }
  341. /* Both presence and absence of sram regulator are valid cases. */
  342. sram_reg = regulator_get_exclusive(cpu_dev, "sram");
  343. /* Get OPP-sharing information from "operating-points-v2" bindings */
  344. ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, &info->cpus);
  345. if (ret) {
  346. pr_err("failed to get OPP-sharing information for cpu%d\n",
  347. cpu);
  348. goto out_free_resources;
  349. }
  350. ret = dev_pm_opp_of_cpumask_add_table(&info->cpus);
  351. if (ret) {
  352. pr_warn("no OPP table for cpu%d\n", cpu);
  353. goto out_free_resources;
  354. }
  355. /* Search a safe voltage for intermediate frequency. */
  356. rate = clk_get_rate(inter_clk);
  357. rcu_read_lock();
  358. opp = dev_pm_opp_find_freq_ceil(cpu_dev, &rate);
  359. if (IS_ERR(opp)) {
  360. rcu_read_unlock();
  361. pr_err("failed to get intermediate opp for cpu%d\n", cpu);
  362. ret = PTR_ERR(opp);
  363. goto out_free_opp_table;
  364. }
  365. info->intermediate_voltage = dev_pm_opp_get_voltage(opp);
  366. rcu_read_unlock();
  367. info->cpu_dev = cpu_dev;
  368. info->proc_reg = proc_reg;
  369. info->sram_reg = IS_ERR(sram_reg) ? NULL : sram_reg;
  370. info->cpu_clk = cpu_clk;
  371. info->inter_clk = inter_clk;
  372. /*
  373. * If SRAM regulator is present, software "voltage tracking" is needed
  374. * for this CPU power domain.
  375. */
  376. info->need_voltage_tracking = !IS_ERR(sram_reg);
  377. return 0;
  378. out_free_opp_table:
  379. dev_pm_opp_of_cpumask_remove_table(&info->cpus);
  380. out_free_resources:
  381. if (!IS_ERR(proc_reg))
  382. regulator_put(proc_reg);
  383. if (!IS_ERR(sram_reg))
  384. regulator_put(sram_reg);
  385. if (!IS_ERR(cpu_clk))
  386. clk_put(cpu_clk);
  387. if (!IS_ERR(inter_clk))
  388. clk_put(inter_clk);
  389. return ret;
  390. }
  391. static void mtk_cpu_dvfs_info_release(struct mtk_cpu_dvfs_info *info)
  392. {
  393. if (!IS_ERR(info->proc_reg))
  394. regulator_put(info->proc_reg);
  395. if (!IS_ERR(info->sram_reg))
  396. regulator_put(info->sram_reg);
  397. if (!IS_ERR(info->cpu_clk))
  398. clk_put(info->cpu_clk);
  399. if (!IS_ERR(info->inter_clk))
  400. clk_put(info->inter_clk);
  401. dev_pm_opp_of_cpumask_remove_table(&info->cpus);
  402. }
  403. static int mtk_cpufreq_init(struct cpufreq_policy *policy)
  404. {
  405. struct mtk_cpu_dvfs_info *info;
  406. struct cpufreq_frequency_table *freq_table;
  407. int ret;
  408. info = mtk_cpu_dvfs_info_lookup(policy->cpu);
  409. if (!info) {
  410. pr_err("dvfs info for cpu%d is not initialized.\n",
  411. policy->cpu);
  412. return -EINVAL;
  413. }
  414. ret = dev_pm_opp_init_cpufreq_table(info->cpu_dev, &freq_table);
  415. if (ret) {
  416. pr_err("failed to init cpufreq table for cpu%d: %d\n",
  417. policy->cpu, ret);
  418. return ret;
  419. }
  420. ret = cpufreq_table_validate_and_show(policy, freq_table);
  421. if (ret) {
  422. pr_err("%s: invalid frequency table: %d\n", __func__, ret);
  423. goto out_free_cpufreq_table;
  424. }
  425. cpumask_copy(policy->cpus, &info->cpus);
  426. policy->driver_data = info;
  427. policy->clk = info->cpu_clk;
  428. return 0;
  429. out_free_cpufreq_table:
  430. dev_pm_opp_free_cpufreq_table(info->cpu_dev, &freq_table);
  431. return ret;
  432. }
  433. static int mtk_cpufreq_exit(struct cpufreq_policy *policy)
  434. {
  435. struct mtk_cpu_dvfs_info *info = policy->driver_data;
  436. cpufreq_cooling_unregister(info->cdev);
  437. dev_pm_opp_free_cpufreq_table(info->cpu_dev, &policy->freq_table);
  438. return 0;
  439. }
  440. static struct cpufreq_driver mt8173_cpufreq_driver = {
  441. .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
  442. CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
  443. .verify = cpufreq_generic_frequency_table_verify,
  444. .target_index = mtk_cpufreq_set_target,
  445. .get = cpufreq_generic_get,
  446. .init = mtk_cpufreq_init,
  447. .exit = mtk_cpufreq_exit,
  448. .ready = mtk_cpufreq_ready,
  449. .name = "mtk-cpufreq",
  450. .attr = cpufreq_generic_attr,
  451. };
  452. static int mt8173_cpufreq_probe(struct platform_device *pdev)
  453. {
  454. struct mtk_cpu_dvfs_info *info;
  455. struct list_head *list, *tmp;
  456. int cpu, ret;
  457. for_each_possible_cpu(cpu) {
  458. info = mtk_cpu_dvfs_info_lookup(cpu);
  459. if (info)
  460. continue;
  461. info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
  462. if (!info) {
  463. ret = -ENOMEM;
  464. goto release_dvfs_info_list;
  465. }
  466. ret = mtk_cpu_dvfs_info_init(info, cpu);
  467. if (ret) {
  468. dev_err(&pdev->dev,
  469. "failed to initialize dvfs info for cpu%d\n",
  470. cpu);
  471. goto release_dvfs_info_list;
  472. }
  473. list_add(&info->list_head, &dvfs_info_list);
  474. }
  475. ret = cpufreq_register_driver(&mt8173_cpufreq_driver);
  476. if (ret) {
  477. dev_err(&pdev->dev, "failed to register mtk cpufreq driver\n");
  478. goto release_dvfs_info_list;
  479. }
  480. return 0;
  481. release_dvfs_info_list:
  482. list_for_each_safe(list, tmp, &dvfs_info_list) {
  483. info = list_entry(list, struct mtk_cpu_dvfs_info, list_head);
  484. mtk_cpu_dvfs_info_release(info);
  485. list_del(list);
  486. }
  487. return ret;
  488. }
  489. static struct platform_driver mt8173_cpufreq_platdrv = {
  490. .driver = {
  491. .name = "mt8173-cpufreq",
  492. },
  493. .probe = mt8173_cpufreq_probe,
  494. };
  495. static int mt8173_cpufreq_driver_init(void)
  496. {
  497. struct platform_device *pdev;
  498. int err;
  499. if (!of_machine_is_compatible("mediatek,mt8173"))
  500. return -ENODEV;
  501. err = platform_driver_register(&mt8173_cpufreq_platdrv);
  502. if (err)
  503. return err;
  504. /*
  505. * Since there's no place to hold device registration code and no
  506. * device tree based way to match cpufreq driver yet, both the driver
  507. * and the device registration codes are put here to handle defer
  508. * probing.
  509. */
  510. pdev = platform_device_register_simple("mt8173-cpufreq", -1, NULL, 0);
  511. if (IS_ERR(pdev)) {
  512. pr_err("failed to register mtk-cpufreq platform device\n");
  513. return PTR_ERR(pdev);
  514. }
  515. return 0;
  516. }
  517. device_initcall(mt8173_cpufreq_driver_init);