clk-cpu.c 6.5 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248
  1. /*
  2. * Marvell MVEBU CPU clock handling.
  3. *
  4. * Copyright (C) 2012 Marvell
  5. *
  6. * Gregory CLEMENT <gregory.clement@free-electrons.com>
  7. *
  8. * This file is licensed under the terms of the GNU General Public
  9. * License version 2. This program is licensed "as is" without any
  10. * warranty of any kind, whether express or implied.
  11. */
#include <linux/kernel.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/delay.h>
#include <linux/mvebu-pmsu.h>
#include <asm/smp_plat.h>

/* System-control "clock complex" register map (offsets from reg_base) */
#define SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET 0x0
#define SYS_CTRL_CLK_DIVIDER_CTRL_RESET_ALL 0xff
#define SYS_CTRL_CLK_DIVIDER_CTRL_RESET_SHIFT 8
#define SYS_CTRL_CLK_DIVIDER_CTRL2_OFFSET 0x8
#define SYS_CTRL_CLK_DIVIDER_CTRL2_NBCLK_RATIO_SHIFT 16
#define SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET 0xC
/* Each CPU owns an 8-bit lane in the VALUE register; 6 bits are used */
#define SYS_CTRL_CLK_DIVIDER_MASK 0x3F

/* PMU DFS per-CPU register: ratio field layout */
#define PMU_DFS_RATIO_SHIFT 16
#define PMU_DFS_RATIO_MASK 0x3F

#define MAX_CPU 4
/*
 * Per-CPU clock instance; one is registered for every "cpu" DT node
 * found at setup time.
 */
struct cpu_clk {
	struct clk_hw hw;	/* clk framework handle (embedded) */
	int cpu;		/* CPU index from the DT "reg" cell */
	const char *clk_name;	/* "cpuN", kzalloc'd at setup */
	const char *parent_name;
	void __iomem *reg_base;	/* clock-complex registers */
	void __iomem *pmu_dfs;	/* per-CPU PMU DFS register, NULL if unmapped */
};

/* Clock table handed to the of_clk onecell provider */
static struct clk **clks;
static struct clk_onecell_data clk_data;

#define to_cpu_clk(p) container_of(p, struct cpu_clk, hw)
  42. static unsigned long clk_cpu_recalc_rate(struct clk_hw *hwclk,
  43. unsigned long parent_rate)
  44. {
  45. struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
  46. u32 reg, div;
  47. reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);
  48. div = (reg >> (cpuclk->cpu * 8)) & SYS_CTRL_CLK_DIVIDER_MASK;
  49. return parent_rate / div;
  50. }
  51. static long clk_cpu_round_rate(struct clk_hw *hwclk, unsigned long rate,
  52. unsigned long *parent_rate)
  53. {
  54. /* Valid ratio are 1:1, 1:2 and 1:3 */
  55. u32 div;
  56. div = *parent_rate / rate;
  57. if (div == 0)
  58. div = 1;
  59. else if (div > 3)
  60. div = 3;
  61. return *parent_rate / div;
  62. }
/*
 * Program the CPU clock divider directly through the clock-complex
 * registers.  Used while the clock is gated; the live-clock path goes
 * through the PMU DFS instead (clk_cpu_on_set_rate).
 *
 * NOTE(review): div is not clamped here; a rate above parent_rate
 * would write a zero divider.  Presumably callers always pass a rate
 * obtained from clk_cpu_round_rate — confirm.
 */
static int clk_cpu_off_set_rate(struct clk_hw *hwclk, unsigned long rate,
				unsigned long parent_rate)
{
	struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
	u32 reg, div;
	u32 reload_mask;

	/* Write the new divider into this CPU's 8-bit lane only */
	div = parent_rate / rate;
	reg = (readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET)
		& (~(SYS_CTRL_CLK_DIVIDER_MASK << (cpuclk->cpu * 8))))
		| (div << (cpuclk->cpu * 8));
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);

	/* Set clock divider reload smooth bit mask */
	reload_mask = 1 << (20 + cpuclk->cpu);

	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
	    | reload_mask;
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

	/* Now trigger the clock update (bit 24 is the reload request) */
	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
	    | 1 << 24;
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

	/* Wait for clocks to settle down then clear reload request */
	udelay(1000);
	reg &= ~(reload_mask | 1 << 24);
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
	udelay(1000);

	return 0;
}
/*
 * Change the CPU frequency while the clock is running, by programming
 * the PMU Dynamic Frequency Scaling ratio and asking the PMSU to
 * perform the actual switch.
 *
 * Returns -ENODEV when the PMU DFS registers are not available, or
 * the result of mvebu_pmsu_dfs_request().
 */
static int clk_cpu_on_set_rate(struct clk_hw *hwclk, unsigned long rate,
			       unsigned long parent_rate)
{
	u32 reg;
	unsigned long fabric_div, target_div, cur_rate;
	struct cpu_clk *cpuclk = to_cpu_clk(hwclk);

	/*
	 * PMU DFS registers are not mapped, Device Tree does not
	 * describes them. We cannot change the frequency dynamically.
	 */
	if (!cpuclk->pmu_dfs)
		return -ENODEV;

	cur_rate = __clk_get_rate(hwclk->clk);

	/* Read the fabric (NBCLK) ratio, shared by all CPUs */
	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL2_OFFSET);
	fabric_div = (reg >> SYS_CTRL_CLK_DIVIDER_CTRL2_NBCLK_RATIO_SHIFT) &
		SYS_CTRL_CLK_DIVIDER_MASK;

	/*
	 * Only a 2x step relative to the current rate selects the
	 * faster divider; any other request falls through to the
	 * fabric-bound (slower) ratio.
	 */
	/* Frequency is going up */
	if (rate == 2 * cur_rate)
		target_div = fabric_div / 2;
	/* Frequency is going down */
	else
		target_div = fabric_div;

	if (target_div == 0)
		target_div = 1;

	/* Program the target ratio into this CPU's PMU DFS register */
	reg = readl(cpuclk->pmu_dfs);
	reg &= ~(PMU_DFS_RATIO_MASK << PMU_DFS_RATIO_SHIFT);
	reg |= (target_div << PMU_DFS_RATIO_SHIFT);
	writel(reg, cpuclk->pmu_dfs);

	/* Arm the divider reset for all CPUs before the DFS request */
	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
	reg |= (SYS_CTRL_CLK_DIVIDER_CTRL_RESET_ALL <<
		SYS_CTRL_CLK_DIVIDER_CTRL_RESET_SHIFT);
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

	/* The PMSU firmware helper performs the actual transition */
	return mvebu_pmsu_dfs_request(cpuclk->cpu);
}
  124. static int clk_cpu_set_rate(struct clk_hw *hwclk, unsigned long rate,
  125. unsigned long parent_rate)
  126. {
  127. if (__clk_is_enabled(hwclk->clk))
  128. return clk_cpu_on_set_rate(hwclk, rate, parent_rate);
  129. else
  130. return clk_cpu_off_set_rate(hwclk, rate, parent_rate);
  131. }
/* CPU clock operations: rate control only, no gating ops */
static const struct clk_ops cpu_ops = {
	.recalc_rate = clk_cpu_recalc_rate,
	.round_rate = clk_cpu_round_rate,
	.set_rate = clk_cpu_set_rate,
};
  137. static void __init of_cpu_clk_setup(struct device_node *node)
  138. {
  139. struct cpu_clk *cpuclk;
  140. void __iomem *clock_complex_base = of_iomap(node, 0);
  141. void __iomem *pmu_dfs_base = of_iomap(node, 1);
  142. int ncpus = 0;
  143. struct device_node *dn;
  144. if (clock_complex_base == NULL) {
  145. pr_err("%s: clock-complex base register not set\n",
  146. __func__);
  147. return;
  148. }
  149. if (pmu_dfs_base == NULL)
  150. pr_warn("%s: pmu-dfs base register not set, dynamic frequency scaling not available\n",
  151. __func__);
  152. for_each_node_by_type(dn, "cpu")
  153. ncpus++;
  154. cpuclk = kzalloc(ncpus * sizeof(*cpuclk), GFP_KERNEL);
  155. if (WARN_ON(!cpuclk))
  156. goto cpuclk_out;
  157. clks = kzalloc(ncpus * sizeof(*clks), GFP_KERNEL);
  158. if (WARN_ON(!clks))
  159. goto clks_out;
  160. for_each_node_by_type(dn, "cpu") {
  161. struct clk_init_data init;
  162. struct clk *clk;
  163. struct clk *parent_clk;
  164. char *clk_name = kzalloc(5, GFP_KERNEL);
  165. int cpu, err;
  166. if (WARN_ON(!clk_name))
  167. goto bail_out;
  168. err = of_property_read_u32(dn, "reg", &cpu);
  169. if (WARN_ON(err))
  170. goto bail_out;
  171. sprintf(clk_name, "cpu%d", cpu);
  172. parent_clk = of_clk_get(node, 0);
  173. cpuclk[cpu].parent_name = __clk_get_name(parent_clk);
  174. cpuclk[cpu].clk_name = clk_name;
  175. cpuclk[cpu].cpu = cpu;
  176. cpuclk[cpu].reg_base = clock_complex_base;
  177. if (pmu_dfs_base)
  178. cpuclk[cpu].pmu_dfs = pmu_dfs_base + 4 * cpu;
  179. cpuclk[cpu].hw.init = &init;
  180. init.name = cpuclk[cpu].clk_name;
  181. init.ops = &cpu_ops;
  182. init.flags = 0;
  183. init.parent_names = &cpuclk[cpu].parent_name;
  184. init.num_parents = 1;
  185. clk = clk_register(NULL, &cpuclk[cpu].hw);
  186. if (WARN_ON(IS_ERR(clk)))
  187. goto bail_out;
  188. clks[cpu] = clk;
  189. }
  190. clk_data.clk_num = MAX_CPU;
  191. clk_data.clks = clks;
  192. of_clk_add_provider(node, of_clk_src_onecell_get, &clk_data);
  193. return;
  194. bail_out:
  195. kfree(clks);
  196. while(ncpus--)
  197. kfree(cpuclk[ncpus].clk_name);
  198. clks_out:
  199. kfree(cpuclk);
  200. cpuclk_out:
  201. iounmap(clock_complex_base);
  202. }
/* Bind the setup routine to the Armada XP CPU clock compatible string */
CLK_OF_DECLARE(armada_xp_cpu_clock, "marvell,armada-xp-cpu-clock",
	       of_cpu_clk_setup);