common.c

/*
 * Marvell EBU SoC common clock handling
 *
 * Copyright (C) 2012 Marvell
 *
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 * Andrew Lunn <andrew@lunn.ch>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include "common.h"

/*
 * Core Clocks
 */

#define SSCG_CONF_MODE(reg)	(((reg) >> 16) & 0x3)
#define SSCG_SPREAD_DOWN	0x0
#define SSCG_SPREAD_UP		0x1
#define SSCG_SPREAD_CENTRAL	0x2
#define SSCG_CONF_LOW(reg)	(((reg) >> 8) & 0xFF)
#define SSCG_CONF_HIGH(reg)	((reg) & 0xFF)

static struct clk_onecell_data clk_data;

/*
 * This function can be used by the Kirkwood, the Armada 370, the
 * Armada XP and the Armada 375 SoCs. The name of the function was
 * chosen following the dt convention: using the first known SoC
 * compatible with it.
 */
u32 kirkwood_fix_sscg_deviation(u32 system_clk)
{
	struct device_node *sscg_np = NULL;
	void __iomem *sscg_map;
	u32 sscg_reg;
	s32 low_bound, high_bound;
	u64 freq_swing_half;

	sscg_np = of_find_node_by_name(NULL, "sscg");
	if (sscg_np == NULL) {
		pr_err("cannot get SSCG register node\n");
		return system_clk;
	}

	sscg_map = of_iomap(sscg_np, 0);
	if (sscg_map == NULL) {
		pr_err("cannot map SSCG register\n");
		goto out;
	}

	sscg_reg = readl(sscg_map);
	high_bound = SSCG_CONF_HIGH(sscg_reg);
	low_bound = SSCG_CONF_LOW(sscg_reg);

	if ((high_bound - low_bound) <= 0)
		goto out_unmap;
	/*
	 * From a Marvell engineer we got the following formula (when
	 * this code was written, the datasheet was erroneous):
	 * Spread percentage = 1/96 * (H - L) / H
	 * H = SSCG_High_Boundary
	 * L = SSCG_Low_Boundary
	 *
	 * As the deviation is half of the spread, this leads to the
	 * following formula in the code.
	 *
	 * To avoid an overflow without losing any significant digits,
	 * we have to use a 64-bit integer.
	 */
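	/*
	 * Worked example (hypothetical register values, for illustration
	 * only): with H = 0x21 (33), L = 0x1F (31) and
	 * system_clk = 2000000000 Hz,
	 * freq_swing_half = (33 - 31) * 2000000000 / (2 * 96 * 33)
	 *                 ~= 631313 Hz, so a down-spread configuration
	 * would yield roughly 1999.4 MHz instead of 2000 MHz.
	 */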
	freq_swing_half = (((u64)high_bound - (u64)low_bound)
			   * (u64)system_clk);
	do_div(freq_swing_half, (2 * 96 * high_bound));

	switch (SSCG_CONF_MODE(sscg_reg)) {
	case SSCG_SPREAD_DOWN:
		system_clk -= freq_swing_half;
		break;
	case SSCG_SPREAD_UP:
		system_clk += freq_swing_half;
		break;
	case SSCG_SPREAD_CENTRAL:
	default:
		break;
	}

out_unmap:
	iounmap(sscg_map);
out:
	of_node_put(sscg_np);
	return system_clk;
}

void __init mvebu_coreclk_setup(struct device_node *np,
				const struct coreclk_soc_desc *desc)
{
	const char *tclk_name = "tclk";
	const char *cpuclk_name = "cpuclk";
	void __iomem *base;
	unsigned long rate;
	int n;

	base = of_iomap(np, 0);
	if (WARN_ON(!base))
		return;

	/* Allocate struct for TCLK, cpu clk, and core ratio clocks */
	clk_data.clk_num = 2 + desc->num_ratios;
	clk_data.clks = kzalloc(clk_data.clk_num * sizeof(struct clk *),
				GFP_KERNEL);
	if (WARN_ON(!clk_data.clks)) {
		iounmap(base);
		return;
	}
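
	/*
	 * Index layout of clk_data.clks, as filled in below:
	 * [0] = TCLK, [1] = CPU clock, [2..] = the num_ratios fixed-factor
	 * ratio clocks. Consumers pick an entry by index through
	 * of_clk_src_onecell_get().
	 */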

	/* Register TCLK */
	of_property_read_string_index(np, "clock-output-names", 0,
				      &tclk_name);
	rate = desc->get_tclk_freq(base);
	clk_data.clks[0] = clk_register_fixed_rate(NULL, tclk_name, NULL,
						   CLK_IS_ROOT, rate);
	WARN_ON(IS_ERR(clk_data.clks[0]));

	/* Register CPU clock */
	of_property_read_string_index(np, "clock-output-names", 1,
				      &cpuclk_name);
	rate = desc->get_cpu_freq(base);

	if (desc->is_sscg_enabled && desc->fix_sscg_deviation
	    && desc->is_sscg_enabled(base))
		rate = desc->fix_sscg_deviation(rate);

	clk_data.clks[1] = clk_register_fixed_rate(NULL, cpuclk_name, NULL,
						   CLK_IS_ROOT, rate);
	WARN_ON(IS_ERR(clk_data.clks[1]));

	/* Register fixed-factor clocks derived from CPU clock */
	for (n = 0; n < desc->num_ratios; n++) {
		const char *rclk_name = desc->ratios[n].name;
		int mult, div;

		of_property_read_string_index(np, "clock-output-names",
					      2+n, &rclk_name);
		desc->get_clk_ratio(base, desc->ratios[n].id, &mult, &div);
		clk_data.clks[2+n] = clk_register_fixed_factor(NULL, rclk_name,
					cpuclk_name, 0, mult, div);
		WARN_ON(IS_ERR(clk_data.clks[2+n]));
	}

	/* SAR register isn't needed anymore */
	iounmap(base);

	of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
}
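
/*
 * Illustrative device tree usage (hypothetical node and cell values):
 * with of_clk_src_onecell_get() registered above, a consumer selects one
 * of the clocks registered here by its index in clk_data.clks, e.g.
 *
 *	clocks = <&coreclk 1>;
 *
 * picks entry 1, the CPU clock. Entries in the provider's optional
 * "clock-output-names" property override the default "tclk"/"cpuclk"
 * and ratio-clock names used above.
 */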

/*
 * Clock Gating Control
 */

DEFINE_SPINLOCK(ctrl_gating_lock);

struct clk_gating_ctrl {
	spinlock_t *lock;
	struct clk **gates;
	int num_gates;
};

#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)

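/*
 * Translate a consumer clock specifier into one of the registered gates.
 * Illustrative example (hypothetical cell value): a consumer property
 * such as "clocks = <&gateclk 23>" is resolved by matching args[0] (23)
 * against the bit_idx of each registered gate, so the index used in the
 * device tree corresponds directly to a bit in the gating register.
 */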
static struct clk *clk_gating_get_src(
	struct of_phandle_args *clkspec, void *data)
{
	struct clk_gating_ctrl *ctrl = (struct clk_gating_ctrl *)data;
	int n;

	if (clkspec->args_count < 1)
		return ERR_PTR(-EINVAL);

	for (n = 0; n < ctrl->num_gates; n++) {
		struct clk_gate *gate =
			to_clk_gate(__clk_get_hw(ctrl->gates[n]));
		if (clkspec->args[0] == gate->bit_idx)
			return ctrl->gates[n];
	}
	return ERR_PTR(-ENODEV);
}

void __init mvebu_clk_gating_setup(struct device_node *np,
				   const struct clk_gating_soc_desc *desc)
{
	struct clk_gating_ctrl *ctrl;
	struct clk *clk;
	void __iomem *base;
	const char *default_parent = NULL;
	int n;

	base = of_iomap(np, 0);
	if (WARN_ON(!base))
		return;

	clk = of_clk_get(np, 0);
	if (!IS_ERR(clk)) {
		default_parent = __clk_get_name(clk);
		clk_put(clk);
	}

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (WARN_ON(!ctrl))
		goto ctrl_out;

	/* lock must already be initialized */
	ctrl->lock = &ctrl_gating_lock;

	/* Count, allocate, and register clock gates */
	for (n = 0; desc[n].name;)
		n++;

	ctrl->num_gates = n;
	ctrl->gates = kzalloc(ctrl->num_gates * sizeof(struct clk *),
			      GFP_KERNEL);
	if (WARN_ON(!ctrl->gates))
		goto gates_out;

	for (n = 0; n < ctrl->num_gates; n++) {
		const char *parent =
			(desc[n].parent) ? desc[n].parent : default_parent;
		ctrl->gates[n] = clk_register_gate(NULL, desc[n].name, parent,
					desc[n].flags, base, desc[n].bit_idx,
					0, ctrl->lock);
		WARN_ON(IS_ERR(ctrl->gates[n]));
	}

	of_clk_add_provider(np, clk_gating_get_src, ctrl);

	return;

gates_out:
	kfree(ctrl);
ctrl_out:
	iounmap(base);
}
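
/*
 * Illustrative (hypothetical) caller in a SoC-specific file, assuming the
 * clk_gating_soc_desc layout declared in common.h ({ name, parent,
 * bit_idx, flags }) and the NULL-name sentinel expected by the counting
 * loop above:
 *
 *	static const struct clk_gating_soc_desc example_gating_desc[] __initconst = {
 *		{ "usb0", NULL, 3 },
 *		{ "sata0", NULL, 15 },
 *		{ }
 *	};
 *
 *	static void __init example_clk_gating_init(struct device_node *np)
 *	{
 *		mvebu_clk_gating_setup(np, example_gating_desc);
 *	}
 */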