  1. /*
  2. * Copyright (C) 2015 Atmel Corporation,
  3. * Nicolas Ferre <nicolas.ferre@atmel.com>
  4. *
  5. * Based on clk-programmable & clk-peripheral drivers by Boris BREZILLON.
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License as published by
  9. * the Free Software Foundation; either version 2 of the License, or
  10. * (at your option) any later version.
  11. *
  12. */
  13. #include <linux/clk-provider.h>
  14. #include <linux/clkdev.h>
  15. #include <linux/clk/at91_pmc.h>
  16. #include <linux/of.h>
  17. #include <linux/mfd/syscon.h>
  18. #include <linux/regmap.h>
  19. #include "pmc.h"
/* Maximum value of the PCR GCKDIV field; the applied divisor is GCKDIV + 1 */
#define GENERATED_MAX_DIV	255

/* Index of the audio PLL among the parents listed in the device tree */
#define GCK_INDEX_DT_AUDIO_PLL	5

/*
 * One generated clock (GCK) instance.  Parent selection and divisor are
 * cached here and only written to hardware when the clock is enabled.
 */
struct clk_generated {
	struct clk_hw hw;		/* common clock framework handle */
	struct regmap *regmap;		/* PMC register map */
	struct clk_range range;		/* allowed output rate range (max == 0 means no limit) */
	spinlock_t *lock;		/* serializes AT91_PMC_PCR access sequences */
	u32 id;				/* peripheral ID written into PCR to address this clock */
	u32 gckdiv;			/* cached GCKDIV field value (divisor - 1) */
	u8 parent_id;			/* cached GCKCSS parent selection */
	bool audio_pll_allowed;		/* true if this gck may change the audio PLL rate */
};

#define to_clk_generated(hw) \
	container_of(hw, struct clk_generated, hw)
/*
 * Enable the generated clock: program the cached parent selection
 * (GCKCSS) and divisor (GCKDIV) into PCR and set GCKEN.
 *
 * PCR is a shared, indexed register: the peripheral ID must first be
 * written to select which peripheral the following read-modify-write
 * applies to, so the two accesses are done under the PMC lock.
 */
static int clk_generated_enable(struct clk_hw *hw)
{
	struct clk_generated *gck = to_clk_generated(hw);
	unsigned long flags;

	pr_debug("GCLK: %s, gckdiv = %d, parent id = %d\n",
		 __func__, gck->gckdiv, gck->parent_id);

	spin_lock_irqsave(gck->lock, flags);
	/* Select which peripheral the following PCR access targets. */
	regmap_write(gck->regmap, AT91_PMC_PCR,
		     (gck->id & AT91_PMC_PCR_PID_MASK));
	/* CMD = write command; program CSS, DIV and the enable bit. */
	regmap_update_bits(gck->regmap, AT91_PMC_PCR,
			   AT91_PMC_PCR_GCKDIV_MASK | AT91_PMC_PCR_GCKCSS_MASK |
			   AT91_PMC_PCR_CMD | AT91_PMC_PCR_GCKEN,
			   AT91_PMC_PCR_GCKCSS(gck->parent_id) |
			   AT91_PMC_PCR_CMD |
			   AT91_PMC_PCR_GCKDIV(gck->gckdiv) |
			   AT91_PMC_PCR_GCKEN);
	spin_unlock_irqrestore(gck->lock, flags);
	return 0;
}
/*
 * Gate the generated clock: issue a PCR write command (CMD set) with
 * GCKEN cleared.  The cached parent/divisor configuration is untouched.
 */
static void clk_generated_disable(struct clk_hw *hw)
{
	struct clk_generated *gck = to_clk_generated(hw);
	unsigned long flags;

	spin_lock_irqsave(gck->lock, flags);
	/* Select which peripheral the following PCR access targets. */
	regmap_write(gck->regmap, AT91_PMC_PCR,
		     (gck->id & AT91_PMC_PCR_PID_MASK));
	/* CMD set, GCKEN cleared: write command that disables the clock. */
	regmap_update_bits(gck->regmap, AT91_PMC_PCR,
			   AT91_PMC_PCR_CMD | AT91_PMC_PCR_GCKEN,
			   AT91_PMC_PCR_CMD);
	spin_unlock_irqrestore(gck->lock, flags);
}
  65. static int clk_generated_is_enabled(struct clk_hw *hw)
  66. {
  67. struct clk_generated *gck = to_clk_generated(hw);
  68. unsigned long flags;
  69. unsigned int status;
  70. spin_lock_irqsave(gck->lock, flags);
  71. regmap_write(gck->regmap, AT91_PMC_PCR,
  72. (gck->id & AT91_PMC_PCR_PID_MASK));
  73. regmap_read(gck->regmap, AT91_PMC_PCR, &status);
  74. spin_unlock_irqrestore(gck->lock, flags);
  75. return status & AT91_PMC_PCR_GCKEN ? 1 : 0;
  76. }
  77. static unsigned long
  78. clk_generated_recalc_rate(struct clk_hw *hw,
  79. unsigned long parent_rate)
  80. {
  81. struct clk_generated *gck = to_clk_generated(hw);
  82. return DIV_ROUND_CLOSEST(parent_rate, gck->gckdiv + 1);
  83. }
  84. static void clk_generated_best_diff(struct clk_rate_request *req,
  85. struct clk_hw *parent,
  86. unsigned long parent_rate, u32 div,
  87. int *best_diff, long *best_rate)
  88. {
  89. unsigned long tmp_rate;
  90. int tmp_diff;
  91. if (!div)
  92. tmp_rate = parent_rate;
  93. else
  94. tmp_rate = parent_rate / div;
  95. tmp_diff = abs(req->rate - tmp_rate);
  96. if (*best_diff < 0 || *best_diff > tmp_diff) {
  97. *best_rate = tmp_rate;
  98. *best_diff = tmp_diff;
  99. req->best_parent_rate = parent_rate;
  100. req->best_parent_hw = parent;
  101. }
  102. }
  103. static int clk_generated_determine_rate(struct clk_hw *hw,
  104. struct clk_rate_request *req)
  105. {
  106. struct clk_generated *gck = to_clk_generated(hw);
  107. struct clk_hw *parent = NULL;
  108. struct clk_rate_request req_parent = *req;
  109. long best_rate = -EINVAL;
  110. unsigned long min_rate, parent_rate;
  111. int best_diff = -1;
  112. int i;
  113. u32 div;
  114. for (i = 0; i < clk_hw_get_num_parents(hw) - 1; i++) {
  115. parent = clk_hw_get_parent_by_index(hw, i);
  116. if (!parent)
  117. continue;
  118. parent_rate = clk_hw_get_rate(parent);
  119. min_rate = DIV_ROUND_CLOSEST(parent_rate, GENERATED_MAX_DIV + 1);
  120. if (!parent_rate ||
  121. (gck->range.max && min_rate > gck->range.max))
  122. continue;
  123. div = DIV_ROUND_CLOSEST(parent_rate, req->rate);
  124. clk_generated_best_diff(req, parent, parent_rate, div,
  125. &best_diff, &best_rate);
  126. if (!best_diff)
  127. break;
  128. }
  129. /*
  130. * The audio_pll rate can be modified, unlike the five others clocks
  131. * that should never be altered.
  132. * The audio_pll can technically be used by multiple consumers. However,
  133. * with the rate locking, the first consumer to enable to clock will be
  134. * the one definitely setting the rate of the clock.
  135. * Since audio IPs are most likely to request the same rate, we enforce
  136. * that the only clks able to modify gck rate are those of audio IPs.
  137. */
  138. if (!gck->audio_pll_allowed)
  139. goto end;
  140. parent = clk_hw_get_parent_by_index(hw, GCK_INDEX_DT_AUDIO_PLL);
  141. if (!parent)
  142. goto end;
  143. for (div = 1; div < GENERATED_MAX_DIV + 2; div++) {
  144. req_parent.rate = req->rate * div;
  145. __clk_determine_rate(parent, &req_parent);
  146. clk_generated_best_diff(req, parent, req_parent.rate, div,
  147. &best_diff, &best_rate);
  148. if (!best_diff)
  149. break;
  150. }
  151. end:
  152. pr_debug("GCLK: %s, best_rate = %ld, parent clk: %s @ %ld\n",
  153. __func__, best_rate,
  154. __clk_get_name((req->best_parent_hw)->clk),
  155. req->best_parent_rate);
  156. if (best_rate < 0)
  157. return best_rate;
  158. req->rate = best_rate;
  159. return 0;
  160. }
  161. /* No modification of hardware as we have the flag CLK_SET_PARENT_GATE set */
  162. static int clk_generated_set_parent(struct clk_hw *hw, u8 index)
  163. {
  164. struct clk_generated *gck = to_clk_generated(hw);
  165. if (index >= clk_hw_get_num_parents(hw))
  166. return -EINVAL;
  167. gck->parent_id = index;
  168. return 0;
  169. }
/* Return the cached parent index (mirrors the PCR GCKCSS field). */
static u8 clk_generated_get_parent(struct clk_hw *hw)
{
	struct clk_generated *gck = to_clk_generated(hw);

	return gck->parent_id;
}
  175. /* No modification of hardware as we have the flag CLK_SET_RATE_GATE set */
  176. static int clk_generated_set_rate(struct clk_hw *hw,
  177. unsigned long rate,
  178. unsigned long parent_rate)
  179. {
  180. struct clk_generated *gck = to_clk_generated(hw);
  181. u32 div;
  182. if (!rate)
  183. return -EINVAL;
  184. if (gck->range.max && rate > gck->range.max)
  185. return -EINVAL;
  186. div = DIV_ROUND_CLOSEST(parent_rate, rate);
  187. if (div > GENERATED_MAX_DIV + 1 || !div)
  188. return -EINVAL;
  189. gck->gckdiv = div - 1;
  190. return 0;
  191. }
/*
 * clk_ops for generated clocks.  Rate and parent changes only update the
 * cached configuration; hardware is written on enable/disable (hence the
 * CLK_SET_RATE_GATE / CLK_SET_PARENT_GATE flags at registration).
 */
static const struct clk_ops generated_ops = {
	.enable = clk_generated_enable,
	.disable = clk_generated_disable,
	.is_enabled = clk_generated_is_enabled,
	.recalc_rate = clk_generated_recalc_rate,
	.determine_rate = clk_generated_determine_rate,
	.get_parent = clk_generated_get_parent,
	.set_parent = clk_generated_set_parent,
	.set_rate = clk_generated_set_rate,
};
/**
 * clk_generated_startup - Initialize a given clock to its default parent and
 * divisor parameter.
 *
 * @gck: Generated clock to set the startup parameters for.
 *
 * Take parameters from the hardware and update local clock configuration
 * accordingly.
 */
static void clk_generated_startup(struct clk_generated *gck)
{
	u32 tmp;
	unsigned long flags;

	spin_lock_irqsave(gck->lock, flags);
	/* Select the peripheral, then read back its current PCR settings. */
	regmap_write(gck->regmap, AT91_PMC_PCR,
		     (gck->id & AT91_PMC_PCR_PID_MASK));
	regmap_read(gck->regmap, AT91_PMC_PCR, &tmp);
	spin_unlock_irqrestore(gck->lock, flags);

	/* Cache the hardware's current parent selection and divisor. */
	gck->parent_id = (tmp & AT91_PMC_PCR_GCKCSS_MASK)
					>> AT91_PMC_PCR_GCKCSS_OFFSET;
	gck->gckdiv = (tmp & AT91_PMC_PCR_GCKDIV_MASK)
					>> AT91_PMC_PCR_GCKDIV_OFFSET;
}
  225. struct clk_hw * __init
  226. at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock,
  227. const char *name, const char **parent_names,
  228. u8 num_parents, u8 id, bool pll_audio,
  229. const struct clk_range *range)
  230. {
  231. struct clk_generated *gck;
  232. struct clk_init_data init;
  233. struct clk_hw *hw;
  234. int ret;
  235. gck = kzalloc(sizeof(*gck), GFP_KERNEL);
  236. if (!gck)
  237. return ERR_PTR(-ENOMEM);
  238. init.name = name;
  239. init.ops = &generated_ops;
  240. init.parent_names = parent_names;
  241. init.num_parents = num_parents;
  242. init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
  243. CLK_SET_RATE_PARENT;
  244. gck->id = id;
  245. gck->hw.init = &init;
  246. gck->regmap = regmap;
  247. gck->lock = lock;
  248. gck->range = *range;
  249. gck->audio_pll_allowed = pll_audio;
  250. clk_generated_startup(gck);
  251. hw = &gck->hw;
  252. ret = clk_hw_register(NULL, &gck->hw);
  253. if (ret) {
  254. kfree(gck);
  255. hw = ERR_PTR(ret);
  256. } else {
  257. pmc_register_id(id);
  258. }
  259. return hw;
  260. }