ccu_nm.c

/*
 * Copyright (C) 2016 Maxime Ripard
 * Maxime Ripard <maxime.ripard@free-electrons.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 */

#include <linux/clk-provider.h>
#include <linux/io.h>

#include "ccu_frac.h"
#include "ccu_gate.h"
#include "ccu_nm.h"
#include "ccu_sdm.h"

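/*
 * Scratch structure for the N/M factor search: min_n/max_n and
 * min_m/max_m hold the allowed ranges, n and m receive the best
 * combination found.
 */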
struct _ccu_nm {
	unsigned long	n, min_n, max_n;
	unsigned long	m, min_m, max_m;
};

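/*
 * Brute-force search over the allowed N and M ranges, keeping the
 * combination whose rate (parent * N / M) comes closest to the
 * requested rate without exceeding it.
 */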
static void ccu_nm_find_best(unsigned long parent, unsigned long rate,
			     struct _ccu_nm *nm)
{
	unsigned long best_rate = 0;
	unsigned long best_n = 0, best_m = 0;
	unsigned long _n, _m;

	for (_n = nm->min_n; _n <= nm->max_n; _n++) {
		for (_m = nm->min_m; _m <= nm->max_m; _m++) {
			unsigned long tmp_rate = parent * _n / _m;

			if (tmp_rate > rate)
				continue;

			if ((rate - tmp_rate) < (rate - best_rate)) {
				best_rate = tmp_rate;
				best_n = _n;
				best_m = _m;
			}
		}
	}

	nm->n = best_n;
	nm->m = best_m;
}

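/* Gate control: thin wrappers around the shared gate helpers */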
static void ccu_nm_disable(struct clk_hw *hw)
{
	struct ccu_nm *nm = hw_to_ccu_nm(hw);

	return ccu_gate_helper_disable(&nm->common, nm->enable);
}

static int ccu_nm_enable(struct clk_hw *hw)
{
	struct ccu_nm *nm = hw_to_ccu_nm(hw);

	return ccu_gate_helper_enable(&nm->common, nm->enable);
}

static int ccu_nm_is_enabled(struct clk_hw *hw)
{
	struct ccu_nm *nm = hw_to_ccu_nm(hw);

	return ccu_gate_helper_is_enabled(&nm->common, nm->enable);
}

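/*
 * Read back the current rate: if fractional mode is enabled, take the
 * rate from the frac helper; otherwise decode N and M from the register
 * (applying their offsets and treating zero as one) and either ask the
 * sigma-delta helper for the rate or compute parent_rate * N / M. A
 * fixed post-divider, when present, is applied last.
 */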
static unsigned long ccu_nm_recalc_rate(struct clk_hw *hw,
					unsigned long parent_rate)
{
	struct ccu_nm *nm = hw_to_ccu_nm(hw);
	unsigned long rate;
	unsigned long n, m;
	u32 reg;

	if (ccu_frac_helper_is_enabled(&nm->common, &nm->frac)) {
		rate = ccu_frac_helper_read_rate(&nm->common, &nm->frac);

		if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
			rate /= nm->fixed_post_div;

		return rate;
	}

	reg = readl(nm->common.base + nm->common.reg);

	n = reg >> nm->n.shift;
	n &= (1 << nm->n.width) - 1;
	n += nm->n.offset;
	if (!n)
		n++;

	m = reg >> nm->m.shift;
	m &= (1 << nm->m.width) - 1;
	m += nm->m.offset;
	if (!m)
		m++;

	if (ccu_sdm_helper_is_enabled(&nm->common, &nm->sdm))
		rate = ccu_sdm_helper_read_rate(&nm->common, &nm->sdm, m, n);
	else
		rate = parent_rate * n / m;

	if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
		rate /= nm->fixed_post_div;

	return rate;
}

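/*
 * Round the requested rate: scale it by the fixed post-divider, clamp
 * it to the PLL's min/max rate, prefer an exact fractional or
 * sigma-delta rate when one is available, and otherwise fall back to
 * the closest N/M combination. The result is scaled back down by the
 * post-divider before returning.
 */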
static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long *parent_rate)
{
	struct ccu_nm *nm = hw_to_ccu_nm(hw);
	struct _ccu_nm _nm;

	if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
		rate *= nm->fixed_post_div;

	if (rate < nm->min_rate) {
		rate = nm->min_rate;
		if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
			rate /= nm->fixed_post_div;
		return rate;
	}

	if (nm->max_rate && rate > nm->max_rate) {
		rate = nm->max_rate;
		if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
			rate /= nm->fixed_post_div;
		return rate;
	}

	if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate)) {
		if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
			rate /= nm->fixed_post_div;
		return rate;
	}

	if (ccu_sdm_helper_has_rate(&nm->common, &nm->sdm, rate)) {
		if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
			rate /= nm->fixed_post_div;
		return rate;
	}

	_nm.min_n = nm->n.min ?: 1;
	_nm.max_n = nm->n.max ?: 1 << nm->n.width;
	_nm.min_m = 1;
	_nm.max_m = nm->m.max ?: 1 << nm->m.width;

	ccu_nm_find_best(*parent_rate, rate, &_nm);
	rate = *parent_rate * _nm.n / _nm.m;

	if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
		rate /= nm->fixed_post_div;

	return rate;
}

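/*
 * Program the dividers: either switch to fractional mode (forcing M to
 * 0 first, as most SoCs require), use the sigma-delta factors, or write
 * the best N/M pair found by the search, then wait for the PLL to lock.
 */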
static int ccu_nm_set_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long parent_rate)
{
	struct ccu_nm *nm = hw_to_ccu_nm(hw);
	struct _ccu_nm _nm;
	unsigned long flags;
	u32 reg;

	/* Adjust target rate according to post-dividers */
	if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
		rate = rate * nm->fixed_post_div;

	if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate)) {
		spin_lock_irqsave(nm->common.lock, flags);

		/* most SoCs require M to be 0 if fractional mode is used */
		reg = readl(nm->common.base + nm->common.reg);
		reg &= ~GENMASK(nm->m.width + nm->m.shift - 1, nm->m.shift);
		writel(reg, nm->common.base + nm->common.reg);

		spin_unlock_irqrestore(nm->common.lock, flags);

		ccu_frac_helper_enable(&nm->common, &nm->frac);

		return ccu_frac_helper_set_rate(&nm->common, &nm->frac,
						rate, nm->lock);
	} else {
		ccu_frac_helper_disable(&nm->common, &nm->frac);
	}

	_nm.min_n = nm->n.min ?: 1;
	_nm.max_n = nm->n.max ?: 1 << nm->n.width;
	_nm.min_m = 1;
	_nm.max_m = nm->m.max ?: 1 << nm->m.width;

	if (ccu_sdm_helper_has_rate(&nm->common, &nm->sdm, rate)) {
		ccu_sdm_helper_enable(&nm->common, &nm->sdm, rate);

		/* Sigma delta modulation requires specific N and M factors */
		ccu_sdm_helper_get_factors(&nm->common, &nm->sdm, rate,
					   &_nm.m, &_nm.n);
	} else {
		ccu_sdm_helper_disable(&nm->common, &nm->sdm);
		ccu_nm_find_best(parent_rate, rate, &_nm);
	}

	spin_lock_irqsave(nm->common.lock, flags);

	reg = readl(nm->common.base + nm->common.reg);
	reg &= ~GENMASK(nm->n.width + nm->n.shift - 1, nm->n.shift);
	reg &= ~GENMASK(nm->m.width + nm->m.shift - 1, nm->m.shift);

	reg |= (_nm.n - nm->n.offset) << nm->n.shift;
	reg |= (_nm.m - nm->m.offset) << nm->m.shift;
	writel(reg, nm->common.base + nm->common.reg);

	spin_unlock_irqrestore(nm->common.lock, flags);

	ccu_helper_wait_for_lock(&nm->common, nm->lock);

	return 0;
}

const struct clk_ops ccu_nm_ops = {
	.disable	= ccu_nm_disable,
	.enable		= ccu_nm_enable,
	.is_enabled	= ccu_nm_is_enabled,

	.recalc_rate	= ccu_nm_recalc_rate,
	.round_rate	= ccu_nm_round_rate,
	.set_rate	= ccu_nm_set_rate,
};