ccu_nm.c

/*
 * Copyright (C) 2016 Maxime Ripard
 * Maxime Ripard <maxime.ripard@free-electrons.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 */

#include <linux/clk-provider.h>
#include <linux/io.h>

#include "ccu_frac.h"
#include "ccu_gate.h"
#include "ccu_nm.h"
#include "ccu_sdm.h"
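
/*
 * Intermediate result of the N/M factor search: the caller fills in the
 * allowed ranges, ccu_nm_find_best() fills in the best factors found.
 */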
struct _ccu_nm {
	unsigned long	n, min_n, max_n;
	unsigned long	m, min_m, max_m;
};
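
/*
 * Brute-force search over the allowed N/M ranges for the combination whose
 * rate (parent * N / M) is closest to the requested rate without exceeding it.
 */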
static void ccu_nm_find_best(unsigned long parent, unsigned long rate,
			     struct _ccu_nm *nm)
{
	unsigned long best_rate = 0;
	unsigned long best_n = 0, best_m = 0;
	unsigned long _n, _m;

	for (_n = nm->min_n; _n <= nm->max_n; _n++) {
		for (_m = nm->min_m; _m <= nm->max_m; _m++) {
			unsigned long tmp_rate = parent * _n / _m;

			if (tmp_rate > rate)
				continue;

			if ((rate - tmp_rate) < (rate - best_rate)) {
				best_rate = tmp_rate;
				best_n = _n;
				best_m = _m;
			}
		}
	}

	nm->n = best_n;
	nm->m = best_m;
}
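
/* The enable/disable/is_enabled ops simply forward to the gate helper. */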
static void ccu_nm_disable(struct clk_hw *hw)
{
	struct ccu_nm *nm = hw_to_ccu_nm(hw);

	return ccu_gate_helper_disable(&nm->common, nm->enable);
}

static int ccu_nm_enable(struct clk_hw *hw)
{
	struct ccu_nm *nm = hw_to_ccu_nm(hw);

	return ccu_gate_helper_enable(&nm->common, nm->enable);
}

static int ccu_nm_is_enabled(struct clk_hw *hw)
{
	struct ccu_nm *nm = hw_to_ccu_nm(hw);

	return ccu_gate_helper_is_enabled(&nm->common, nm->enable);
}
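
/*
 * Compute the current rate from the hardware state: fractional mode and
 * sigma-delta modulation take precedence over the plain N/M factors, and
 * an optional fixed post-divider is applied last.
 */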
static unsigned long ccu_nm_recalc_rate(struct clk_hw *hw,
					unsigned long parent_rate)
{
	struct ccu_nm *nm = hw_to_ccu_nm(hw);
	unsigned long rate;
	unsigned long n, m;
	u32 reg;

	if (ccu_frac_helper_is_enabled(&nm->common, &nm->frac)) {
		rate = ccu_frac_helper_read_rate(&nm->common, &nm->frac);

		if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
			rate /= nm->fixed_post_div;

		return rate;
	}

	reg = readl(nm->common.base + nm->common.reg);

	n = reg >> nm->n.shift;
	n &= (1 << nm->n.width) - 1;
	n += nm->n.offset;
	if (!n)
		n++;

	m = reg >> nm->m.shift;
	m &= (1 << nm->m.width) - 1;
	m += nm->m.offset;
	if (!m)
		m++;

	if (ccu_sdm_helper_is_enabled(&nm->common, &nm->sdm))
		rate = ccu_sdm_helper_read_rate(&nm->common, &nm->sdm, m, n);
	else
		rate = parent_rate * n / m;

	if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
		rate /= nm->fixed_post_div;

	return rate;
}
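
/*
 * Round a requested rate to what the hardware can actually produce: clamp
 * to the minimum supported rate, prefer an exact fractional or sigma-delta
 * rate if one exists, and otherwise fall back to the N/M search.
 */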
static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long *parent_rate)
{
	struct ccu_nm *nm = hw_to_ccu_nm(hw);
	struct _ccu_nm _nm;

	if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
		rate *= nm->fixed_post_div;

	if (rate < nm->min_rate) {
		rate = nm->min_rate;
		if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
			rate /= nm->fixed_post_div;
		return rate;
	}

	if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate)) {
		if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
			rate /= nm->fixed_post_div;
		return rate;
	}

	if (ccu_sdm_helper_has_rate(&nm->common, &nm->sdm, rate)) {
		if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
			rate /= nm->fixed_post_div;
		return rate;
	}

	_nm.min_n = nm->n.min ?: 1;
	_nm.max_n = nm->n.max ?: 1 << nm->n.width;
	_nm.min_m = 1;
	_nm.max_m = nm->m.max ?: 1 << nm->m.width;

	ccu_nm_find_best(*parent_rate, rate, &_nm);
	rate = *parent_rate * _nm.n / _nm.m;

	if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
		rate /= nm->fixed_post_div;

	return rate;
}
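
/*
 * Program the divider for the requested rate.  Fractional and sigma-delta
 * rates are handed to their dedicated helpers; otherwise the best N/M pair
 * is written to the register under the CCU spinlock and we wait for the PLL
 * to lock before returning.
 */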
static int ccu_nm_set_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long parent_rate)
{
	struct ccu_nm *nm = hw_to_ccu_nm(hw);
	struct _ccu_nm _nm;
	unsigned long flags;
	u32 reg;

	/* Adjust target rate according to post-dividers */
	if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
		rate = rate * nm->fixed_post_div;

	if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate)) {
		spin_lock_irqsave(nm->common.lock, flags);

		/* most SoCs require M to be 0 if fractional mode is used */
		reg = readl(nm->common.base + nm->common.reg);
		reg &= ~GENMASK(nm->m.width + nm->m.shift - 1, nm->m.shift);
		writel(reg, nm->common.base + nm->common.reg);

		spin_unlock_irqrestore(nm->common.lock, flags);

		ccu_frac_helper_enable(&nm->common, &nm->frac);

		return ccu_frac_helper_set_rate(&nm->common, &nm->frac,
						rate, nm->lock);
	} else {
		ccu_frac_helper_disable(&nm->common, &nm->frac);
	}

	_nm.min_n = nm->n.min ?: 1;
	_nm.max_n = nm->n.max ?: 1 << nm->n.width;
	_nm.min_m = 1;
	_nm.max_m = nm->m.max ?: 1 << nm->m.width;

	if (ccu_sdm_helper_has_rate(&nm->common, &nm->sdm, rate)) {
		ccu_sdm_helper_enable(&nm->common, &nm->sdm, rate);

		/* Sigma delta modulation requires specific N and M factors */
		ccu_sdm_helper_get_factors(&nm->common, &nm->sdm, rate,
					   &_nm.m, &_nm.n);
	} else {
		ccu_sdm_helper_disable(&nm->common, &nm->sdm);
		ccu_nm_find_best(parent_rate, rate, &_nm);
	}

	spin_lock_irqsave(nm->common.lock, flags);

	reg = readl(nm->common.base + nm->common.reg);
	reg &= ~GENMASK(nm->n.width + nm->n.shift - 1, nm->n.shift);
	reg &= ~GENMASK(nm->m.width + nm->m.shift - 1, nm->m.shift);

	reg |= (_nm.n - nm->n.offset) << nm->n.shift;
	reg |= (_nm.m - nm->m.offset) << nm->m.shift;
	writel(reg, nm->common.base + nm->common.reg);

	spin_unlock_irqrestore(nm->common.lock, flags);

	ccu_helper_wait_for_lock(&nm->common, nm->lock);

	return 0;
}
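
/* The clk_ops exposed to the SoC-specific CCU drivers via ccu_nm.h. */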
const struct clk_ops ccu_nm_ops = {
	.disable	= ccu_nm_disable,
	.enable		= ccu_nm_enable,
	.is_enabled	= ccu_nm_is_enabled,

	.recalc_rate	= ccu_nm_recalc_rate,
	.round_rate	= ccu_nm_round_rate,
	.set_rate	= ccu_nm_set_rate,
};
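
/*
 * Usage sketch (illustrative, not part of this file): SoC-specific CCU
 * drivers normally instantiate an NM clock through the helper macros
 * declared in ccu_nm.h instead of filling struct ccu_nm by hand.  The
 * values below (register offset, field shifts/widths, gate/lock bits)
 * are made-up example numbers, roughly:
 *
 *	static SUNXI_CCU_NM_WITH_GATE_LOCK(pll_example_clk, "pll-example",
 *					   "osc24M", 0x008,
 *					   8, 7,	// N: shift, width
 *					   0, 5,	// M: shift, width
 *					   BIT(31),	// gate
 *					   BIT(28),	// lock
 *					   CLK_SET_RATE_UNGATE);
 */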