// SPDX-License-Identifier: GPL-2.0+
/*
 * Marvell Armada 37xx SoC Peripheral clocks
 *
 * Copyright (C) 2016 Marvell
 *
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 *
 * Most of the peripheral clocks can be modelled like this:
 *             _____    _______    _______
 * TBG-A-P  --|     |  |       |  |       |   ______
 * TBG-B-P  --| Mux |--| /div1 |--| /div2 |--| Gate |--> perip_clk
 * TBG-A-S  --|     |  |       |  |       |  |______|
 * TBG-B-S  --|_____|  |_______|  |_______|
 *
 * However, some clocks may use only one or two of these blocks, or use
 * the xtal clock as parent.
 */
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#define TBG_SEL		0x0
#define DIV_SEL0	0x4
#define DIV_SEL1	0x8
#define DIV_SEL2	0xC
#define CLK_SEL		0x10
#define CLK_DIS		0x14

#define LOAD_LEVEL_NR	4

#define ARMADA_37XX_NB_L0L1		0x18
#define ARMADA_37XX_NB_L2L3		0x1C
#define ARMADA_37XX_NB_TBG_DIV_OFF	13
#define ARMADA_37XX_NB_TBG_DIV_MASK	0x7
#define ARMADA_37XX_NB_CLK_SEL_OFF	11
#define ARMADA_37XX_NB_CLK_SEL_MASK	0x1
#define ARMADA_37XX_NB_TBG_SEL_OFF	9
#define ARMADA_37XX_NB_TBG_SEL_MASK	0x3
#define ARMADA_37XX_NB_CONFIG_SHIFT	16
#define ARMADA_37XX_NB_DYN_MOD		0x24
#define ARMADA_37XX_NB_DFS_EN		31
#define ARMADA_37XX_NB_CPU_LOAD		0x30
#define ARMADA_37XX_NB_CPU_LOAD_MASK	0x3
#define ARMADA_37XX_DVFS_LOAD_0		0
#define ARMADA_37XX_DVFS_LOAD_1		1
#define ARMADA_37XX_DVFS_LOAD_2		2
#define ARMADA_37XX_DVFS_LOAD_3		3
struct clk_periph_driver_data {
	struct clk_hw_onecell_data *hw_data;
	spinlock_t lock;
};
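
/*
 * struct clk_double_div - clock fed through two chained dividers
 * @hw:     handle between common and hardware-specific interfaces
 * @reg1:   register holding the first divider field
 * @shift1: shift of the first divider field inside @reg1
 * @reg2:   register holding the second divider field
 * @shift2: shift of the second divider field inside @reg2
 */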
struct clk_double_div {
	struct clk_hw hw;
	void __iomem *reg1;
	u8 shift1;
	void __iomem *reg2;
	u8 shift2;
};
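
/*
 * struct clk_pm_cpu - CPU clock: mux plus divider, with a DVFS override
 * @hw:         handle between common and hardware-specific interfaces
 * @reg_mux:    register holding the TBG parent selection
 * @shift_mux:  shift of the selection field inside @reg_mux
 * @mask_mux:   mask of the selection field
 * @reg_div:    register holding the divider
 * @shift_div:  shift of the divider field inside @reg_div
 * @nb_pm_base: regmap of the North Bridge PM block; when DVFS is
 *              enabled there, parent and divider are taken from the
 *              load-level registers instead of @reg_mux/@reg_div
 */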
struct clk_pm_cpu {
	struct clk_hw hw;
	void __iomem *reg_mux;
	u8 shift_mux;
	u32 mask_mux;
	void __iomem *reg_div;
	u8 shift_div;
	struct regmap *nb_pm_base;
};

#define to_clk_double_div(_hw) container_of(_hw, struct clk_double_div, hw)
#define to_clk_pm_cpu(_hw) container_of(_hw, struct clk_pm_cpu, hw)

struct clk_periph_data {
	const char *name;
	const char * const *parent_names;
	int num_parents;
	struct clk_hw *mux_hw;
	struct clk_hw *rate_hw;
	struct clk_hw *gate_hw;
	struct clk_hw *muxrate_hw;
	bool is_double_div;
};
static const struct clk_div_table clk_table6[] = {
	{ .val = 1, .div = 1, },
	{ .val = 2, .div = 2, },
	{ .val = 3, .div = 3, },
	{ .val = 4, .div = 4, },
	{ .val = 5, .div = 5, },
	{ .val = 6, .div = 6, },
	{ .val = 0, .div = 0, }, /* last entry */
};

static const struct clk_div_table clk_table1[] = {
	{ .val = 0, .div = 1, },
	{ .val = 1, .div = 2, },
	{ .val = 0, .div = 0, }, /* last entry */
};

static const struct clk_div_table clk_table2[] = {
	{ .val = 0, .div = 2, },
	{ .val = 1, .div = 4, },
	{ .val = 0, .div = 0, }, /* last entry */
};
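
/*
 * In a clk_div_table, .val is the raw register field value and .div the
 * resulting divisor; a zeroed entry terminates the table. With
 * clk_table2 above, for instance, a field value of 0 divides the parent
 * rate by 2 and a field value of 1 divides it by 4.
 */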
static const struct clk_ops clk_double_div_ops;
static const struct clk_ops clk_pm_cpu_ops;

#define PERIPH_GATE(_name, _bit)		\
struct clk_gate gate_##_name = {		\
	.reg = (void *)CLK_DIS,			\
	.bit_idx = _bit,			\
	.hw.init = &(struct clk_init_data){	\
		.ops = &clk_gate_ops,		\
	}					\
};

#define PERIPH_MUX(_name, _shift)		\
struct clk_mux mux_##_name = {			\
	.reg = (void *)TBG_SEL,			\
	.shift = _shift,			\
	.mask = 3,				\
	.hw.init = &(struct clk_init_data){	\
		.ops = &clk_mux_ro_ops,		\
	}					\
};

#define PERIPH_DOUBLEDIV(_name, _reg1, _reg2, _shift1, _shift2)	\
struct clk_double_div rate_##_name = {		\
	.reg1 = (void *)_reg1,			\
	.reg2 = (void *)_reg2,			\
	.shift1 = _shift1,			\
	.shift2 = _shift2,			\
	.hw.init = &(struct clk_init_data){	\
		.ops = &clk_double_div_ops,	\
	}					\
};

#define PERIPH_DIV(_name, _reg, _shift, _table)	\
struct clk_divider rate_##_name = {		\
	.reg = (void *)_reg,			\
	.table = _table,			\
	.shift = _shift,			\
	.hw.init = &(struct clk_init_data){	\
		.ops = &clk_divider_ro_ops,	\
	}					\
};

#define PERIPH_PM_CPU(_name, _shift1, _reg, _shift2)	\
struct clk_pm_cpu muxrate_##_name = {		\
	.reg_mux = (void *)TBG_SEL,		\
	.mask_mux = 3,				\
	.shift_mux = _shift1,			\
	.reg_div = (void *)_reg,		\
	.shift_div = _shift2,			\
	.hw.init = &(struct clk_init_data){	\
		.ops = &clk_pm_cpu_ops,		\
	}					\
};
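
/*
 * Note that the .reg/.reg1/.reg2 initializers above store plain
 * register offsets (TBG_SEL, DIV_SEL0, ...) cast to pointers. They only
 * become valid MMIO addresses in armada_3700_add_composite_clk(), which
 * adds the ioremapped base of the clock block to each of them at probe
 * time.
 */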
#define PERIPH_CLK_FULL_DD(_name, _bit, _shift, _reg1, _reg2, _shift1, _shift2)\
static PERIPH_GATE(_name, _bit);			\
static PERIPH_MUX(_name, _shift);			\
static PERIPH_DOUBLEDIV(_name, _reg1, _reg2, _shift1, _shift2);

#define PERIPH_CLK_FULL(_name, _bit, _shift, _reg, _shift1, _table)	\
static PERIPH_GATE(_name, _bit);			\
static PERIPH_MUX(_name, _shift);			\
static PERIPH_DIV(_name, _reg, _shift1, _table);

#define PERIPH_CLK_GATE_DIV(_name, _bit, _reg, _shift, _table)	\
static PERIPH_GATE(_name, _bit);			\
static PERIPH_DIV(_name, _reg, _shift, _table);

#define PERIPH_CLK_MUX_DD(_name, _shift, _reg1, _reg2, _shift1, _shift2)\
static PERIPH_MUX(_name, _shift);			\
static PERIPH_DOUBLEDIV(_name, _reg1, _reg2, _shift1, _shift2);

#define REF_CLK_FULL(_name)				\
	{ .name = #_name,				\
	  .parent_names = (const char *[]){ "TBG-A-P",	\
	      "TBG-B-P", "TBG-A-S", "TBG-B-S"},		\
	  .num_parents = 4,				\
	  .mux_hw = &mux_##_name.hw,			\
	  .gate_hw = &gate_##_name.hw,			\
	  .rate_hw = &rate_##_name.hw,			\
	}

#define REF_CLK_FULL_DD(_name)				\
	{ .name = #_name,				\
	  .parent_names = (const char *[]){ "TBG-A-P",	\
	      "TBG-B-P", "TBG-A-S", "TBG-B-S"},		\
	  .num_parents = 4,				\
	  .mux_hw = &mux_##_name.hw,			\
	  .gate_hw = &gate_##_name.hw,			\
	  .rate_hw = &rate_##_name.hw,			\
	  .is_double_div = true,			\
	}

#define REF_CLK_GATE(_name, _parent_name)		\
	{ .name = #_name,				\
	  .parent_names = (const char *[]){ _parent_name},	\
	  .num_parents = 1,				\
	  .gate_hw = &gate_##_name.hw,			\
	}

#define REF_CLK_GATE_DIV(_name, _parent_name)		\
	{ .name = #_name,				\
	  .parent_names = (const char *[]){ _parent_name},	\
	  .num_parents = 1,				\
	  .gate_hw = &gate_##_name.hw,			\
	  .rate_hw = &rate_##_name.hw,			\
	}

#define REF_CLK_PM_CPU(_name)				\
	{ .name = #_name,				\
	  .parent_names = (const char *[]){ "TBG-A-P",	\
	      "TBG-B-P", "TBG-A-S", "TBG-B-S"},		\
	  .num_parents = 4,				\
	  .muxrate_hw = &muxrate_##_name.hw,		\
	}

#define REF_CLK_MUX_DD(_name)				\
	{ .name = #_name,				\
	  .parent_names = (const char *[]){ "TBG-A-P",	\
	      "TBG-B-P", "TBG-A-S", "TBG-B-S"},		\
	  .num_parents = 4,				\
	  .mux_hw = &mux_##_name.hw,			\
	  .rate_hw = &rate_##_name.hw,			\
	  .is_double_div = true,			\
	}
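
/*
 * As an example of how these macros compose: the line
 *
 *	PERIPH_CLK_FULL_DD(mmc, 2, 0, DIV_SEL2, DIV_SEL2, 16, 13);
 *
 * below defines three static objects -- gate_mmc (bit 2 of CLK_DIS),
 * mux_mmc (bits [1:0] of TBG_SEL) and rate_mmc (two 3-bit divider
 * fields at bits 16 and 13 of DIV_SEL2) -- and REF_CLK_FULL_DD(mmc)
 * then ties their clk_hw handles together into one clk_periph_data
 * entry with the four TBG parents.
 */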
/* NB periph clocks */
PERIPH_CLK_FULL_DD(mmc, 2, 0, DIV_SEL2, DIV_SEL2, 16, 13);
PERIPH_CLK_FULL_DD(sata_host, 3, 2, DIV_SEL2, DIV_SEL2, 10, 7);
PERIPH_CLK_FULL_DD(sec_at, 6, 4, DIV_SEL1, DIV_SEL1, 3, 0);
PERIPH_CLK_FULL_DD(sec_dap, 7, 6, DIV_SEL1, DIV_SEL1, 9, 6);
PERIPH_CLK_FULL_DD(tscem, 8, 8, DIV_SEL1, DIV_SEL1, 15, 12);
PERIPH_CLK_FULL(tscem_tmx, 10, 10, DIV_SEL1, 18, clk_table6);
static PERIPH_GATE(avs, 11);
PERIPH_CLK_FULL_DD(pwm, 13, 14, DIV_SEL0, DIV_SEL0, 3, 0);
PERIPH_CLK_FULL_DD(sqf, 12, 12, DIV_SEL1, DIV_SEL1, 27, 24);
static PERIPH_GATE(i2c_2, 16);
static PERIPH_GATE(i2c_1, 17);
PERIPH_CLK_GATE_DIV(ddr_phy, 19, DIV_SEL0, 18, clk_table2);
PERIPH_CLK_FULL_DD(ddr_fclk, 21, 16, DIV_SEL0, DIV_SEL0, 15, 12);
PERIPH_CLK_FULL(trace, 22, 18, DIV_SEL0, 20, clk_table6);
PERIPH_CLK_FULL(counter, 23, 20, DIV_SEL0, 23, clk_table6);
PERIPH_CLK_FULL_DD(eip97, 24, 24, DIV_SEL2, DIV_SEL2, 22, 19);
static PERIPH_PM_CPU(cpu, 22, DIV_SEL0, 28);

static struct clk_periph_data data_nb[] = {
	REF_CLK_FULL_DD(mmc),
	REF_CLK_FULL_DD(sata_host),
	REF_CLK_FULL_DD(sec_at),
	REF_CLK_FULL_DD(sec_dap),
	REF_CLK_FULL_DD(tscem),
	REF_CLK_FULL(tscem_tmx),
	REF_CLK_GATE(avs, "xtal"),
	REF_CLK_FULL_DD(sqf),
	REF_CLK_FULL_DD(pwm),
	REF_CLK_GATE(i2c_2, "xtal"),
	REF_CLK_GATE(i2c_1, "xtal"),
	REF_CLK_GATE_DIV(ddr_phy, "TBG-A-S"),
	REF_CLK_FULL_DD(ddr_fclk),
	REF_CLK_FULL(trace),
	REF_CLK_FULL(counter),
	REF_CLK_FULL_DD(eip97),
	REF_CLK_PM_CPU(cpu),
	{ },
};
/* SB periph clocks */
PERIPH_CLK_MUX_DD(gbe_50, 6, DIV_SEL2, DIV_SEL2, 6, 9);
PERIPH_CLK_MUX_DD(gbe_core, 8, DIV_SEL1, DIV_SEL1, 18, 21);
PERIPH_CLK_MUX_DD(gbe_125, 10, DIV_SEL1, DIV_SEL1, 6, 9);
static PERIPH_GATE(gbe1_50, 0);
static PERIPH_GATE(gbe0_50, 1);
static PERIPH_GATE(gbe1_125, 2);
static PERIPH_GATE(gbe0_125, 3);
PERIPH_CLK_GATE_DIV(gbe1_core, 4, DIV_SEL1, 13, clk_table1);
PERIPH_CLK_GATE_DIV(gbe0_core, 5, DIV_SEL1, 14, clk_table1);
PERIPH_CLK_GATE_DIV(gbe_bm, 12, DIV_SEL1, 0, clk_table1);
PERIPH_CLK_FULL_DD(sdio, 11, 14, DIV_SEL0, DIV_SEL0, 3, 6);
PERIPH_CLK_FULL_DD(usb32_usb2_sys, 16, 16, DIV_SEL0, DIV_SEL0, 9, 12);
PERIPH_CLK_FULL_DD(usb32_ss_sys, 17, 18, DIV_SEL0, DIV_SEL0, 15, 18);

static struct clk_periph_data data_sb[] = {
	REF_CLK_MUX_DD(gbe_50),
	REF_CLK_MUX_DD(gbe_core),
	REF_CLK_MUX_DD(gbe_125),
	REF_CLK_GATE(gbe1_50, "gbe_50"),
	REF_CLK_GATE(gbe0_50, "gbe_50"),
	REF_CLK_GATE(gbe1_125, "gbe_125"),
	REF_CLK_GATE(gbe0_125, "gbe_125"),
	REF_CLK_GATE_DIV(gbe1_core, "gbe_core"),
	REF_CLK_GATE_DIV(gbe0_core, "gbe_core"),
	REF_CLK_GATE_DIV(gbe_bm, "gbe_core"),
	REF_CLK_FULL_DD(sdio),
	REF_CLK_FULL_DD(usb32_usb2_sys),
	REF_CLK_FULL_DD(usb32_ss_sys),
	{ },
};
static unsigned int get_div(void __iomem *reg, int shift)
{
	u32 val;

	/* The divider fields are 3 bits wide; values above 6 are invalid */
	val = (readl(reg) >> shift) & 0x7;
	if (val > 6)
		return 0;
	return val;
}
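
/*
 * The effective divisor of a double divider is the product of its two
 * fields, so the rate computed below is parent_rate / (div1 * div2),
 * rounded up. For instance, a 1 GHz TBG parent with div1 = 2 and
 * div2 = 5 yields a 100 MHz peripheral clock.
 */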
static unsigned long clk_double_div_recalc_rate(struct clk_hw *hw,
						unsigned long parent_rate)
{
	struct clk_double_div *double_div = to_clk_double_div(hw);
	unsigned int div;

	div = get_div(double_div->reg1, double_div->shift1);
	div *= get_div(double_div->reg2, double_div->shift2);

	return DIV_ROUND_UP_ULL((u64)parent_rate, div);
}

static const struct clk_ops clk_double_div_ops = {
	.recalc_rate = clk_double_div_recalc_rate,
};
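
/*
 * DVFS defines four CPU load levels (L0..L3). The TBG selection and
 * divider for each level live in two 32-bit registers, two levels per
 * register: ARMADA_37XX_NB_L0L1 holds L0/L1 and ARMADA_37XX_NB_L2L3
 * holds L2/L3, with the even level of each pair shifted up by
 * ARMADA_37XX_NB_CONFIG_SHIFT (16) bits. The helper below turns a load
 * level into the matching register and field offset.
 */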
static void armada_3700_pm_dvfs_update_regs(unsigned int load_level,
					    unsigned int *reg,
					    unsigned int *offset)
{
	if (load_level <= ARMADA_37XX_DVFS_LOAD_1)
		*reg = ARMADA_37XX_NB_L0L1;
	else
		*reg = ARMADA_37XX_NB_L2L3;

	if (load_level == ARMADA_37XX_DVFS_LOAD_0 ||
	    load_level == ARMADA_37XX_DVFS_LOAD_2)
		*offset += ARMADA_37XX_NB_CONFIG_SHIFT;
}
static bool armada_3700_pm_dvfs_is_enabled(struct regmap *base)
{
	unsigned int val, reg = ARMADA_37XX_NB_DYN_MOD;

	if (IS_ERR(base))
		return false;

	regmap_read(base, reg, &val);

	return !!(val & BIT(ARMADA_37XX_NB_DFS_EN));
}
static unsigned int armada_3700_pm_dvfs_get_cpu_div(struct regmap *base)
{
	unsigned int reg = ARMADA_37XX_NB_CPU_LOAD;
	unsigned int offset = ARMADA_37XX_NB_TBG_DIV_OFF;
	unsigned int load_level, div;

	/*
	 * This function is always called after
	 * armada_3700_pm_dvfs_is_enabled, so there is no need to check
	 * again that the base is valid.
	 */
	regmap_read(base, reg, &load_level);

	/*
	 * Which register, and which offset within it, hold the current
	 * divider depends on the load level.
	 */
	load_level &= ARMADA_37XX_NB_CPU_LOAD_MASK;
	armada_3700_pm_dvfs_update_regs(load_level, &reg, &offset);

	regmap_read(base, reg, &div);

	return (div >> offset) & ARMADA_37XX_NB_TBG_DIV_MASK;
}
static unsigned int armada_3700_pm_dvfs_get_cpu_parent(struct regmap *base)
{
	unsigned int reg = ARMADA_37XX_NB_CPU_LOAD;
	unsigned int offset = ARMADA_37XX_NB_TBG_SEL_OFF;
	unsigned int load_level, sel;

	/*
	 * This function is always called after
	 * armada_3700_pm_dvfs_is_enabled, so there is no need to check
	 * again that the base is valid.
	 */
	regmap_read(base, reg, &load_level);

	/*
	 * Which register, and which offset within it, hold the current
	 * TBG selection depends on the load level.
	 */
	load_level &= ARMADA_37XX_NB_CPU_LOAD_MASK;
	armada_3700_pm_dvfs_update_regs(load_level, &reg, &offset);

	regmap_read(base, reg, &sel);

	return (sel >> offset) & ARMADA_37XX_NB_TBG_SEL_MASK;
}
static u8 clk_pm_cpu_get_parent(struct clk_hw *hw)
{
	struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
	u32 val;

	if (armada_3700_pm_dvfs_is_enabled(pm_cpu->nb_pm_base)) {
		val = armada_3700_pm_dvfs_get_cpu_parent(pm_cpu->nb_pm_base);
	} else {
		val = readl(pm_cpu->reg_mux) >> pm_cpu->shift_mux;
		val &= pm_cpu->mask_mux;
	}

	return val;
}
static int clk_pm_cpu_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
	struct regmap *base = pm_cpu->nb_pm_base;
	int load_level;

	/*
	 * We set the clock parent only if the DVFS is available but
	 * not enabled.
	 */
	if (IS_ERR(base) || armada_3700_pm_dvfs_is_enabled(base))
		return -EINVAL;

	/* Set the parent clock for all the load levels */
	for (load_level = 0; load_level < LOAD_LEVEL_NR; load_level++) {
		unsigned int reg, mask, val,
			offset = ARMADA_37XX_NB_TBG_SEL_OFF;

		armada_3700_pm_dvfs_update_regs(load_level, &reg, &offset);

		val = index << offset;
		mask = ARMADA_37XX_NB_TBG_SEL_MASK << offset;
		regmap_update_bits(base, reg, mask, val);
	}
	return 0;
}
static unsigned long clk_pm_cpu_recalc_rate(struct clk_hw *hw,
					    unsigned long parent_rate)
{
	struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
	unsigned int div;

	if (armada_3700_pm_dvfs_is_enabled(pm_cpu->nb_pm_base))
		div = armada_3700_pm_dvfs_get_cpu_div(pm_cpu->nb_pm_base);
	else
		div = get_div(pm_cpu->reg_div, pm_cpu->shift_div);

	return DIV_ROUND_UP_ULL((u64)parent_rate, div);
}
static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long *parent_rate)
{
	struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
	struct regmap *base = pm_cpu->nb_pm_base;
	unsigned int div = *parent_rate / rate;
	unsigned int load_level;

	/* only available when DVFS is enabled */
	if (!armada_3700_pm_dvfs_is_enabled(base))
		return -EINVAL;

	for (load_level = 0; load_level < LOAD_LEVEL_NR; load_level++) {
		unsigned int reg, val, offset = ARMADA_37XX_NB_TBG_DIV_OFF;

		armada_3700_pm_dvfs_update_regs(load_level, &reg, &offset);

		regmap_read(base, reg, &val);

		val >>= offset;
		val &= ARMADA_37XX_NB_TBG_DIV_MASK;
		if (val == div)
			/*
			 * We found a load level whose divider matches
			 * the target, so this rate is reachable.
			 */
			return *parent_rate / div;
	}

	/* We didn't find any valid divider */
	return -EINVAL;
}
/*
 * Switching the CPU from the L2 or L3 frequencies (300 and 200 MHz
 * respectively) to the L0 frequency (1.2 GHz) requires a significant
 * amount of time to let VDD stabilize to the appropriate
 * voltage. This amount of time is large enough that it cannot be
 * covered by the hardware countdown register. Due to this, the CPU
 * might start operating at L0 before the voltage is stabilized,
 * leading to CPU stalls.
 *
 * To work around this problem, we prevent switching directly from the
 * L2/L3 frequencies to the L0 frequency, and instead switch to the L1
 * frequency in between. The sequence therefore becomes:
 * 1. First switch from L2/L3 (300/200 MHz) to L1 (600 MHz)
 * 2. Sleep 20 ms to let the VDD voltage stabilize
 * 3. Then switch from L1 (600 MHz) to L0 (1200 MHz).
 */
static void clk_pm_cpu_set_rate_wa(unsigned long rate, struct regmap *base)
{
	unsigned int cur_level;

	/* Only the transition to L0 (1.2 GHz) needs the workaround */
	if (rate != 1200 * 1000 * 1000)
		return;

	regmap_read(base, ARMADA_37XX_NB_CPU_LOAD, &cur_level);
	cur_level &= ARMADA_37XX_NB_CPU_LOAD_MASK;
	/* Coming from L0 or L1 is safe as-is */
	if (cur_level <= ARMADA_37XX_DVFS_LOAD_1)
		return;

	/* Go through L1 first and let VDD settle */
	regmap_update_bits(base, ARMADA_37XX_NB_CPU_LOAD,
			   ARMADA_37XX_NB_CPU_LOAD_MASK,
			   ARMADA_37XX_DVFS_LOAD_1);
	msleep(20);
}
static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long parent_rate)
{
	struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
	struct regmap *base = pm_cpu->nb_pm_base;
	unsigned int div = parent_rate / rate;
	unsigned int load_level;

	/* only available when DVFS is enabled */
	if (!armada_3700_pm_dvfs_is_enabled(base))
		return -EINVAL;

	for (load_level = 0; load_level < LOAD_LEVEL_NR; load_level++) {
		unsigned int reg, mask, val,
			offset = ARMADA_37XX_NB_TBG_DIV_OFF;

		armada_3700_pm_dvfs_update_regs(load_level, &reg, &offset);

		regmap_read(base, reg, &val);
		val >>= offset;
		val &= ARMADA_37XX_NB_TBG_DIV_MASK;

		if (val == div) {
			/*
			 * We found a load level matching the target
			 * divider, switch to this load level and
			 * return.
			 */
			reg = ARMADA_37XX_NB_CPU_LOAD;
			mask = ARMADA_37XX_NB_CPU_LOAD_MASK;

			clk_pm_cpu_set_rate_wa(rate, base);

			regmap_update_bits(base, reg, mask, load_level);

			return rate;
		}
	}

	/* We didn't find any valid divider */
	return -EINVAL;
}
static const struct clk_ops clk_pm_cpu_ops = {
	.get_parent = clk_pm_cpu_get_parent,
	.set_parent = clk_pm_cpu_set_parent,
	.round_rate = clk_pm_cpu_round_rate,
	.set_rate = clk_pm_cpu_set_rate,
	.recalc_rate = clk_pm_cpu_recalc_rate,
};

static const struct of_device_id armada_3700_periph_clock_of_match[] = {
	{ .compatible = "marvell,armada-3700-periph-clock-nb",
	  .data = data_nb, },
	{ .compatible = "marvell,armada-3700-periph-clock-sb",
	  .data = data_sb, },
	{ }
};
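
/*
 * A device tree node sketch for the NB block (the node name, reg size
 * and the TBG/xtal provider labels are illustrative; see the
 * armada3700-periph-clock binding for the authoritative example):
 *
 *	nb_periph_clk: nb-periph-clk@13000 {
 *		compatible = "marvell,armada-3700-periph-clock-nb";
 *		reg = <0x13000 0x100>;
 *		clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>, <&tbg 3>,
 *			 <&xtalclk>;
 *		#clock-cells = <1>;
 *	};
 *
 * Consumers then reference individual clocks by their index in the
 * clk_periph_data array, e.g. <&nb_periph_clk 16> for the CPU clock.
 */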
static int armada_3700_add_composite_clk(const struct clk_periph_data *data,
					 void __iomem *reg, spinlock_t *lock,
					 struct device *dev, struct clk_hw **hw)
{
	const struct clk_ops *mux_ops = NULL, *gate_ops = NULL,
		*rate_ops = NULL;
	struct clk_hw *mux_hw = NULL, *gate_hw = NULL, *rate_hw = NULL;

	if (data->mux_hw) {
		struct clk_mux *mux;

		mux_hw = data->mux_hw;
		mux = to_clk_mux(mux_hw);
		mux->lock = lock;
		mux_ops = mux_hw->init->ops;
		/* Turn the stored register offset into a real address */
		mux->reg = reg + (u64)mux->reg;
	}

	if (data->gate_hw) {
		struct clk_gate *gate;

		gate_hw = data->gate_hw;
		gate = to_clk_gate(gate_hw);
		gate->lock = lock;
		gate_ops = gate_hw->init->ops;
		gate->reg = reg + (u64)gate->reg;
		gate->flags = CLK_GATE_SET_TO_DISABLE;
	}

	if (data->rate_hw) {
		rate_hw = data->rate_hw;
		rate_ops = rate_hw->init->ops;
		if (data->is_double_div) {
			struct clk_double_div *rate;

			rate = to_clk_double_div(rate_hw);
			rate->reg1 = reg + (u64)rate->reg1;
			rate->reg2 = reg + (u64)rate->reg2;
		} else {
			struct clk_divider *rate = to_clk_divider(rate_hw);
			const struct clk_div_table *clkt;
			int table_size = 0;

			rate->reg = reg + (u64)rate->reg;
			for (clkt = rate->table; clkt->div; clkt++)
				table_size++;
			rate->width = order_base_2(table_size);
			rate->lock = lock;
		}
	}

	if (data->muxrate_hw) {
		struct clk_pm_cpu *pmcpu_clk;
		struct clk_hw *muxrate_hw = data->muxrate_hw;
		struct regmap *map;

		/* The CPU clock acts as both the mux and the divider */
		pmcpu_clk = to_clk_pm_cpu(muxrate_hw);
		pmcpu_clk->reg_mux = reg + (u64)pmcpu_clk->reg_mux;
		pmcpu_clk->reg_div = reg + (u64)pmcpu_clk->reg_div;

		mux_hw = muxrate_hw;
		rate_hw = muxrate_hw;
		mux_ops = muxrate_hw->init->ops;
		rate_ops = muxrate_hw->init->ops;

		map = syscon_regmap_lookup_by_compatible(
				"marvell,armada-3700-nb-pm");
		pmcpu_clk->nb_pm_base = map;
	}

	*hw = clk_hw_register_composite(dev, data->name, data->parent_names,
					data->num_parents, mux_hw,
					mux_ops, rate_hw, rate_ops,
					gate_hw, gate_ops, CLK_IGNORE_UNUSED);

	return PTR_ERR_OR_ZERO(*hw);
}
static int armada_3700_periph_clock_probe(struct platform_device *pdev)
{
	struct clk_periph_driver_data *driver_data;
	struct device_node *np = pdev->dev.of_node;
	const struct clk_periph_data *data;
	struct device *dev = &pdev->dev;
	int num_periph = 0, i, ret;
	struct resource *res;
	void __iomem *reg;

	data = of_device_get_match_data(dev);
	if (!data)
		return -ENODEV;

	/* The clk_periph_data arrays end with an empty sentinel entry */
	while (data[num_periph].name)
		num_periph++;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	reg = devm_ioremap_resource(dev, res);
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	driver_data = devm_kzalloc(dev, sizeof(*driver_data), GFP_KERNEL);
	if (!driver_data)
		return -ENOMEM;

	driver_data->hw_data = devm_kzalloc(dev,
					    struct_size(driver_data->hw_data,
							hws, num_periph),
					    GFP_KERNEL);
	if (!driver_data->hw_data)
		return -ENOMEM;
	driver_data->hw_data->num = num_periph;

	spin_lock_init(&driver_data->lock);

	for (i = 0; i < num_periph; i++) {
		struct clk_hw **hw = &driver_data->hw_data->hws[i];

		if (armada_3700_add_composite_clk(&data[i], reg,
						  &driver_data->lock, dev, hw))
			dev_err(dev, "Can't register periph clock %s\n",
				data[i].name);
	}

	ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
				     driver_data->hw_data);
	if (ret) {
		for (i = 0; i < num_periph; i++)
			clk_hw_unregister(driver_data->hw_data->hws[i]);
		return ret;
	}

	platform_set_drvdata(pdev, driver_data);

	return 0;
}
static int armada_3700_periph_clock_remove(struct platform_device *pdev)
{
	struct clk_periph_driver_data *data = platform_get_drvdata(pdev);
	struct clk_hw_onecell_data *hw_data = data->hw_data;
	int i;

	of_clk_del_provider(pdev->dev.of_node);

	for (i = 0; i < hw_data->num; i++)
		clk_hw_unregister(hw_data->hws[i]);

	return 0;
}

static struct platform_driver armada_3700_periph_clock_driver = {
	.probe = armada_3700_periph_clock_probe,
	.remove = armada_3700_periph_clock_remove,
	.driver = {
		.name = "marvell-armada-3700-periph-clock",
		.of_match_table = armada_3700_periph_clock_of_match,
	},
};

builtin_platform_driver(armada_3700_periph_clock_driver);