  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * Marvell Armada 37xx SoC Peripheral clocks
  4. *
  5. * Copyright (C) 2016 Marvell
  6. *
  7. * Gregory CLEMENT <gregory.clement@free-electrons.com>
  8. *
  9. * Most of the peripheral clocks can be modelled like this:
  10. * _____ _______ _______
  11. * TBG-A-P --| | | | | | ______
  12. * TBG-B-P --| Mux |--| /div1 |--| /div2 |--| Gate |--> perip_clk
  13. * TBG-A-S --| | | | | | |______|
  14. * TBG-B-S --|_____| |_______| |_______|
  15. *
  16. * However some clocks may use only one or two block or and use the
  17. * xtal clock as parent.
  18. */
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
  26. #define TBG_SEL 0x0
  27. #define DIV_SEL0 0x4
  28. #define DIV_SEL1 0x8
  29. #define DIV_SEL2 0xC
  30. #define CLK_SEL 0x10
  31. #define CLK_DIS 0x14
  32. #define ARMADA_37XX_DVFS_LOAD_1 1
  33. #define LOAD_LEVEL_NR 4
  34. #define ARMADA_37XX_NB_L0L1 0x18
  35. #define ARMADA_37XX_NB_L2L3 0x1C
  36. #define ARMADA_37XX_NB_TBG_DIV_OFF 13
  37. #define ARMADA_37XX_NB_TBG_DIV_MASK 0x7
  38. #define ARMADA_37XX_NB_CLK_SEL_OFF 11
  39. #define ARMADA_37XX_NB_CLK_SEL_MASK 0x1
  40. #define ARMADA_37XX_NB_TBG_SEL_OFF 9
  41. #define ARMADA_37XX_NB_TBG_SEL_MASK 0x3
  42. #define ARMADA_37XX_NB_CONFIG_SHIFT 16
  43. #define ARMADA_37XX_NB_DYN_MOD 0x24
  44. #define ARMADA_37XX_NB_DFS_EN 31
  45. #define ARMADA_37XX_NB_CPU_LOAD 0x30
  46. #define ARMADA_37XX_NB_CPU_LOAD_MASK 0x3
  47. #define ARMADA_37XX_DVFS_LOAD_0 0
  48. #define ARMADA_37XX_DVFS_LOAD_1 1
  49. #define ARMADA_37XX_DVFS_LOAD_2 2
  50. #define ARMADA_37XX_DVFS_LOAD_3 3
/*
 * Per-device driver state: the array of registered clocks handed to the
 * DT clock provider, the ioremapped base of this clock block and a lock
 * serializing read-modify-write accesses to its shared registers.
 */
struct clk_periph_driver_data {
	struct clk_hw_onecell_data *hw_data;	/* clocks exposed via of_clk_hw_onecell_get() */
	spinlock_t lock;			/* protects the shared clock registers */
	void __iomem *reg;			/* MMIO base of the NB or SB clock block */

	/* Storage registers for suspend/resume operations */
	u32 tbg_sel;
	u32 div_sel0;
	u32 div_sel1;
	u32 div_sel2;
	u32 clk_sel;
	u32 clk_dis;
};
/*
 * A rate block made of two cascaded 3-bit dividers (div1 and div2 in the
 * diagram at the top of the file); the effective divider is the product
 * of the two fields located at (reg1, shift1) and (reg2, shift2).
 */
struct clk_double_div {
	struct clk_hw hw;
	void __iomem *reg1;
	u8 shift1;
	void __iomem *reg2;
	u8 shift2;
};
/*
 * The CPU clock: a combined mux + divider whose effective settings come
 * either from the local mux/div register fields, or — when DVFS is
 * enabled — from the North Bridge PM block accessed through nb_pm_base.
 */
struct clk_pm_cpu {
	struct clk_hw hw;
	void __iomem *reg_mux;
	u8 shift_mux;
	u32 mask_mux;
	void __iomem *reg_div;
	u8 shift_div;
	struct regmap *nb_pm_base;	/* "marvell,armada-3700-nb-pm" syscon */
};
#define to_clk_double_div(_hw) container_of(_hw, struct clk_double_div, hw)
#define to_clk_pm_cpu(_hw) container_of(_hw, struct clk_pm_cpu, hw)

/*
 * Static description of one peripheral clock: its name, parent list and
 * the statically-initialized sub-blocks (mux/rate/gate, or the combined
 * CPU muxrate) that probe assembles into a composite clock.
 */
struct clk_periph_data {
	const char *name;
	const char * const *parent_names;
	int num_parents;
	struct clk_hw *mux_hw;
	struct clk_hw *rate_hw;
	struct clk_hw *gate_hw;
	struct clk_hw *muxrate_hw;
	bool is_double_div;	/* rate_hw is a clk_double_div, not a clk_divider */
};
/*
 * 3-bit divider field: raw values 1..6 divide by the same amount;
 * values above 6 are rejected by get_div() below.
 */
static const struct clk_div_table clk_table6[] = {
	{ .val = 1, .div = 1, },
	{ .val = 2, .div = 2, },
	{ .val = 3, .div = 3, },
	{ .val = 4, .div = 4, },
	{ .val = 5, .div = 5, },
	{ .val = 6, .div = 6, },
	{ .val = 0, .div = 0, }, /* last entry */
};

/* 1-bit divider field: /1 or /2. */
static const struct clk_div_table clk_table1[] = {
	{ .val = 0, .div = 1, },
	{ .val = 1, .div = 2, },
	{ .val = 0, .div = 0, }, /* last entry */
};

/* 1-bit divider field: /2 or /4. */
static const struct clk_div_table clk_table2[] = {
	{ .val = 0, .div = 2, },
	{ .val = 1, .div = 4, },
	{ .val = 0, .div = 0, }, /* last entry */
};
static const struct clk_ops clk_double_div_ops;
static const struct clk_ops clk_pm_cpu_ops;

/*
 * The PERIPH_* macros below define statically-initialized sub-blocks.
 * Note that the register fields (.reg, .reg1, .reg2, .reg_mux, .reg_div)
 * are initialized with the register *offset* cast to a pointer; probe
 * later rebases them by adding the ioremapped block base (see
 * armada_3700_add_composite_clk()).
 */

/* Gate: one enable bit in CLK_DIS. */
#define PERIPH_GATE(_name, _bit)			\
struct clk_gate gate_##_name = {		\
	.reg = (void *)CLK_DIS,			\
	.bit_idx = _bit,			\
	.hw.init = &(struct clk_init_data){	\
		.ops =  &clk_gate_ops,		\
	}					\
};

/* Read-only parent mux: a 2-bit field in TBG_SEL. */
#define PERIPH_MUX(_name, _shift)			\
struct clk_mux mux_##_name = {			\
	.reg = (void *)TBG_SEL,			\
	.shift = _shift,			\
	.mask = 3,				\
	.hw.init = &(struct clk_init_data){	\
		.ops =  &clk_mux_ro_ops,	\
	}					\
};

/* Two cascaded 3-bit dividers (read-only, see clk_double_div_ops). */
#define PERIPH_DOUBLEDIV(_name, _reg1, _reg2, _shift1, _shift2)	\
struct clk_double_div rate_##_name = {		\
	.reg1 = (void *)_reg1,			\
	.reg2 = (void *)_reg2,			\
	.shift1 = _shift1,			\
	.shift2 = _shift2,			\
	.hw.init = &(struct clk_init_data){	\
		.ops =  &clk_double_div_ops,	\
	}					\
};

/* Single read-only divider backed by one of the clk_tableN tables. */
#define PERIPH_DIV(_name, _reg, _shift, _table)	\
struct clk_divider rate_##_name = {		\
	.reg = (void *)_reg,			\
	.table = _table,			\
	.shift = _shift,			\
	.hw.init = &(struct clk_init_data){	\
		.ops =  &clk_divider_ro_ops,	\
	}					\
};

/* Combined mux + divider used for the CPU clock (DVFS aware). */
#define PERIPH_PM_CPU(_name, _shift1, _reg, _shift2)	\
struct clk_pm_cpu muxrate_##_name = {		\
	.reg_mux = (void *)TBG_SEL,		\
	.mask_mux = 3,				\
	.shift_mux = _shift1,			\
	.reg_div = (void *)_reg,		\
	.shift_div = _shift2,			\
	.hw.init = &(struct clk_init_data){	\
		.ops =  &clk_pm_cpu_ops,	\
	}					\
};

/* Convenience wrappers instantiating the sub-blocks a clock needs. */
#define PERIPH_CLK_FULL_DD(_name, _bit, _shift, _reg1, _reg2, _shift1, _shift2)\
static PERIPH_GATE(_name, _bit);			    \
static PERIPH_MUX(_name, _shift);			    \
static PERIPH_DOUBLEDIV(_name, _reg1, _reg2, _shift1, _shift2);

#define PERIPH_CLK_FULL(_name, _bit, _shift, _reg, _shift1, _table)	\
static PERIPH_GATE(_name, _bit);			    \
static PERIPH_MUX(_name, _shift);			    \
static PERIPH_DIV(_name, _reg, _shift1, _table);

#define PERIPH_CLK_GATE_DIV(_name, _bit,  _reg, _shift, _table)	\
static PERIPH_GATE(_name, _bit);			\
static PERIPH_DIV(_name, _reg, _shift, _table);

#define PERIPH_CLK_MUX_DD(_name, _shift, _reg1, _reg2, _shift1, _shift2)\
static PERIPH_MUX(_name, _shift);			    \
static PERIPH_DOUBLEDIV(_name, _reg1, _reg2, _shift1, _shift2);

/* REF_CLK_* build the clk_periph_data entries referencing the blocks. */
#define REF_CLK_FULL(_name)			\
	{ .name = #_name,			\
	  .parent_names = (const char *[]){ "TBG-A-P",	\
	      "TBG-B-P", "TBG-A-S", "TBG-B-S"},	\
	  .num_parents = 4,			\
	  .mux_hw = &mux_##_name.hw,		\
	  .gate_hw = &gate_##_name.hw,		\
	  .rate_hw = &rate_##_name.hw,		\
	}

#define REF_CLK_FULL_DD(_name)			\
	{ .name = #_name,			\
	  .parent_names = (const char *[]){ "TBG-A-P",	\
	      "TBG-B-P", "TBG-A-S", "TBG-B-S"},	\
	  .num_parents = 4,			\
	  .mux_hw = &mux_##_name.hw,		\
	  .gate_hw = &gate_##_name.hw,		\
	  .rate_hw = &rate_##_name.hw,		\
	  .is_double_div = true,		\
	}

#define REF_CLK_GATE(_name, _parent_name)		\
	{ .name = #_name,				\
	  .parent_names = (const char *[]){ _parent_name},	\
	  .num_parents = 1,				\
	  .gate_hw = &gate_##_name.hw,			\
	}

#define REF_CLK_GATE_DIV(_name, _parent_name)		\
	{ .name = #_name,				\
	  .parent_names = (const char *[]){ _parent_name},	\
	  .num_parents = 1,				\
	  .gate_hw = &gate_##_name.hw,			\
	  .rate_hw = &rate_##_name.hw,			\
	}

#define REF_CLK_PM_CPU(_name)				\
	{ .name = #_name,				\
	  .parent_names = (const char *[]){ "TBG-A-P",	\
	      "TBG-B-P", "TBG-A-S", "TBG-B-S"},		\
	  .num_parents = 4,				\
	  .muxrate_hw = &muxrate_##_name.hw,		\
	}

#define REF_CLK_MUX_DD(_name)				\
	{ .name = #_name,				\
	  .parent_names = (const char *[]){ "TBG-A-P",	\
	      "TBG-B-P", "TBG-A-S", "TBG-B-S"},		\
	  .num_parents = 4,				\
	  .mux_hw = &mux_##_name.hw,			\
	  .rate_hw = &rate_##_name.hw,			\
	  .is_double_div = true,			\
	}
/* NB periph clocks */
PERIPH_CLK_FULL_DD(mmc, 2, 0, DIV_SEL2, DIV_SEL2, 16, 13);
PERIPH_CLK_FULL_DD(sata_host, 3, 2, DIV_SEL2, DIV_SEL2, 10, 7);
PERIPH_CLK_FULL_DD(sec_at, 6, 4, DIV_SEL1, DIV_SEL1, 3, 0);
PERIPH_CLK_FULL_DD(sec_dap, 7, 6, DIV_SEL1, DIV_SEL1, 9, 6);
PERIPH_CLK_FULL_DD(tscem, 8, 8, DIV_SEL1, DIV_SEL1, 15, 12);
PERIPH_CLK_FULL(tscem_tmx, 10, 10, DIV_SEL1, 18, clk_table6);
static PERIPH_GATE(avs, 11);
PERIPH_CLK_FULL_DD(pwm, 13, 14, DIV_SEL0, DIV_SEL0, 3, 0);
PERIPH_CLK_FULL_DD(sqf, 12, 12, DIV_SEL1, DIV_SEL1, 27, 24);
static PERIPH_GATE(i2c_2, 16);
static PERIPH_GATE(i2c_1, 17);
PERIPH_CLK_GATE_DIV(ddr_phy, 19, DIV_SEL0, 18, clk_table2);
PERIPH_CLK_FULL_DD(ddr_fclk, 21, 16, DIV_SEL0, DIV_SEL0, 15, 12);
PERIPH_CLK_FULL(trace, 22, 18, DIV_SEL0, 20, clk_table6);
PERIPH_CLK_FULL(counter, 23, 20, DIV_SEL0, 23, clk_table6);
PERIPH_CLK_FULL_DD(eip97, 24, 24, DIV_SEL2, DIV_SEL2, 22, 19);
static PERIPH_PM_CPU(cpu, 22, DIV_SEL0, 28);

/* North Bridge clocks; the table is NULL-name terminated. */
static struct clk_periph_data data_nb[] = {
	REF_CLK_FULL_DD(mmc),
	REF_CLK_FULL_DD(sata_host),
	REF_CLK_FULL_DD(sec_at),
	REF_CLK_FULL_DD(sec_dap),
	REF_CLK_FULL_DD(tscem),
	REF_CLK_FULL(tscem_tmx),
	REF_CLK_GATE(avs, "xtal"),
	REF_CLK_FULL_DD(sqf),
	REF_CLK_FULL_DD(pwm),
	REF_CLK_GATE(i2c_2, "xtal"),
	REF_CLK_GATE(i2c_1, "xtal"),
	REF_CLK_GATE_DIV(ddr_phy, "TBG-A-S"),
	REF_CLK_FULL_DD(ddr_fclk),
	REF_CLK_FULL(trace),
	REF_CLK_FULL(counter),
	REF_CLK_FULL_DD(eip97),
	REF_CLK_PM_CPU(cpu),
	{ },
};

/* SB periph clocks */
PERIPH_CLK_MUX_DD(gbe_50, 6, DIV_SEL2, DIV_SEL2, 6, 9);
PERIPH_CLK_MUX_DD(gbe_core, 8, DIV_SEL1, DIV_SEL1, 18, 21);
PERIPH_CLK_MUX_DD(gbe_125, 10, DIV_SEL1, DIV_SEL1, 6, 9);
static PERIPH_GATE(gbe1_50, 0);
static PERIPH_GATE(gbe0_50, 1);
static PERIPH_GATE(gbe1_125, 2);
static PERIPH_GATE(gbe0_125, 3);
PERIPH_CLK_GATE_DIV(gbe1_core, 4, DIV_SEL1, 13, clk_table1);
PERIPH_CLK_GATE_DIV(gbe0_core, 5, DIV_SEL1, 14, clk_table1);
PERIPH_CLK_GATE_DIV(gbe_bm, 12, DIV_SEL1, 0, clk_table1);
PERIPH_CLK_FULL_DD(sdio, 11, 14, DIV_SEL0, DIV_SEL0, 3, 6);
PERIPH_CLK_FULL_DD(usb32_usb2_sys, 16, 16, DIV_SEL0, DIV_SEL0, 9, 12);
PERIPH_CLK_FULL_DD(usb32_ss_sys, 17, 18, DIV_SEL0, DIV_SEL0, 15, 18);

/* South Bridge clocks; the table is NULL-name terminated. */
static struct clk_periph_data data_sb[] = {
	REF_CLK_MUX_DD(gbe_50),
	REF_CLK_MUX_DD(gbe_core),
	REF_CLK_MUX_DD(gbe_125),
	REF_CLK_GATE(gbe1_50, "gbe_50"),
	REF_CLK_GATE(gbe0_50, "gbe_50"),
	REF_CLK_GATE(gbe1_125, "gbe_125"),
	REF_CLK_GATE(gbe0_125, "gbe_125"),
	REF_CLK_GATE_DIV(gbe1_core, "gbe_core"),
	REF_CLK_GATE_DIV(gbe0_core, "gbe_core"),
	REF_CLK_GATE_DIV(gbe_bm, "gbe_core"),
	REF_CLK_FULL_DD(sdio),
	REF_CLK_FULL_DD(usb32_usb2_sys),
	REF_CLK_FULL_DD(usb32_ss_sys),
	{ },
};
  289. static unsigned int get_div(void __iomem *reg, int shift)
  290. {
  291. u32 val;
  292. val = (readl(reg) >> shift) & 0x7;
  293. if (val > 6)
  294. return 0;
  295. return val;
  296. }
/*
 * Compute the output rate of a double-divider clock: the parent rate is
 * divided by the product of the two 3-bit divider fields.
 *
 * NOTE(review): get_div() returns 0 for a raw field of 0 or the invalid
 * value 7, which would make the product 0 and this a division by zero —
 * presumably the hardware never exposes such values here; confirm.
 */
static unsigned long clk_double_div_recalc_rate(struct clk_hw *hw,
						unsigned long parent_rate)
{
	struct clk_double_div *double_div = to_clk_double_div(hw);
	unsigned int div;

	div = get_div(double_div->reg1, double_div->shift1);
	div *= get_div(double_div->reg2, double_div->shift2);

	return DIV_ROUND_UP_ULL((u64)parent_rate, div);
}

/* Read-only rate block: the dividers are never reprogrammed here. */
static const struct clk_ops clk_double_div_ops = {
	.recalc_rate = clk_double_div_recalc_rate,
};
  309. static void armada_3700_pm_dvfs_update_regs(unsigned int load_level,
  310. unsigned int *reg,
  311. unsigned int *offset)
  312. {
  313. if (load_level <= ARMADA_37XX_DVFS_LOAD_1)
  314. *reg = ARMADA_37XX_NB_L0L1;
  315. else
  316. *reg = ARMADA_37XX_NB_L2L3;
  317. if (load_level == ARMADA_37XX_DVFS_LOAD_0 ||
  318. load_level == ARMADA_37XX_DVFS_LOAD_2)
  319. *offset += ARMADA_37XX_NB_CONFIG_SHIFT;
  320. }
  321. static bool armada_3700_pm_dvfs_is_enabled(struct regmap *base)
  322. {
  323. unsigned int val, reg = ARMADA_37XX_NB_DYN_MOD;
  324. if (IS_ERR(base))
  325. return false;
  326. regmap_read(base, reg, &val);
  327. return !!(val & BIT(ARMADA_37XX_NB_DFS_EN));
  328. }
  329. static unsigned int armada_3700_pm_dvfs_get_cpu_div(struct regmap *base)
  330. {
  331. unsigned int reg = ARMADA_37XX_NB_CPU_LOAD;
  332. unsigned int offset = ARMADA_37XX_NB_TBG_DIV_OFF;
  333. unsigned int load_level, div;
  334. /*
  335. * This function is always called after the function
  336. * armada_3700_pm_dvfs_is_enabled, so no need to check again
  337. * if the base is valid.
  338. */
  339. regmap_read(base, reg, &load_level);
  340. /*
  341. * The register and the offset inside this register accessed to
  342. * read the current divider depend on the load level
  343. */
  344. load_level &= ARMADA_37XX_NB_CPU_LOAD_MASK;
  345. armada_3700_pm_dvfs_update_regs(load_level, &reg, &offset);
  346. regmap_read(base, reg, &div);
  347. return (div >> offset) & ARMADA_37XX_NB_TBG_DIV_MASK;
  348. }
  349. static unsigned int armada_3700_pm_dvfs_get_cpu_parent(struct regmap *base)
  350. {
  351. unsigned int reg = ARMADA_37XX_NB_CPU_LOAD;
  352. unsigned int offset = ARMADA_37XX_NB_TBG_SEL_OFF;
  353. unsigned int load_level, sel;
  354. /*
  355. * This function is always called after the function
  356. * armada_3700_pm_dvfs_is_enabled, so no need to check again
  357. * if the base is valid
  358. */
  359. regmap_read(base, reg, &load_level);
  360. /*
  361. * The register and the offset inside this register accessed to
  362. * read the current divider depend on the load level
  363. */
  364. load_level &= ARMADA_37XX_NB_CPU_LOAD_MASK;
  365. armada_3700_pm_dvfs_update_regs(load_level, &reg, &offset);
  366. regmap_read(base, reg, &sel);
  367. return (sel >> offset) & ARMADA_37XX_NB_TBG_SEL_MASK;
  368. }
  369. static u8 clk_pm_cpu_get_parent(struct clk_hw *hw)
  370. {
  371. struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
  372. u32 val;
  373. if (armada_3700_pm_dvfs_is_enabled(pm_cpu->nb_pm_base)) {
  374. val = armada_3700_pm_dvfs_get_cpu_parent(pm_cpu->nb_pm_base);
  375. } else {
  376. val = readl(pm_cpu->reg_mux) >> pm_cpu->shift_mux;
  377. val &= pm_cpu->mask_mux;
  378. }
  379. return val;
  380. }
  381. static int clk_pm_cpu_set_parent(struct clk_hw *hw, u8 index)
  382. {
  383. struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
  384. struct regmap *base = pm_cpu->nb_pm_base;
  385. int load_level;
  386. /*
  387. * We set the clock parent only if the DVFS is available but
  388. * not enabled.
  389. */
  390. if (IS_ERR(base) || armada_3700_pm_dvfs_is_enabled(base))
  391. return -EINVAL;
  392. /* Set the parent clock for all the load level */
  393. for (load_level = 0; load_level < LOAD_LEVEL_NR; load_level++) {
  394. unsigned int reg, mask, val,
  395. offset = ARMADA_37XX_NB_TBG_SEL_OFF;
  396. armada_3700_pm_dvfs_update_regs(load_level, &reg, &offset);
  397. val = index << offset;
  398. mask = ARMADA_37XX_NB_TBG_SEL_MASK << offset;
  399. regmap_update_bits(base, reg, mask, val);
  400. }
  401. return 0;
  402. }
  403. static unsigned long clk_pm_cpu_recalc_rate(struct clk_hw *hw,
  404. unsigned long parent_rate)
  405. {
  406. struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
  407. unsigned int div;
  408. if (armada_3700_pm_dvfs_is_enabled(pm_cpu->nb_pm_base))
  409. div = armada_3700_pm_dvfs_get_cpu_div(pm_cpu->nb_pm_base);
  410. else
  411. div = get_div(pm_cpu->reg_div, pm_cpu->shift_div);
  412. return DIV_ROUND_UP_ULL((u64)parent_rate, div);
  413. }
/*
 * Round @rate to a rate reachable through one of the DVFS load levels:
 * the target divider must already be programmed in some level's
 * configuration.  Returns the achievable rate, or -EINVAL when DVFS is
 * disabled or no load level provides the requested divider.
 *
 * NOTE(review): @rate is used as a divisor; a zero rate from the core
 * would fault here — presumably the clk framework never passes 0.
 */
static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long *parent_rate)
{
	struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
	struct regmap *base = pm_cpu->nb_pm_base;
	unsigned int div = *parent_rate / rate;
	unsigned int load_level;

	/* only available when DVFS is enabled */
	if (!armada_3700_pm_dvfs_is_enabled(base))
		return -EINVAL;

	for (load_level = 0; load_level < LOAD_LEVEL_NR; load_level++) {
		unsigned int reg, val, offset = ARMADA_37XX_NB_TBG_DIV_OFF;

		armada_3700_pm_dvfs_update_regs(load_level, &reg, &offset);

		regmap_read(base, reg, &val);

		val >>= offset;
		val &= ARMADA_37XX_NB_TBG_DIV_MASK;
		if (val == div)
			/*
			 * We found a load level matching the target
			 * divider, switch to this load level and
			 * return.
			 */
			return *parent_rate / div;
	}

	/* We didn't find any valid divider */
	return -EINVAL;
}
/*
 * Switching the CPU from the L2 or L3 frequencies (300 and 200 Mhz
 * respectively) to L0 frequency (1.2 Ghz) requires a significant
 * amount of time to let VDD stabilize to the appropriate
 * voltage. This amount of time is large enough that it cannot be
 * covered by the hardware countdown register. Due to this, the CPU
 * might start operating at L0 before the voltage is stabilized,
 * leading to CPU stalls.
 *
 * To work around this problem, we prevent switching directly from the
 * L2/L3 frequencies to the L0 frequency, and instead switch to the L1
 * frequency in-between. The sequence therefore becomes:
 * 1. First switch from L2/L3(200/300MHz) to L1(600MHZ)
 * 2. Sleep 20ms for stabling VDD voltage
 * 3. Then switch from L1(600MHZ) to L0(1200Mhz).
 */
static void clk_pm_cpu_set_rate_wa(unsigned long rate, struct regmap *base)
{
	unsigned int cur_level;

	/* The workaround only applies when targeting L0 (1.2 GHz). */
	if (rate != 1200 * 1000 * 1000)
		return;

	regmap_read(base, ARMADA_37XX_NB_CPU_LOAD, &cur_level);
	cur_level &= ARMADA_37XX_NB_CPU_LOAD_MASK;

	/* Nothing to do when already at L0 or L1. */
	if (cur_level <= ARMADA_37XX_DVFS_LOAD_1)
		return;

	/* Step via L1 and wait for VDD to stabilize. */
	regmap_update_bits(base, ARMADA_37XX_NB_CPU_LOAD,
			   ARMADA_37XX_NB_CPU_LOAD_MASK,
			   ARMADA_37XX_DVFS_LOAD_1);
	msleep(20);
}
/*
 * Set the CPU rate by switching to the DVFS load level whose divider
 * matches parent_rate / rate.  Only available while DVFS is enabled.
 * The L2/L3 -> L0 workaround is applied before the actual switch.
 */
static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long parent_rate)
{
	struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
	struct regmap *base = pm_cpu->nb_pm_base;
	unsigned int div = parent_rate / rate;
	unsigned int load_level;

	/* only available when DVFS is enabled */
	if (!armada_3700_pm_dvfs_is_enabled(base))
		return -EINVAL;

	for (load_level = 0; load_level < LOAD_LEVEL_NR; load_level++) {
		unsigned int reg, mask, val,
			offset = ARMADA_37XX_NB_TBG_DIV_OFF;

		armada_3700_pm_dvfs_update_regs(load_level, &reg, &offset);

		regmap_read(base, reg, &val);
		val >>= offset;
		val &= ARMADA_37XX_NB_TBG_DIV_MASK;

		if (val == div) {
			/*
			 * We found a load level matching the target
			 * divider, switch to this load level and
			 * return.
			 */
			reg = ARMADA_37XX_NB_CPU_LOAD;
			mask = ARMADA_37XX_NB_CPU_LOAD_MASK;

			/* The detour via L1 must happen before the switch. */
			clk_pm_cpu_set_rate_wa(rate, base);

			regmap_update_bits(base, reg, mask, load_level);

			/*
			 * NOTE(review): returns the (positive) rate rather
			 * than the conventional 0 on success — verify the
			 * clk core accepts this before changing it.
			 */
			return rate;
		}
	}

	/* We didn't find any valid divider */
	return -EINVAL;
}

static const struct clk_ops clk_pm_cpu_ops = {
	.get_parent = clk_pm_cpu_get_parent,
	.set_parent = clk_pm_cpu_set_parent,
	.round_rate = clk_pm_cpu_round_rate,
	.set_rate = clk_pm_cpu_set_rate,
	.recalc_rate = clk_pm_cpu_recalc_rate,
};
/* Match data selects the North Bridge or South Bridge clock table. */
static const struct of_device_id armada_3700_periph_clock_of_match[] = {
	{ .compatible = "marvell,armada-3700-periph-clock-nb",
	  .data = data_nb, },
	{ .compatible = "marvell,armada-3700-periph-clock-sb",
	  .data = data_sb, },
	{ }
};
/*
 * Assemble one clk_periph_data entry into a registered composite clock.
 *
 * The statically-initialized sub-blocks carry register *offsets* in
 * their pointer fields (see the PERIPH_* macros); each is rebased here
 * by adding @reg, the ioremapped block base.  The (u64) casts convert
 * the smuggled offset back to an integer before the addition.
 *
 * @data: static description of the clock to build
 * @reg:  ioremapped base of the clock register block
 * @lock: spinlock shared by all clocks of this block
 * @dev:  device registering the clock
 * @hw:   out parameter receiving the registered clk_hw
 *
 * Returns 0 on success or the error from clk_hw_register_composite().
 */
static int armada_3700_add_composite_clk(const struct clk_periph_data *data,
					 void __iomem *reg, spinlock_t *lock,
					 struct device *dev, struct clk_hw **hw)
{
	const struct clk_ops *mux_ops = NULL, *gate_ops = NULL,
		*rate_ops = NULL;
	struct clk_hw *mux_hw = NULL, *gate_hw = NULL, *rate_hw = NULL;

	if (data->mux_hw) {
		struct clk_mux *mux;

		mux_hw = data->mux_hw;
		mux = to_clk_mux(mux_hw);
		mux->lock = lock;
		mux_ops = mux_hw->init->ops;
		mux->reg = reg + (u64)mux->reg;	/* rebase offset -> address */
	}

	if (data->gate_hw) {
		struct clk_gate *gate;

		gate_hw = data->gate_hw;
		gate = to_clk_gate(gate_hw);
		gate->lock = lock;
		gate_ops = gate_hw->init->ops;
		gate->reg = reg + (u64)gate->reg;
		/* Bits in CLK_DIS disable the clock, so setting them gates. */
		gate->flags = CLK_GATE_SET_TO_DISABLE;
	}

	if (data->rate_hw) {
		rate_hw = data->rate_hw;
		rate_ops = rate_hw->init->ops;
		if (data->is_double_div) {
			struct clk_double_div *rate;

			rate = to_clk_double_div(rate_hw);
			rate->reg1 = reg + (u64)rate->reg1;
			rate->reg2 = reg + (u64)rate->reg2;
		} else {
			struct clk_divider *rate = to_clk_divider(rate_hw);
			const struct clk_div_table *clkt;
			int table_size = 0;

			rate->reg = reg + (u64)rate->reg;
			/* Derive the field width from the table length. */
			for (clkt = rate->table; clkt->div; clkt++)
				table_size++;
			rate->width = order_base_2(table_size);
			rate->lock = lock;
		}
	}

	if (data->muxrate_hw) {
		struct clk_pm_cpu *pmcpu_clk;
		struct clk_hw *muxrate_hw = data->muxrate_hw;
		struct regmap *map;

		/* The CPU clock is both the mux and the rate block. */
		pmcpu_clk = to_clk_pm_cpu(muxrate_hw);
		pmcpu_clk->reg_mux = reg + (u64)pmcpu_clk->reg_mux;
		pmcpu_clk->reg_div = reg + (u64)pmcpu_clk->reg_div;

		mux_hw = muxrate_hw;
		rate_hw = muxrate_hw;
		mux_ops = muxrate_hw->init->ops;
		rate_ops = muxrate_hw->init->ops;

		/* May be an ERR_PTR; the ops check it before each use. */
		map = syscon_regmap_lookup_by_compatible(
				"marvell,armada-3700-nb-pm");
		pmcpu_clk->nb_pm_base = map;
	}

	*hw = clk_hw_register_composite(dev, data->name, data->parent_names,
					data->num_parents, mux_hw,
					mux_ops, rate_hw, rate_ops,
					gate_hw, gate_ops, CLK_IGNORE_UNUSED);

	return PTR_ERR_OR_ZERO(*hw);
}
  582. static int __maybe_unused armada_3700_periph_clock_suspend(struct device *dev)
  583. {
  584. struct clk_periph_driver_data *data = dev_get_drvdata(dev);
  585. data->tbg_sel = readl(data->reg + TBG_SEL);
  586. data->div_sel0 = readl(data->reg + DIV_SEL0);
  587. data->div_sel1 = readl(data->reg + DIV_SEL1);
  588. data->div_sel2 = readl(data->reg + DIV_SEL2);
  589. data->clk_sel = readl(data->reg + CLK_SEL);
  590. data->clk_dis = readl(data->reg + CLK_DIS);
  591. return 0;
  592. }
/*
 * Restore the registers saved at suspend time.  The write order is
 * deliberate and must not change.
 */
static int __maybe_unused armada_3700_periph_clock_resume(struct device *dev)
{
	struct clk_periph_driver_data *data = dev_get_drvdata(dev);

	/* Follow the same order as what the Cortex-M3 does (ATF code) */
	writel(data->clk_dis, data->reg + CLK_DIS);
	writel(data->div_sel0, data->reg + DIV_SEL0);
	writel(data->div_sel1, data->reg + DIV_SEL1);
	writel(data->div_sel2, data->reg + DIV_SEL2);
	writel(data->tbg_sel, data->reg + TBG_SEL);
	writel(data->clk_sel, data->reg + CLK_SEL);

	return 0;
}

static const struct dev_pm_ops armada_3700_periph_clock_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(armada_3700_periph_clock_suspend,
				armada_3700_periph_clock_resume)
};
/*
 * Probe: map the clock block, register every clock described by the
 * matched table (NB or SB) and expose them as a DT clock provider.
 */
static int armada_3700_periph_clock_probe(struct platform_device *pdev)
{
	struct clk_periph_driver_data *driver_data;
	struct device_node *np = pdev->dev.of_node;
	const struct clk_periph_data *data;
	struct device *dev = &pdev->dev;
	int num_periph = 0, i, ret;
	struct resource *res;

	data = of_device_get_match_data(dev);
	if (!data)
		return -ENODEV;

	/* The clock tables are terminated by a NULL-name entry. */
	while (data[num_periph].name)
		num_periph++;

	driver_data = devm_kzalloc(dev, sizeof(*driver_data), GFP_KERNEL);
	if (!driver_data)
		return -ENOMEM;

	driver_data->hw_data = devm_kzalloc(dev,
					    struct_size(driver_data->hw_data,
							hws, num_periph),
					    GFP_KERNEL);
	if (!driver_data->hw_data)
		return -ENOMEM;
	driver_data->hw_data->num = num_periph;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	driver_data->reg = devm_ioremap_resource(dev, res);
	if (IS_ERR(driver_data->reg))
		return PTR_ERR(driver_data->reg);

	spin_lock_init(&driver_data->lock);

	/* Individual registration failures are logged but not fatal. */
	for (i = 0; i < num_periph; i++) {
		struct clk_hw **hw = &driver_data->hw_data->hws[i];

		if (armada_3700_add_composite_clk(&data[i], driver_data->reg,
						  &driver_data->lock, dev, hw))
			dev_err(dev, "Can't register periph clock %s\n",
				data[i].name);
	}

	ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
				     driver_data->hw_data);
	if (ret) {
		/*
		 * NOTE(review): if a clock above failed to register, its
		 * hws[i] slot holds an ERR_PTR here and is still passed to
		 * clk_hw_unregister() — verify whether failed entries
		 * should be skipped in this unwind.
		 */
		for (i = 0; i < num_periph; i++)
			clk_hw_unregister(driver_data->hw_data->hws[i]);
		return ret;
	}

	platform_set_drvdata(pdev, driver_data);
	return 0;
}
  654. static int armada_3700_periph_clock_remove(struct platform_device *pdev)
  655. {
  656. struct clk_periph_driver_data *data = platform_get_drvdata(pdev);
  657. struct clk_hw_onecell_data *hw_data = data->hw_data;
  658. int i;
  659. of_clk_del_provider(pdev->dev.of_node);
  660. for (i = 0; i < hw_data->num; i++)
  661. clk_hw_unregister(hw_data->hws[i]);
  662. return 0;
  663. }
/* Built-in driver: registered at device_initcall time, never unloaded. */
static struct platform_driver armada_3700_periph_clock_driver = {
	.probe = armada_3700_periph_clock_probe,
	.remove = armada_3700_periph_clock_remove,
	.driver		= {
		.name	= "marvell-armada-3700-periph-clock",
		.of_match_table = armada_3700_periph_clock_of_match,
		.pm	= &armada_3700_periph_clock_pm_ops,
	},
};

builtin_platform_driver(armada_3700_periph_clock_driver);