clk-stm32h7.c 37 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410
  1. /*
  2. * Copyright (C) Gabriel Fernandez 2017
  3. * Author: Gabriel Fernandez <gabriel.fernandez@st.com>
  4. *
  5. * License terms: GPL V2.0.
  6. *
  7. * This program is free software; you can redistribute it and/or modify it
  8. * under the terms and conditions of the GNU General Public License,
  9. * version 2, as published by the Free Software Foundation.
  10. *
  11. * This program is distributed in the hope it will be useful, but WITHOUT
  12. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  14. * more details.
  15. *
  16. * You should have received a copy of the GNU General Public License along with
  17. * this program. If not, see <http://www.gnu.org/licenses/>.
  18. */
  19. #include <linux/clk.h>
  20. #include <linux/clk-provider.h>
  21. #include <linux/err.h>
  22. #include <linux/io.h>
  23. #include <linux/mfd/syscon.h>
  24. #include <linux/of.h>
  25. #include <linux/of_address.h>
  26. #include <linux/slab.h>
  27. #include <linux/spinlock.h>
  28. #include <linux/regmap.h>
  29. #include <dt-bindings/clock/stm32h7-clks.h>
/* Reset Clock Control Registers */
#define RCC_CR		0x00	/* source control: oscillator/PLL enable + ready bits */
#define RCC_CFGR	0x10	/* system clock mux, TIMPRE (bit 15), MCO */
#define RCC_D1CFGR	0x18	/* domain 1 prescalers (D1CPRE, HPRE, D1PPRE) */
#define RCC_D2CFGR	0x1C	/* domain 2 APB prescalers */
#define RCC_D3CFGR	0x20	/* domain 3 APB prescaler */
#define RCC_PLLCKSELR	0x28	/* PLL source mux + DIVM1/2/3 pre-dividers */
#define RCC_PLLCFGR	0x2C	/* PLL config: FRACEN bits, output-divider enables */
#define RCC_PLL1DIVR	0x30	/* PLL1 dividers (DIVN low 9 bits, DIVP/Q/R) */
#define RCC_PLL1FRACR	0x34	/* PLL1 fractional value (FRACN) */
#define RCC_PLL2DIVR	0x38
#define RCC_PLL2FRACR	0x3C
#define RCC_PLL3DIVR	0x40
#define RCC_PLL3FRACR	0x44
#define RCC_D1CCIPR	0x4C	/* domain 1 kernel clock selection */
#define RCC_D2CCIP1R	0x50	/* domain 2 kernel clock selection, part 1 */
#define RCC_D2CCIP2R	0x54	/* domain 2 kernel clock selection, part 2 */
#define RCC_D3CCIPR	0x58	/* domain 3 kernel clock selection */
#define RCC_BDCR	0x70	/* backup domain control */
#define RCC_CSR		0x74	/* clock control & status (LSI enable/ready) */
/* Peripheral clock enable registers */
#define RCC_AHB3ENR	0xD4
#define RCC_AHB1ENR	0xD8
#define RCC_AHB2ENR	0xDC
#define RCC_AHB4ENR	0xE0
#define RCC_APB3ENR	0xE4
#define RCC_APB1LENR	0xE8
#define RCC_APB1HENR	0xEC
#define RCC_APB2ENR	0xF0
#define RCC_APB4ENR	0xF4

/* Protects read-modify-write accesses to the RCC registers */
static DEFINE_SPINLOCK(stm32rcc_lock);

/* ioremapped base address of the RCC register block */
static void __iomem *base;
/* clk_hw table handed to the clock provider, indexed by DT binding ids */
static struct clk_hw **hws;
/* System clock parent */
static const char * const sys_src[] = {
		"hsi_ck", "csi_ck", "hse_ck", "pll1_p" };

static const char * const tracein_src[] = {
		"hsi_ck", "csi_ck", "hse_ck", "pll1_r" };

static const char * const per_src[] = {
		"hsi_ker", "csi_ker", "hse_ck", "disabled" };

static const char * const pll_src[] = {
		"hsi_ck", "csi_ck", "hse_ck", "no clock" };

static const char * const sdmmc_src[] = { "pll1_q", "pll2_r" };

static const char * const dsi_src[] = { "ck_dsi_phy", "pll2_q" };

static const char * const qspi_src[] = {
		"hclk", "pll1_q", "pll2_r", "per_ck" };

static const char * const fmc_src[] = {
		"hclk", "pll1_q", "pll2_r", "per_ck" };

/* Kernel clock parent */
static const char * const swp_src[] = { "pclk1", "hsi_ker" };

static const char * const fdcan_src[] = { "hse_ck", "pll1_q", "pll2_q" };

static const char * const dfsdm1_src[] = { "pclk2", "sys_ck" };

static const char * const spdifrx_src[] = {
		"pll1_q", "pll2_r", "pll3_r", "hsi_ker" };

/*
 * Not fully const on purpose: slot 3 is NULL and is presumably filled
 * in at init time with an external clock parent -- confirm in the
 * setup code (not visible in this chunk).
 */
static const char *spi_src1[5] = {
		"pll1_q", "pll2_p", "pll3_p", NULL, "per_ck" };

static const char * const spi_src2[] = {
		"pclk2", "pll2_q", "pll3_q", "hsi_ker", "csi_ker", "hse_ck" };

static const char * const spi_src3[] = {
		"pclk4", "pll2_q", "pll3_q", "hsi_ker", "csi_ker", "hse_ck" };

/* lptim 1 src */
static const char * const lptim_src1[] = {
		"pclk1", "pll2_p", "pll3_r", "lse_ck", "lsi_ck", "per_ck" };

/* lptim 2,3,4,5 src */
static const char * const lptim_src2[] = {
		"pclk4", "pll2_p", "pll3_r", "lse_ck", "lsi_ck", "per_ck" };

static const char * const cec_src[] = {"lse_ck", "lsi_ck", "csi_ker_div122" };

static const char * const usbotg_src[] = {"pll1_q", "pll3_q", "rc48_ck" };

/* i2c 1,2,3 src */
static const char * const i2c_src1[] = {
		"pclk1", "pll3_r", "hsi_ker", "csi_ker" };

/* i2c 4 src */
static const char * const i2c_src2[] = {
		"pclk4", "pll3_r", "hsi_ker", "csi_ker" };

static const char * const rng_src[] = {
		"rc48_ck", "pll1_q", "lse_ck", "lsi_ck" };

/* usart 1,6 src */
static const char * const usart_src1[] = {
		"pclk2", "pll2_q", "pll3_q", "hsi_ker", "csi_ker", "lse_ck" };

/* usart 2,3,4,5,7,8 src */
static const char * const usart_src2[] = {
		"pclk1", "pll2_q", "pll3_q", "hsi_ker", "csi_ker", "lse_ck" };

/* same NULL-slot pattern as spi_src1 above */
static const char *sai_src[5] = {
		"pll1_q", "pll2_p", "pll3_p", NULL, "per_ck" };

static const char * const adc_src[] = { "pll2_p", "pll3_r", "per_ck" };

/* lpuart 1 src */
static const char * const lpuart1_src[] = {
		"pclk3", "pll2_q", "pll3_q", "csi_ker", "lse_ck" };

static const char * const hrtim_src[] = { "tim2_ker", "d1cpre" };

/* RTC clock parent */
static const char * const rtc_src[] = { "off", "lse_ck", "lsi_ck", "hse_1M" };

/* Micro-controller output clock parent */
static const char * const mco_src1[] = {
		"hsi_ck", "lse_ck", "hse_ck", "pll1_q", "rc48_ck" };

static const char * const mco_src2[] = {
		"sys_ck", "pll2_p", "hse_ck", "pll1_p", "csi_ck", "lsi_ck" };

/* LCD clock */
static const char * const ltdc_src[] = {"pll3_r"};
/* Gate clock with ready bit and backup domain management */
struct stm32_ready_gate {
	struct clk_gate gate;
	u8 bit_rdy;	/* ready flag polled after toggling the gate bit */
};

#define to_ready_gate_clk(_rgate) container_of(_rgate, struct stm32_ready_gate,\
		gate)

/* Max polling iterations (100 us each) while waiting for the ready bit */
#define RGATE_TIMEOUT 10000
  132. static int ready_gate_clk_enable(struct clk_hw *hw)
  133. {
  134. struct clk_gate *gate = to_clk_gate(hw);
  135. struct stm32_ready_gate *rgate = to_ready_gate_clk(gate);
  136. int bit_status;
  137. unsigned int timeout = RGATE_TIMEOUT;
  138. if (clk_gate_ops.is_enabled(hw))
  139. return 0;
  140. clk_gate_ops.enable(hw);
  141. /* We can't use readl_poll_timeout() because we can blocked if
  142. * someone enables this clock before clocksource changes.
  143. * Only jiffies counter is available. Jiffies are incremented by
  144. * interruptions and enable op does not allow to be interrupted.
  145. */
  146. do {
  147. bit_status = !(readl(gate->reg) & BIT(rgate->bit_rdy));
  148. if (bit_status)
  149. udelay(100);
  150. } while (bit_status && --timeout);
  151. return bit_status;
  152. }
  153. static void ready_gate_clk_disable(struct clk_hw *hw)
  154. {
  155. struct clk_gate *gate = to_clk_gate(hw);
  156. struct stm32_ready_gate *rgate = to_ready_gate_clk(gate);
  157. int bit_status;
  158. unsigned int timeout = RGATE_TIMEOUT;
  159. if (!clk_gate_ops.is_enabled(hw))
  160. return;
  161. clk_gate_ops.disable(hw);
  162. do {
  163. bit_status = !!(readl(gate->reg) & BIT(rgate->bit_rdy));
  164. if (bit_status)
  165. udelay(100);
  166. } while (bit_status && --timeout);
  167. }
/* Gate ops that additionally wait on a ready bit after enable/disable */
static const struct clk_ops ready_gate_clk_ops = {
	.enable = ready_gate_clk_enable,
	.disable = ready_gate_clk_disable,
	.is_enabled = clk_gate_is_enabled,
};
  173. static struct clk_hw *clk_register_ready_gate(struct device *dev,
  174. const char *name, const char *parent_name,
  175. void __iomem *reg, u8 bit_idx, u8 bit_rdy,
  176. unsigned long flags, spinlock_t *lock)
  177. {
  178. struct stm32_ready_gate *rgate;
  179. struct clk_init_data init = { NULL };
  180. struct clk_hw *hw;
  181. int ret;
  182. rgate = kzalloc(sizeof(*rgate), GFP_KERNEL);
  183. if (!rgate)
  184. return ERR_PTR(-ENOMEM);
  185. init.name = name;
  186. init.ops = &ready_gate_clk_ops;
  187. init.flags = flags;
  188. init.parent_names = &parent_name;
  189. init.num_parents = 1;
  190. rgate->bit_rdy = bit_rdy;
  191. rgate->gate.lock = lock;
  192. rgate->gate.reg = reg;
  193. rgate->gate.bit_idx = bit_idx;
  194. rgate->gate.hw.init = &init;
  195. hw = &rgate->gate.hw;
  196. ret = clk_hw_register(dev, hw);
  197. if (ret) {
  198. kfree(rgate);
  199. hw = ERR_PTR(ret);
  200. }
  201. return hw;
  202. }
/* Gate element description: enable register offset + bit index */
struct gate_cfg {
	u32 offset;
	u8 bit_idx;
};

/* Mux or divider element description: register offset + field position */
struct muxdiv_cfg {
	u32 offset;
	u8 shift;
	u8 width;
};

/* Per-clock description of a composite clock; unused elements are NULL */
struct composite_clk_cfg {
	struct gate_cfg *gate;
	struct muxdiv_cfg *mux;
	struct muxdiv_cfg *div;
	const char *name;
	const char * const *parent_name;
	int num_parents;
	u32 flags;
};

/* Ops and flags shared by one element kind (mux, divider or gate) */
struct composite_clk_gcfg_t {
	u8 flags;
	const struct clk_ops *ops;
};

/*
 * General config definition of a composite clock (only clock diviser for rate)
 */
struct composite_clk_gcfg {
	struct composite_clk_gcfg_t *mux;
	struct composite_clk_gcfg_t *div;
	struct composite_clk_gcfg_t *gate;
};

#define M_CFG_MUX(_mux_ops, _mux_flags)\
	.mux = &(struct composite_clk_gcfg_t) { _mux_flags, _mux_ops}

#define M_CFG_DIV(_rate_ops, _rate_flags)\
	.div = &(struct composite_clk_gcfg_t) {_rate_flags, _rate_ops}

#define M_CFG_GATE(_gate_ops, _gate_flags)\
	.gate = &(struct composite_clk_gcfg_t) { _gate_flags, _gate_ops}
  239. static struct clk_mux *_get_cmux(void __iomem *reg, u8 shift, u8 width,
  240. u32 flags, spinlock_t *lock)
  241. {
  242. struct clk_mux *mux;
  243. mux = kzalloc(sizeof(*mux), GFP_KERNEL);
  244. if (!mux)
  245. return ERR_PTR(-ENOMEM);
  246. mux->reg = reg;
  247. mux->shift = shift;
  248. mux->mask = (1 << width) - 1;
  249. mux->flags = flags;
  250. mux->lock = lock;
  251. return mux;
  252. }
  253. static struct clk_divider *_get_cdiv(void __iomem *reg, u8 shift, u8 width,
  254. u32 flags, spinlock_t *lock)
  255. {
  256. struct clk_divider *div;
  257. div = kzalloc(sizeof(*div), GFP_KERNEL);
  258. if (!div)
  259. return ERR_PTR(-ENOMEM);
  260. div->reg = reg;
  261. div->shift = shift;
  262. div->width = width;
  263. div->flags = flags;
  264. div->lock = lock;
  265. return div;
  266. }
  267. static struct clk_gate *_get_cgate(void __iomem *reg, u8 bit_idx, u32 flags,
  268. spinlock_t *lock)
  269. {
  270. struct clk_gate *gate;
  271. gate = kzalloc(sizeof(*gate), GFP_KERNEL);
  272. if (!gate)
  273. return ERR_PTR(-ENOMEM);
  274. gate->reg = reg;
  275. gate->bit_idx = bit_idx;
  276. gate->flags = flags;
  277. gate->lock = lock;
  278. return gate;
  279. }
/* Resolved elements of a composite clock, ready for registration */
struct composite_cfg {
	struct clk_hw *mux_hw;
	struct clk_hw *div_hw;
	struct clk_hw *gate_hw;
	const struct clk_ops *mux_ops;
	const struct clk_ops *div_ops;
	const struct clk_ops *gate_ops;
};
  288. static void get_cfg_composite_div(const struct composite_clk_gcfg *gcfg,
  289. const struct composite_clk_cfg *cfg,
  290. struct composite_cfg *composite, spinlock_t *lock)
  291. {
  292. struct clk_mux *mux = NULL;
  293. struct clk_divider *div = NULL;
  294. struct clk_gate *gate = NULL;
  295. const struct clk_ops *mux_ops, *div_ops, *gate_ops;
  296. struct clk_hw *mux_hw;
  297. struct clk_hw *div_hw;
  298. struct clk_hw *gate_hw;
  299. mux_ops = div_ops = gate_ops = NULL;
  300. mux_hw = div_hw = gate_hw = NULL;
  301. if (gcfg->mux && gcfg->mux) {
  302. mux = _get_cmux(base + cfg->mux->offset,
  303. cfg->mux->shift,
  304. cfg->mux->width,
  305. gcfg->mux->flags, lock);
  306. if (!IS_ERR(mux)) {
  307. mux_hw = &mux->hw;
  308. mux_ops = gcfg->mux->ops ?
  309. gcfg->mux->ops : &clk_mux_ops;
  310. }
  311. }
  312. if (gcfg->div && cfg->div) {
  313. div = _get_cdiv(base + cfg->div->offset,
  314. cfg->div->shift,
  315. cfg->div->width,
  316. gcfg->div->flags, lock);
  317. if (!IS_ERR(div)) {
  318. div_hw = &div->hw;
  319. div_ops = gcfg->div->ops ?
  320. gcfg->div->ops : &clk_divider_ops;
  321. }
  322. }
  323. if (gcfg->gate && gcfg->gate) {
  324. gate = _get_cgate(base + cfg->gate->offset,
  325. cfg->gate->bit_idx,
  326. gcfg->gate->flags, lock);
  327. if (!IS_ERR(gate)) {
  328. gate_hw = &gate->hw;
  329. gate_ops = gcfg->gate->ops ?
  330. gcfg->gate->ops : &clk_gate_ops;
  331. }
  332. }
  333. composite->mux_hw = mux_hw;
  334. composite->mux_ops = mux_ops;
  335. composite->div_hw = div_hw;
  336. composite->div_ops = div_ops;
  337. composite->gate_hw = gate_hw;
  338. composite->gate_ops = gate_ops;
  339. }
/* Kernel Timer */

/* Timer kernel clock: its rate depends on TIMPRE and the APB prescaler */
struct timer_ker {
	u8 dppre_shift;	/* position of the relevant D2PPREx field in RCC_D2CFGR */
	struct clk_hw hw;
	spinlock_t *lock;
};

#define to_timer_ker(_hw) container_of(_hw, struct timer_ker, hw)

/*
 * Compute the timer kernel clock from the APB clock: x1 when the APB
 * prescaler does not divide, x2 otherwise, x4 with TIMPRE set and a
 * larger prescaler.
 *
 * NOTE(review): prescaler is masked with 0x03 although values >= 4 are
 * the ones that select real division (see ppre_div_table); with this
 * mask prescaler can never reach 4, so mul always ends up 1 -- confirm
 * the intended mask against the reference manual.
 */
static unsigned long timer_ker_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	struct timer_ker *clk_elem = to_timer_ker(hw);
	u32 timpre;
	u32 dppre_shift = clk_elem->dppre_shift;
	u32 prescaler;
	u32 mul;

	/* TIMPRE is bit 15 of RCC_CFGR */
	timpre = (readl(base + RCC_CFGR) >> 15) & 0x01;

	prescaler = (readl(base + RCC_D2CFGR) >> dppre_shift) & 0x03;

	mul = 2;

	if (prescaler < 4)
		mul = 1;

	else if (timpre && prescaler > 4)
		mul = 4;

	return parent_rate * mul;
}

static const struct clk_ops timer_ker_ops = {
	.recalc_rate = timer_ker_recalc_rate,
};
  367. static struct clk_hw *clk_register_stm32_timer_ker(struct device *dev,
  368. const char *name, const char *parent_name,
  369. unsigned long flags,
  370. u8 dppre_shift,
  371. spinlock_t *lock)
  372. {
  373. struct timer_ker *element;
  374. struct clk_init_data init;
  375. struct clk_hw *hw;
  376. int err;
  377. element = kzalloc(sizeof(*element), GFP_KERNEL);
  378. if (!element)
  379. return ERR_PTR(-ENOMEM);
  380. init.name = name;
  381. init.ops = &timer_ker_ops;
  382. init.flags = flags;
  383. init.parent_names = &parent_name;
  384. init.num_parents = 1;
  385. element->hw.init = &init;
  386. element->lock = lock;
  387. element->dppre_shift = dppre_shift;
  388. hw = &element->hw;
  389. err = clk_hw_register(dev, hw);
  390. if (err) {
  391. kfree(element);
  392. return ERR_PTR(err);
  393. }
  394. return hw;
  395. }
/*
 * D1CPRE/HPRE prescaler encoding: codes 0..7 mean no division,
 * 8..15 select /2../512.  Note the table skips /32: code 12 jumps
 * straight to /64 (matches the hardware encoding).
 */
static const struct clk_div_table d1cpre_div_table[] = {
	{ 0, 1 }, { 1, 1 }, { 2, 1 }, { 3, 1},
	{ 4, 1 }, { 5, 1 }, { 6, 1 }, { 7, 1},
	{ 8, 2 }, { 9, 4 }, { 10, 8 }, { 11, 16 },
	{ 12, 64 }, { 13, 128 }, { 14, 256 },
	{ 15, 512 },
	{ 0 },
};

/* APB prescaler encoding: codes 0..3 = /1, 4..7 = /2../16 */
static const struct clk_div_table ppre_div_table[] = {
	{ 0, 1 }, { 1, 1 }, { 2, 1 }, { 3, 1},
	{ 4, 2 }, { 5, 4 }, { 6, 8 }, { 7, 16 },
	{ 0 },
};
/*
 * Register the core and bus clock tree: d1cpre, hclk, systick and the
 * per-domain APB clocks (pclk1..4) plus the two timer kernel clocks.
 */
static void register_core_and_bus_clocks(void)
{
	/* CORE AND BUS */
	hws[SYS_D1CPRE] = clk_hw_register_divider_table(NULL, "d1cpre",
		"sys_ck", CLK_IGNORE_UNUSED, base + RCC_D1CFGR, 8, 4, 0,
		d1cpre_div_table, &stm32rcc_lock);

	hws[HCLK] = clk_hw_register_divider_table(NULL, "hclk", "d1cpre",
		CLK_IGNORE_UNUSED, base + RCC_D1CFGR, 0, 4, 0,
		d1cpre_div_table, &stm32rcc_lock);

	/* D1 DOMAIN */
	/* * CPU Systick: fixed d1cpre / 8 */
	hws[CPU_SYSTICK] = clk_hw_register_fixed_factor(NULL, "systick",
		"d1cpre", 0, 1, 8);

	/* * APB3 peripheral */
	hws[PCLK3] = clk_hw_register_divider_table(NULL, "pclk3", "hclk", 0,
		base + RCC_D1CFGR, 4, 3, 0,
		ppre_div_table, &stm32rcc_lock);

	/* D2 DOMAIN */
	/* * APB1 peripheral */
	hws[PCLK1] = clk_hw_register_divider_table(NULL, "pclk1", "hclk", 0,
		base + RCC_D2CFGR, 4, 3, 0,
		ppre_div_table, &stm32rcc_lock);

	/* Timers prescaler clocks (D2PPRE1 field at shift 4) */
	clk_register_stm32_timer_ker(NULL, "tim1_ker", "pclk1", 0,
		4, &stm32rcc_lock);

	/* * APB2 peripheral */
	hws[PCLK2] = clk_hw_register_divider_table(NULL, "pclk2", "hclk", 0,
		base + RCC_D2CFGR, 8, 3, 0, ppre_div_table,
		&stm32rcc_lock);

	/* D2PPRE2 field at shift 8 */
	clk_register_stm32_timer_ker(NULL, "tim2_ker", "pclk2", 0, 8,
		&stm32rcc_lock);

	/* D3 DOMAIN */
	/* * APB4 peripheral */
	hws[PCLK4] = clk_hw_register_divider_table(NULL, "pclk4", "hclk", 0,
		base + RCC_D3CFGR, 4, 3, 0,
		ppre_div_table, &stm32rcc_lock);
}
  446. /* MUX clock configuration */
  447. struct stm32_mux_clk {
  448. const char *name;
  449. const char * const *parents;
  450. u8 num_parents;
  451. u32 offset;
  452. u8 shift;
  453. u8 width;
  454. u32 flags;
  455. };
  456. #define M_MCLOCF(_name, _parents, _mux_offset, _mux_shift, _mux_width, _flags)\
  457. {\
  458. .name = _name,\
  459. .parents = _parents,\
  460. .num_parents = ARRAY_SIZE(_parents),\
  461. .offset = _mux_offset,\
  462. .shift = _mux_shift,\
  463. .width = _mux_width,\
  464. .flags = _flags,\
  465. }
  466. #define M_MCLOC(_name, _parents, _mux_offset, _mux_shift, _mux_width)\
  467. M_MCLOCF(_name, _parents, _mux_offset, _mux_shift, _mux_width, 0)\
  468. static const struct stm32_mux_clk stm32_mclk[] __initconst = {
  469. M_MCLOC("per_ck", per_src, RCC_D1CCIPR, 28, 3),
  470. M_MCLOC("pllsrc", pll_src, RCC_PLLCKSELR, 0, 3),
  471. M_MCLOC("sys_ck", sys_src, RCC_CFGR, 0, 3),
  472. M_MCLOC("tracein_ck", tracein_src, RCC_CFGR, 0, 3),
  473. };
/* Oscillator clock configuration */
/* Oscillator gate: enable bit + ready bit, both in the same register */
struct stm32_osc_clk {
	const char *name;
	const char *parent;
	u32 gate_offset;	/* register holding the enable/ready bits */
	u8 bit_idx;		/* enable bit */
	u8 bit_rdy;		/* ready bit */
	u32 flags;
};

#define OSC_CLKF(_name, _parent, _gate_offset, _bit_idx, _bit_rdy, _flags)\
{\
	.name = _name,\
	.parent = _parent,\
	.gate_offset = _gate_offset,\
	.bit_idx = _bit_idx,\
	.bit_rdy = _bit_rdy,\
	.flags = _flags,\
}

#define OSC_CLK(_name, _parent, _gate_offset, _bit_idx, _bit_rdy)\
	OSC_CLKF(_name, _parent, _gate_offset, _bit_idx, _bit_rdy, 0)

/*
 * All oscillators keep CLK_IGNORE_UNUSED: the system may already be
 * running from one of them when this driver probes.
 */
static const struct stm32_osc_clk stm32_oclk[] __initconst = {
	OSC_CLKF("hsi_ck", "hsidiv", RCC_CR, 0, 2, CLK_IGNORE_UNUSED),
	OSC_CLKF("hsi_ker", "hsidiv", RCC_CR, 1, 2, CLK_IGNORE_UNUSED),
	OSC_CLKF("csi_ck", "clk-csi", RCC_CR, 7, 8, CLK_IGNORE_UNUSED),
	OSC_CLKF("csi_ker", "clk-csi", RCC_CR, 9, 8, CLK_IGNORE_UNUSED),
	OSC_CLKF("rc48_ck", "clk-rc48", RCC_CR, 12, 13, CLK_IGNORE_UNUSED),
	OSC_CLKF("lsi_ck", "clk-lsi", RCC_CSR, 0, 1, CLK_IGNORE_UNUSED),
};
/* PLL configuration */
struct st32h7_pll_cfg {
	u8 bit_idx;		/* PLL enable bit in RCC_CR (ready bit = bit_idx + 1) */
	u32 offset_divr;	/* RCC_PLLxDIVR register offset */
	u8 bit_frac_en;		/* fractional-enable bit in RCC_PLLCFGR */
	u32 offset_frac;	/* RCC_PLLxFRACR register offset */
	u8 divm;		/* position of the DIVMx field in RCC_PLLCKSELR */
};

struct stm32_pll_data {
	const char *name;
	const char *parent_name;
	unsigned long flags;
	const struct st32h7_pll_cfg *cfg;
};

static const struct st32h7_pll_cfg stm32h7_pll1 = {
	.bit_idx = 24,
	.offset_divr = RCC_PLL1DIVR,
	.bit_frac_en = 0,
	.offset_frac = RCC_PLL1FRACR,
	.divm = 4,
};

static const struct st32h7_pll_cfg stm32h7_pll2 = {
	.bit_idx = 26,
	.offset_divr = RCC_PLL2DIVR,
	.bit_frac_en = 4,
	.offset_frac = RCC_PLL2FRACR,
	.divm = 12,
};

static const struct st32h7_pll_cfg stm32h7_pll3 = {
	.bit_idx = 28,
	.offset_divr = RCC_PLL3DIVR,
	.bit_frac_en = 8,
	.offset_frac = RCC_PLL3FRACR,
	.divm = 20,
};

/*
 * vco1 keeps CLK_IGNORE_UNUSED: pll1 outputs can drive the system
 * clock (see sys_src), so it must not be gated as "unused".
 */
static const struct stm32_pll_data stm32_pll[] = {
	{ "vco1", "pllsrc", CLK_IGNORE_UNUSED, &stm32h7_pll1 },
	{ "vco2", "pllsrc", 0, &stm32h7_pll2 },
	{ "vco3", "pllsrc", 0, &stm32h7_pll3 },
};
/*
 * Fractional divider state of a PLL: the M pre-divider, the N
 * multiplier and the fractional part (FRACN), plus the register/bit
 * where the fractional mode is enabled.
 */
struct stm32_fractional_divider {
	void __iomem *mreg;	/* register holding the M pre-divider */
	u8 mshift;
	u8 mwidth;
	u32 mmask;		/* NOTE(review): never written or read in this chunk */
	void __iomem *nreg;	/* register holding the N multiplier */
	u8 nshift;
	u8 nwidth;
	void __iomem *freg_status;	/* fractional-enable register */
	u8 freg_bit;			/* fractional-enable bit */
	void __iomem *freg_value;	/* FRACN value register */
	u8 fshift;
	u8 fwidth;
	u8 flags;
	struct clk_hw hw;
	spinlock_t *lock;
};

/* A PLL: ready-gated on/off control plus a fractional divider */
struct stm32_pll_obj {
	spinlock_t *lock;
	struct stm32_fractional_divider div;
	struct stm32_ready_gate rgate;
	struct clk_hw hw;
};

#define to_pll(_hw) container_of(_hw, struct stm32_pll_obj, hw)
  566. static int pll_is_enabled(struct clk_hw *hw)
  567. {
  568. struct stm32_pll_obj *clk_elem = to_pll(hw);
  569. struct clk_hw *_hw = &clk_elem->rgate.gate.hw;
  570. __clk_hw_set_clk(_hw, hw);
  571. return ready_gate_clk_ops.is_enabled(_hw);
  572. }
  573. static int pll_enable(struct clk_hw *hw)
  574. {
  575. struct stm32_pll_obj *clk_elem = to_pll(hw);
  576. struct clk_hw *_hw = &clk_elem->rgate.gate.hw;
  577. __clk_hw_set_clk(_hw, hw);
  578. return ready_gate_clk_ops.enable(_hw);
  579. }
  580. static void pll_disable(struct clk_hw *hw)
  581. {
  582. struct stm32_pll_obj *clk_elem = to_pll(hw);
  583. struct clk_hw *_hw = &clk_elem->rgate.gate.hw;
  584. __clk_hw_set_clk(_hw, hw);
  585. ready_gate_clk_ops.disable(_hw);
  586. }
/* True when the fractional mode of the PLL divider is enabled */
static int pll_frac_is_enabled(struct clk_hw *hw)
{
	struct stm32_pll_obj *clk_elem = to_pll(hw);
	struct stm32_fractional_divider *fd = &clk_elem->div;

	return (readl(fd->freg_status) >> fd->freg_bit) & 0x01;
}

/* Extract the raw FRACN field (fd->fwidth bits starting at fd->fshift) */
static unsigned long pll_read_frac(struct clk_hw *hw)
{
	struct stm32_pll_obj *clk_elem = to_pll(hw);
	struct stm32_fractional_divider *fd = &clk_elem->div;

	return (readl(fd->freg_value) >> fd->fshift) &
		GENMASK(fd->fwidth - 1, 0);
}
/*
 * VCO rate: parent * N / M, plus parent * FRACN / (M * 8191) when the
 * fractional mode is enabled.
 *
 * NOTE(review): FRACN is read as a 13-bit field, for which the usual
 * denominator would be 2^13 = 8192; this divides by 8191 -- confirm
 * against the reference manual.
 */
static unsigned long pll_fd_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	struct stm32_pll_obj *clk_elem = to_pll(hw);
	struct stm32_fractional_divider *fd = &clk_elem->div;
	unsigned long m, n;
	u32 val, mask;
	u64 rate, rate1 = 0;

	val = readl(fd->mreg);
	mask = GENMASK(fd->mwidth - 1, 0) << fd->mshift;
	m = (val & mask) >> fd->mshift;

	/* register field is N - 1, hence the + 1 (n can never be 0) */
	val = readl(fd->nreg);
	mask = GENMASK(fd->nwidth - 1, 0) << fd->nshift;
	n = ((val & mask) >> fd->nshift) + 1;

	/* m == 0: pre-divider disabled, pass the parent rate through */
	if (!n || !m)
		return parent_rate;

	rate = (u64)parent_rate * n;
	do_div(rate, m);

	if (pll_frac_is_enabled(hw)) {
		val = pll_read_frac(hw);
		rate1 = (u64)parent_rate * (u64)val;
		do_div(rate1, (m * 8191));
	}

	return rate + rate1;
}

static const struct clk_ops pll_ops = {
	.enable = pll_enable,
	.disable = pll_disable,
	.is_enabled = pll_is_enabled,
	.recalc_rate = pll_fd_recalc_rate,
};
/*
 * Register one PLL VCO clock.  On/off control goes through the ready
 * gate (enable/ready bit pair in RCC_CR) and the rate is computed from
 * the DIVM pre-divider, the DIVN multiplier and the FRACN fraction.
 * Returns the clk_hw or an ERR_PTR on registration failure.
 */
static struct clk_hw *clk_register_stm32_pll(struct device *dev,
		const char *name,
		const char *parent,
		unsigned long flags,
		const struct st32h7_pll_cfg *cfg,
		spinlock_t *lock)
{
	struct stm32_pll_obj *pll;
	struct clk_init_data init = { NULL };
	struct clk_hw *hw;
	int ret;
	struct stm32_fractional_divider *div = NULL;
	struct stm32_ready_gate *rgate;

	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
	if (!pll)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &pll_ops;
	init.flags = flags;
	init.parent_names = &parent;
	init.num_parents = 1;
	pll->hw.init = &init;

	hw = &pll->hw;
	rgate = &pll->rgate;

	/* the ready bit sits right above the enable bit in RCC_CR */
	rgate->bit_rdy = cfg->bit_idx + 1;
	rgate->gate.lock = lock;
	rgate->gate.reg = base + RCC_CR;
	rgate->gate.bit_idx = cfg->bit_idx;

	div = &pll->div;
	div->flags = 0;
	div->mreg = base + RCC_PLLCKSELR;
	div->mshift = cfg->divm;	/* DIVMx field position, 6 bits wide */
	div->mwidth = 6;
	div->nreg = base + cfg->offset_divr;
	div->nshift = 0;		/* DIVNx occupies the low 9 bits */
	div->nwidth = 9;
	div->freg_status = base + RCC_PLLCFGR;
	div->freg_bit = cfg->bit_frac_en;
	div->freg_value = base + cfg->offset_frac;
	div->fshift = 3;		/* FRACNx: 13 bits starting at bit 3 */
	div->fwidth = 13;

	div->lock = lock;

	ret = clk_hw_register(dev, hw);
	if (ret) {
		kfree(pll);
		hw = ERR_PTR(ret);
	}

	return hw;
}
/* ODF CLOCKS */
/*
 * PLL output divider (DIVP/DIVQ/DIVR) rate ops.  recalc/round are
 * plain clk_divider pass-throughs; set_rate additionally stops the
 * parent PLL around the register update (presumably the dividers must
 * only be changed while the PLL is off -- confirm with the reference
 * manual).
 */
static unsigned long odf_divider_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	return clk_divider_ops.recalc_rate(hw, parent_rate);
}

static long odf_divider_round_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long *prate)
{
	return clk_divider_ops.round_rate(hw, rate, prate);
}

static int odf_divider_set_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate)
{
	struct clk_hw *hwp;
	int pll_status;
	int ret;

	hwp = clk_hw_get_parent(hw);

	/* temporarily disable the parent PLL while changing the divider */
	pll_status = pll_is_enabled(hwp);
	if (pll_status)
		pll_disable(hwp);

	ret = clk_divider_ops.set_rate(hw, rate, parent_rate);

	if (pll_status)
		pll_enable(hwp);

	return ret;
}

static const struct clk_ops odf_divider_ops = {
	.recalc_rate = odf_divider_recalc_rate,
	.round_rate = odf_divider_round_rate,
	.set_rate = odf_divider_set_rate,
};
  711. static int odf_gate_enable(struct clk_hw *hw)
  712. {
  713. struct clk_hw *hwp;
  714. int pll_status;
  715. int ret;
  716. if (clk_gate_ops.is_enabled(hw))
  717. return 0;
  718. hwp = clk_hw_get_parent(hw);
  719. pll_status = pll_is_enabled(hwp);
  720. if (pll_status)
  721. pll_disable(hwp);
  722. ret = clk_gate_ops.enable(hw);
  723. if (pll_status)
  724. pll_enable(hwp);
  725. return ret;
  726. }
  727. static void odf_gate_disable(struct clk_hw *hw)
  728. {
  729. struct clk_hw *hwp;
  730. int pll_status;
  731. if (!clk_gate_ops.is_enabled(hw))
  732. return;
  733. hwp = clk_hw_get_parent(hw);
  734. pll_status = pll_is_enabled(hwp);
  735. if (pll_status)
  736. pll_disable(hwp);
  737. clk_gate_ops.disable(hw);
  738. if (pll_status)
  739. pll_enable(hwp);
  740. }
  741. static const struct clk_ops odf_gate_ops = {
  742. .enable = odf_gate_enable,
  743. .disable = odf_gate_disable,
  744. .is_enabled = clk_gate_is_enabled,
  745. };
  746. static struct composite_clk_gcfg odf_clk_gcfg = {
  747. M_CFG_DIV(&odf_divider_ops, 0),
  748. M_CFG_GATE(&odf_gate_ops, 0),
  749. };
  750. #define M_ODF_F(_name, _parent, _gate_offset, _bit_idx, _rate_offset,\
  751. _rate_shift, _rate_width, _flags)\
  752. {\
  753. .mux = NULL,\
  754. .div = &(struct muxdiv_cfg) {_rate_offset, _rate_shift, _rate_width},\
  755. .gate = &(struct gate_cfg) {_gate_offset, _bit_idx },\
  756. .name = _name,\
  757. .parent_name = &(const char *) {_parent},\
  758. .num_parents = 1,\
  759. .flags = _flags,\
  760. }
  761. #define M_ODF(_name, _parent, _gate_offset, _bit_idx, _rate_offset,\
  762. _rate_shift, _rate_width)\
  763. M_ODF_F(_name, _parent, _gate_offset, _bit_idx, _rate_offset,\
  764. _rate_shift, _rate_width, 0)\
/*
 * The three output dividers (p/q/r) of each of the three PLLs.
 * The pll1 outputs carry CLK_IGNORE_UNUSED so the framework never
 * gates them in the unused-clock cleanup.
 */
static const struct composite_clk_cfg stm32_odf[3][3] = {
	{
		M_ODF_F("pll1_p", "vco1", RCC_PLLCFGR, 16, RCC_PLL1DIVR, 9, 7,
				CLK_IGNORE_UNUSED),
		M_ODF_F("pll1_q", "vco1", RCC_PLLCFGR, 17, RCC_PLL1DIVR, 16, 7,
				CLK_IGNORE_UNUSED),
		M_ODF_F("pll1_r", "vco1", RCC_PLLCFGR, 18, RCC_PLL1DIVR, 24, 7,
				CLK_IGNORE_UNUSED),
	},
	{
		M_ODF("pll2_p", "vco2", RCC_PLLCFGR, 19, RCC_PLL2DIVR, 9, 7),
		M_ODF("pll2_q", "vco2", RCC_PLLCFGR, 20, RCC_PLL2DIVR, 16, 7),
		M_ODF("pll2_r", "vco2", RCC_PLLCFGR, 21, RCC_PLL2DIVR, 24, 7),
	},
	{
		M_ODF("pll3_p", "vco3", RCC_PLLCFGR, 22, RCC_PLL3DIVR, 9, 7),
		M_ODF("pll3_q", "vco3", RCC_PLLCFGR, 23, RCC_PLL3DIVR, 16, 7),
		M_ODF("pll3_r", "vco3", RCC_PLLCFGR, 24, RCC_PLL3DIVR, 24, 7),
	}
};
/* PERIF CLOCKS */

/* Description of one peripheral bus-clock gate. */
struct pclk_t {
	u32 gate_offset;	/* offset of the RCC enable register */
	u8 bit_idx;		/* enable bit inside that register */
	const char *name;	/* clock name */
	const char *parent;	/* parent bus clock name */
	u32 flags;		/* clk framework flags */
};
/* Initializer for one pclk_t entry. */
#define PER_CLKF(_gate_offset, _bit_idx, _name, _parent, _flags)\
{\
	.gate_offset = _gate_offset,\
	.bit_idx = _bit_idx,\
	.name = _name,\
	.parent = _parent,\
	.flags = _flags,\
}

/* Same with no clock framework flags. */
#define PER_CLK(_gate_offset, _bit_idx, _name, _parent)\
	PER_CLKF(_gate_offset, _bit_idx, _name, _parent, 0)
/*
 * Peripheral gate clocks, grouped by their RCC enable register.
 * Timers are parented to the tim1_ker/tim2_ker kernel timer clocks;
 * everything else hangs off the matching bus clock.
 */
static const struct pclk_t pclk[] = {
	/* AHB3 */
	PER_CLK(RCC_AHB3ENR, 31, "d1sram1", "hclk"),
	PER_CLK(RCC_AHB3ENR, 30, "itcm", "hclk"),
	PER_CLK(RCC_AHB3ENR, 29, "dtcm2", "hclk"),
	PER_CLK(RCC_AHB3ENR, 28, "dtcm1", "hclk"),
	PER_CLK(RCC_AHB3ENR, 8, "flitf", "hclk"),
	PER_CLK(RCC_AHB3ENR, 5, "jpgdec", "hclk"),
	PER_CLK(RCC_AHB3ENR, 4, "dma2d", "hclk"),
	PER_CLK(RCC_AHB3ENR, 0, "mdma", "hclk"),
	/* AHB1 */
	PER_CLK(RCC_AHB1ENR, 28, "usb2ulpi", "hclk"),
	PER_CLK(RCC_AHB1ENR, 26, "usb1ulpi", "hclk"),
	PER_CLK(RCC_AHB1ENR, 17, "eth1rx", "hclk"),
	PER_CLK(RCC_AHB1ENR, 16, "eth1tx", "hclk"),
	PER_CLK(RCC_AHB1ENR, 15, "eth1mac", "hclk"),
	PER_CLK(RCC_AHB1ENR, 14, "art", "hclk"),
	PER_CLK(RCC_AHB1ENR, 1, "dma2", "hclk"),
	PER_CLK(RCC_AHB1ENR, 0, "dma1", "hclk"),
	/* AHB2 */
	PER_CLK(RCC_AHB2ENR, 31, "d2sram3", "hclk"),
	PER_CLK(RCC_AHB2ENR, 30, "d2sram2", "hclk"),
	PER_CLK(RCC_AHB2ENR, 29, "d2sram1", "hclk"),
	PER_CLK(RCC_AHB2ENR, 5, "hash", "hclk"),
	PER_CLK(RCC_AHB2ENR, 4, "crypt", "hclk"),
	PER_CLK(RCC_AHB2ENR, 0, "camitf", "hclk"),
	/* AHB4 */
	PER_CLK(RCC_AHB4ENR, 28, "bkpram", "hclk"),
	PER_CLK(RCC_AHB4ENR, 25, "hsem", "hclk"),
	PER_CLK(RCC_AHB4ENR, 21, "bdma", "hclk"),
	PER_CLK(RCC_AHB4ENR, 19, "crc", "hclk"),
	PER_CLK(RCC_AHB4ENR, 10, "gpiok", "hclk"),
	PER_CLK(RCC_AHB4ENR, 9, "gpioj", "hclk"),
	PER_CLK(RCC_AHB4ENR, 8, "gpioi", "hclk"),
	PER_CLK(RCC_AHB4ENR, 7, "gpioh", "hclk"),
	PER_CLK(RCC_AHB4ENR, 6, "gpiog", "hclk"),
	PER_CLK(RCC_AHB4ENR, 5, "gpiof", "hclk"),
	PER_CLK(RCC_AHB4ENR, 4, "gpioe", "hclk"),
	PER_CLK(RCC_AHB4ENR, 3, "gpiod", "hclk"),
	PER_CLK(RCC_AHB4ENR, 2, "gpioc", "hclk"),
	PER_CLK(RCC_AHB4ENR, 1, "gpiob", "hclk"),
	PER_CLK(RCC_AHB4ENR, 0, "gpioa", "hclk"),
	/* APB3 */
	PER_CLK(RCC_APB3ENR, 6, "wwdg1", "pclk3"),
	/* APB1 (low) */
	PER_CLK(RCC_APB1LENR, 29, "dac12", "pclk1"),
	PER_CLK(RCC_APB1LENR, 11, "wwdg2", "pclk1"),
	PER_CLK(RCC_APB1LENR, 8, "tim14", "tim1_ker"),
	PER_CLK(RCC_APB1LENR, 7, "tim13", "tim1_ker"),
	PER_CLK(RCC_APB1LENR, 6, "tim12", "tim1_ker"),
	PER_CLK(RCC_APB1LENR, 5, "tim7", "tim1_ker"),
	PER_CLK(RCC_APB1LENR, 4, "tim6", "tim1_ker"),
	PER_CLK(RCC_APB1LENR, 3, "tim5", "tim1_ker"),
	PER_CLK(RCC_APB1LENR, 2, "tim4", "tim1_ker"),
	PER_CLK(RCC_APB1LENR, 1, "tim3", "tim1_ker"),
	PER_CLK(RCC_APB1LENR, 0, "tim2", "tim1_ker"),
	/* APB1 (high) */
	PER_CLK(RCC_APB1HENR, 5, "mdios", "pclk1"),
	PER_CLK(RCC_APB1HENR, 4, "opamp", "pclk1"),
	PER_CLK(RCC_APB1HENR, 1, "crs", "pclk1"),
	/* APB2 */
	PER_CLK(RCC_APB2ENR, 18, "tim17", "tim2_ker"),
	PER_CLK(RCC_APB2ENR, 17, "tim16", "tim2_ker"),
	PER_CLK(RCC_APB2ENR, 16, "tim15", "tim2_ker"),
	PER_CLK(RCC_APB2ENR, 1, "tim8", "tim2_ker"),
	PER_CLK(RCC_APB2ENR, 0, "tim1", "tim2_ker"),
	/* APB4 */
	PER_CLK(RCC_APB4ENR, 26, "tmpsens", "pclk4"),
	PER_CLK(RCC_APB4ENR, 16, "rtcapb", "pclk4"),
	PER_CLK(RCC_APB4ENR, 15, "vref", "pclk4"),
	PER_CLK(RCC_APB4ENR, 14, "comp12", "pclk4"),
	PER_CLK(RCC_APB4ENR, 1, "syscfg", "pclk4"),
};
/* KERNEL CLOCKS */

/*
 * Initializer for one kernel clock: an enable bit plus a parent
 * selection mux field (in one of the D1/D2/D3 CCIP registers), no
 * divider. _parent_name must be an array so ARRAY_SIZE() works.
 */
#define KER_CLKF(_gate_offset, _bit_idx,\
		_mux_offset, _mux_shift, _mux_width,\
		_name, _parent_name,\
		_flags) \
{ \
	.gate = &(struct gate_cfg) {_gate_offset, _bit_idx},\
	.mux = &(struct muxdiv_cfg) {_mux_offset, _mux_shift, _mux_width },\
	.name = _name, \
	.parent_name = _parent_name, \
	.num_parents = ARRAY_SIZE(_parent_name),\
	.flags = _flags,\
}
  880. #define KER_CLK(_gate_offset, _bit_idx, _mux_offset, _mux_shift, _mux_width,\
  881. _name, _parent_name) \
  882. KER_CLKF(_gate_offset, _bit_idx, _mux_offset, _mux_shift, _mux_width,\
  883. _name, _parent_name, 0)\
/*
 * Kernel clock with a gate but no parent mux: the single parent is
 * fixed. _parent_name is still an array (of one entry).
 */
#define KER_CLKF_NOMUX(_gate_offset, _bit_idx,\
		_name, _parent_name,\
		_flags) \
{ \
	.gate = &(struct gate_cfg) {_gate_offset, _bit_idx},\
	.mux = NULL,\
	.name = _name, \
	.parent_name = _parent_name, \
	.num_parents = 1,\
	.flags = _flags,\
}
/*
 * Kernel clocks: per-IP functional clocks with their own gate and,
 * usually, a source mux in one of the CCIP registers.
 */
static const struct composite_clk_cfg kclk[] = {
	KER_CLK(RCC_AHB3ENR, 16, RCC_D1CCIPR, 16, 1, "sdmmc1", sdmmc_src),
	KER_CLKF(RCC_AHB3ENR, 14, RCC_D1CCIPR, 4, 2, "quadspi", qspi_src,
			CLK_IGNORE_UNUSED),
	KER_CLKF(RCC_AHB3ENR, 12, RCC_D1CCIPR, 0, 2, "fmc", fmc_src,
			CLK_IGNORE_UNUSED),
	/* usb1otg and usb2otg share the same source mux field (bits 20-21) */
	KER_CLK(RCC_AHB1ENR, 27, RCC_D2CCIP2R, 20, 2, "usb2otg", usbotg_src),
	KER_CLK(RCC_AHB1ENR, 25, RCC_D2CCIP2R, 20, 2, "usb1otg", usbotg_src),
	KER_CLK(RCC_AHB1ENR, 5, RCC_D3CCIPR, 16, 2, "adc12", adc_src),
	KER_CLK(RCC_AHB2ENR, 9, RCC_D1CCIPR, 16, 1, "sdmmc2", sdmmc_src),
	KER_CLK(RCC_AHB2ENR, 6, RCC_D2CCIP2R, 8, 2, "rng", rng_src),
	KER_CLK(RCC_AHB4ENR, 24, RCC_D3CCIPR, 16, 2, "adc3", adc_src),
	KER_CLKF(RCC_APB3ENR, 4, RCC_D1CCIPR, 8, 1, "dsi", dsi_src,
			CLK_SET_RATE_PARENT),
	KER_CLKF_NOMUX(RCC_APB3ENR, 3, "ltdc", ltdc_src, CLK_SET_RATE_PARENT),
	KER_CLK(RCC_APB1LENR, 31, RCC_D2CCIP2R, 0, 3, "usart8", usart_src2),
	KER_CLK(RCC_APB1LENR, 30, RCC_D2CCIP2R, 0, 3, "usart7", usart_src2),
	KER_CLK(RCC_APB1LENR, 27, RCC_D2CCIP2R, 22, 2, "hdmicec", cec_src),
	KER_CLK(RCC_APB1LENR, 23, RCC_D2CCIP2R, 12, 2, "i2c3", i2c_src1),
	KER_CLK(RCC_APB1LENR, 22, RCC_D2CCIP2R, 12, 2, "i2c2", i2c_src1),
	KER_CLK(RCC_APB1LENR, 21, RCC_D2CCIP2R, 12, 2, "i2c1", i2c_src1),
	KER_CLK(RCC_APB1LENR, 20, RCC_D2CCIP2R, 0, 3, "uart5", usart_src2),
	KER_CLK(RCC_APB1LENR, 19, RCC_D2CCIP2R, 0, 3, "uart4", usart_src2),
	KER_CLK(RCC_APB1LENR, 18, RCC_D2CCIP2R, 0, 3, "usart3", usart_src2),
	KER_CLK(RCC_APB1LENR, 17, RCC_D2CCIP2R, 0, 3, "usart2", usart_src2),
	KER_CLK(RCC_APB1LENR, 16, RCC_D2CCIP1R, 20, 2, "spdifrx", spdifrx_src),
	KER_CLK(RCC_APB1LENR, 15, RCC_D2CCIP1R, 16, 3, "spi3", spi_src1),
	KER_CLK(RCC_APB1LENR, 14, RCC_D2CCIP1R, 16, 3, "spi2", spi_src1),
	KER_CLK(RCC_APB1LENR, 9, RCC_D2CCIP2R, 28, 3, "lptim1", lptim_src1),
	KER_CLK(RCC_APB1HENR, 8, RCC_D2CCIP1R, 28, 2, "fdcan", fdcan_src),
	KER_CLK(RCC_APB1HENR, 2, RCC_D2CCIP1R, 31, 1, "swp", swp_src),
	KER_CLK(RCC_APB2ENR, 29, RCC_CFGR, 14, 1, "hrtim", hrtim_src),
	KER_CLK(RCC_APB2ENR, 28, RCC_D2CCIP1R, 24, 1, "dfsdm1", dfsdm1_src),
	KER_CLKF(RCC_APB2ENR, 24, RCC_D2CCIP1R, 6, 3, "sai3", sai_src,
			CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT),
	KER_CLKF(RCC_APB2ENR, 23, RCC_D2CCIP1R, 6, 3, "sai2", sai_src,
			CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT),
	KER_CLKF(RCC_APB2ENR, 22, RCC_D2CCIP1R, 0, 3, "sai1", sai_src,
			CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT),
	KER_CLK(RCC_APB2ENR, 20, RCC_D2CCIP1R, 16, 3, "spi5", spi_src2),
	KER_CLK(RCC_APB2ENR, 13, RCC_D2CCIP1R, 16, 3, "spi4", spi_src2),
	KER_CLK(RCC_APB2ENR, 12, RCC_D2CCIP1R, 16, 3, "spi1", spi_src1),
	KER_CLK(RCC_APB2ENR, 5, RCC_D2CCIP2R, 3, 3, "usart6", usart_src1),
	KER_CLK(RCC_APB2ENR, 4, RCC_D2CCIP2R, 3, 3, "usart1", usart_src1),
	/* sai4a and sai4b share a single gate bit but have separate muxes */
	KER_CLK(RCC_APB4ENR, 21, RCC_D3CCIPR, 24, 3, "sai4b", sai_src),
	KER_CLK(RCC_APB4ENR, 21, RCC_D3CCIPR, 21, 3, "sai4a", sai_src),
	KER_CLK(RCC_APB4ENR, 12, RCC_D3CCIPR, 13, 3, "lptim5", lptim_src2),
	KER_CLK(RCC_APB4ENR, 11, RCC_D3CCIPR, 13, 3, "lptim4", lptim_src2),
	KER_CLK(RCC_APB4ENR, 10, RCC_D3CCIPR, 13, 3, "lptim3", lptim_src2),
	KER_CLK(RCC_APB4ENR, 9, RCC_D3CCIPR, 10, 3, "lptim2", lptim_src2),
	KER_CLK(RCC_APB4ENR, 7, RCC_D3CCIPR, 8, 2, "i2c4", i2c_src2),
	KER_CLK(RCC_APB4ENR, 5, RCC_D3CCIPR, 28, 3, "spi6", spi_src3),
	KER_CLK(RCC_APB4ENR, 3, RCC_D3CCIPR, 0, 3, "lpuart1", lpuart1_src),
};
/* Kernel clocks use the generic mux and gate ops (no special handling). */
static struct composite_clk_gcfg kernel_clk_cfg = {
	M_CFG_MUX(NULL, 0),
	M_CFG_GATE(NULL, 0),
};
/* RTC clock */

/*
 * RTC & LSE registers are protected against parasitic write access.
 * The PWR_CR_DBP bit must be set to enable write access to the RTC
 * registers.
 */
/* STM32_PWR_CR register offset in the PWR syscon */
#define PWR_CR 0x00
/* STM32_PWR_CR bit field: Disable Backup Protection */
#define PWR_CR_DBP BIT(8)

/* The RTC clock uses the generic mux and gate ops. */
static struct composite_clk_gcfg rtc_clk_cfg = {
	M_CFG_MUX(NULL, 0),
	M_CFG_GATE(NULL, 0),
};

/* rtc_ck: gated by RCC_BDCR bit 15, source selected among rtc_src. */
static const struct composite_clk_cfg rtc_clk =
	KER_CLK(RCC_BDCR, 15, RCC_BDCR, 8, 2, "rtc_ck", rtc_src);
/* Micro-controller output clock */

/* MCO outputs: generic mux plus a one-based divider that may read 0. */
static struct composite_clk_gcfg mco_clk_cfg = {
	M_CFG_MUX(NULL, 0),
	M_CFG_DIV(NULL, CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO),
};

/*
 * Initializer for one MCO pin clock: a source mux and a prescaler,
 * both located in RCC_CFGR; there is no gate.
 */
#define M_MCO_F(_name, _parents, _mux_offset, _mux_shift, _mux_width,\
		_rate_offset, _rate_shift, _rate_width,\
		_flags)\
{\
	.mux = &(struct muxdiv_cfg) {_mux_offset, _mux_shift, _mux_width },\
	.div = &(struct muxdiv_cfg) {_rate_offset, _rate_shift, _rate_width},\
	.gate = NULL,\
	.name = _name,\
	.parent_name = _parents,\
	.num_parents = ARRAY_SIZE(_parents),\
	.flags = _flags,\
}
/* The two micro-controller clock output pins. */
static const struct composite_clk_cfg mco_clk[] = {
	M_MCO_F("mco1", mco_src1, RCC_CFGR, 22, 4, RCC_CFGR, 18, 4, 0),
	M_MCO_F("mco2", mco_src2, RCC_CFGR, 29, 3, RCC_CFGR, 25, 4, 0),
};
  989. static void __init stm32h7_rcc_init(struct device_node *np)
  990. {
  991. struct clk_hw_onecell_data *clk_data;
  992. struct composite_cfg c_cfg;
  993. int n;
  994. const char *hse_clk, *lse_clk, *i2s_clk;
  995. struct regmap *pdrm;
  996. clk_data = kzalloc(sizeof(*clk_data) +
  997. sizeof(*clk_data->hws) * STM32H7_MAX_CLKS,
  998. GFP_KERNEL);
  999. if (!clk_data)
  1000. return;
  1001. clk_data->num = STM32H7_MAX_CLKS;
  1002. hws = clk_data->hws;
  1003. for (n = 0; n < STM32H7_MAX_CLKS; n++)
  1004. hws[n] = ERR_PTR(-ENOENT);
  1005. /* get RCC base @ from DT */
  1006. base = of_iomap(np, 0);
  1007. if (!base) {
  1008. pr_err("%s: unable to map resource", np->name);
  1009. goto err_free_clks;
  1010. }
  1011. pdrm = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
  1012. if (IS_ERR(pdrm))
  1013. pr_warn("%s: Unable to get syscfg\n", __func__);
  1014. else
  1015. /* In any case disable backup domain write protection
  1016. * and will never be enabled.
  1017. * Needed by LSE & RTC clocks.
  1018. */
  1019. regmap_update_bits(pdrm, PWR_CR, PWR_CR_DBP, PWR_CR_DBP);
  1020. /* Put parent names from DT */
  1021. hse_clk = of_clk_get_parent_name(np, 0);
  1022. lse_clk = of_clk_get_parent_name(np, 1);
  1023. i2s_clk = of_clk_get_parent_name(np, 2);
  1024. sai_src[3] = i2s_clk;
  1025. spi_src1[3] = i2s_clk;
  1026. /* Register Internal oscillators */
  1027. clk_hw_register_fixed_rate(NULL, "clk-hsi", NULL, 0, 64000000);
  1028. clk_hw_register_fixed_rate(NULL, "clk-csi", NULL, 0, 4000000);
  1029. clk_hw_register_fixed_rate(NULL, "clk-lsi", NULL, 0, 32000);
  1030. clk_hw_register_fixed_rate(NULL, "clk-rc48", NULL, 0, 48000);
  1031. /* This clock is coming from outside. Frequencies unknown */
  1032. hws[CK_DSI_PHY] = clk_hw_register_fixed_rate(NULL, "ck_dsi_phy", NULL,
  1033. 0, 0);
  1034. hws[HSI_DIV] = clk_hw_register_divider(NULL, "hsidiv", "clk-hsi", 0,
  1035. base + RCC_CR, 3, 2, CLK_DIVIDER_POWER_OF_TWO,
  1036. &stm32rcc_lock);
  1037. hws[HSE_1M] = clk_hw_register_divider(NULL, "hse_1M", "hse_ck", 0,
  1038. base + RCC_CFGR, 8, 6, CLK_DIVIDER_ONE_BASED |
  1039. CLK_DIVIDER_ALLOW_ZERO,
  1040. &stm32rcc_lock);
  1041. /* Mux system clocks */
  1042. for (n = 0; n < ARRAY_SIZE(stm32_mclk); n++)
  1043. hws[MCLK_BANK + n] = clk_hw_register_mux(NULL,
  1044. stm32_mclk[n].name,
  1045. stm32_mclk[n].parents,
  1046. stm32_mclk[n].num_parents,
  1047. stm32_mclk[n].flags,
  1048. stm32_mclk[n].offset + base,
  1049. stm32_mclk[n].shift,
  1050. stm32_mclk[n].width,
  1051. 0,
  1052. &stm32rcc_lock);
  1053. register_core_and_bus_clocks();
  1054. /* Oscillary clocks */
  1055. for (n = 0; n < ARRAY_SIZE(stm32_oclk); n++)
  1056. hws[OSC_BANK + n] = clk_register_ready_gate(NULL,
  1057. stm32_oclk[n].name,
  1058. stm32_oclk[n].parent,
  1059. stm32_oclk[n].gate_offset + base,
  1060. stm32_oclk[n].bit_idx,
  1061. stm32_oclk[n].bit_rdy,
  1062. stm32_oclk[n].flags,
  1063. &stm32rcc_lock);
  1064. hws[HSE_CK] = clk_register_ready_gate(NULL,
  1065. "hse_ck",
  1066. hse_clk,
  1067. RCC_CR + base,
  1068. 16, 17,
  1069. 0,
  1070. &stm32rcc_lock);
  1071. hws[LSE_CK] = clk_register_ready_gate(NULL,
  1072. "lse_ck",
  1073. lse_clk,
  1074. RCC_BDCR + base,
  1075. 0, 1,
  1076. 0,
  1077. &stm32rcc_lock);
  1078. hws[CSI_KER_DIV122 + n] = clk_hw_register_fixed_factor(NULL,
  1079. "csi_ker_div122", "csi_ker", 0, 1, 122);
  1080. /* PLLs */
  1081. for (n = 0; n < ARRAY_SIZE(stm32_pll); n++) {
  1082. int odf;
  1083. /* Register the VCO */
  1084. clk_register_stm32_pll(NULL, stm32_pll[n].name,
  1085. stm32_pll[n].parent_name, stm32_pll[n].flags,
  1086. stm32_pll[n].cfg,
  1087. &stm32rcc_lock);
  1088. /* Register the 3 output dividers */
  1089. for (odf = 0; odf < 3; odf++) {
  1090. int idx = n * 3 + odf;
  1091. get_cfg_composite_div(&odf_clk_gcfg, &stm32_odf[n][odf],
  1092. &c_cfg, &stm32rcc_lock);
  1093. hws[ODF_BANK + idx] = clk_hw_register_composite(NULL,
  1094. stm32_odf[n][odf].name,
  1095. stm32_odf[n][odf].parent_name,
  1096. stm32_odf[n][odf].num_parents,
  1097. c_cfg.mux_hw, c_cfg.mux_ops,
  1098. c_cfg.div_hw, c_cfg.div_ops,
  1099. c_cfg.gate_hw, c_cfg.gate_ops,
  1100. stm32_odf[n][odf].flags);
  1101. }
  1102. }
  1103. /* Peripheral clocks */
  1104. for (n = 0; n < ARRAY_SIZE(pclk); n++)
  1105. hws[PERIF_BANK + n] = clk_hw_register_gate(NULL, pclk[n].name,
  1106. pclk[n].parent,
  1107. pclk[n].flags, base + pclk[n].gate_offset,
  1108. pclk[n].bit_idx, pclk[n].flags, &stm32rcc_lock);
  1109. /* Kernel clocks */
  1110. for (n = 0; n < ARRAY_SIZE(kclk); n++) {
  1111. get_cfg_composite_div(&kernel_clk_cfg, &kclk[n], &c_cfg,
  1112. &stm32rcc_lock);
  1113. hws[KERN_BANK + n] = clk_hw_register_composite(NULL,
  1114. kclk[n].name,
  1115. kclk[n].parent_name,
  1116. kclk[n].num_parents,
  1117. c_cfg.mux_hw, c_cfg.mux_ops,
  1118. c_cfg.div_hw, c_cfg.div_ops,
  1119. c_cfg.gate_hw, c_cfg.gate_ops,
  1120. kclk[n].flags);
  1121. }
  1122. /* RTC clock (default state is off) */
  1123. clk_hw_register_fixed_rate(NULL, "off", NULL, 0, 0);
  1124. get_cfg_composite_div(&rtc_clk_cfg, &rtc_clk, &c_cfg, &stm32rcc_lock);
  1125. hws[RTC_CK] = clk_hw_register_composite(NULL,
  1126. rtc_clk.name,
  1127. rtc_clk.parent_name,
  1128. rtc_clk.num_parents,
  1129. c_cfg.mux_hw, c_cfg.mux_ops,
  1130. c_cfg.div_hw, c_cfg.div_ops,
  1131. c_cfg.gate_hw, c_cfg.gate_ops,
  1132. rtc_clk.flags);
  1133. /* Micro-controller clocks */
  1134. for (n = 0; n < ARRAY_SIZE(mco_clk); n++) {
  1135. get_cfg_composite_div(&mco_clk_cfg, &mco_clk[n], &c_cfg,
  1136. &stm32rcc_lock);
  1137. hws[MCO_BANK + n] = clk_hw_register_composite(NULL,
  1138. mco_clk[n].name,
  1139. mco_clk[n].parent_name,
  1140. mco_clk[n].num_parents,
  1141. c_cfg.mux_hw, c_cfg.mux_ops,
  1142. c_cfg.div_hw, c_cfg.div_ops,
  1143. c_cfg.gate_hw, c_cfg.gate_ops,
  1144. mco_clk[n].flags);
  1145. }
  1146. of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
  1147. return;
  1148. err_free_clks:
  1149. kfree(clk_data);
  1150. }
/* The RCC node is a clock and reset controller, and these
 * functionalities are supported by different drivers that
 * match the same compatible strings.
 */
CLK_OF_DECLARE_DRIVER(stm32h7_rcc, "st,stm32h743-rcc", stm32h7_rcc_init);