clk-stm32h7.c 36 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) STMicroelectronics 2017
  4. * Author: Gabriel Fernandez <gabriel.fernandez@st.com> for STMicroelectronics.
  5. */
  6. #include <linux/clk.h>
  7. #include <linux/clk-provider.h>
  8. #include <linux/err.h>
  9. #include <linux/io.h>
  10. #include <linux/mfd/syscon.h>
  11. #include <linux/of.h>
  12. #include <linux/of_address.h>
  13. #include <linux/slab.h>
  14. #include <linux/spinlock.h>
  15. #include <linux/regmap.h>
  16. #include <dt-bindings/clock/stm32h7-clks.h>
/* Reset Clock Control Registers (offsets from the RCC base) */
#define RCC_CR 0x00		/* oscillator/PLL on + ready bits */
#define RCC_CFGR 0x10		/* system clock mux (SW) + TIMPRE (bit 15) */
#define RCC_D1CFGR 0x18		/* domain 1 prescalers (d1cpre, hclk, pclk3) */
#define RCC_D2CFGR 0x1C		/* domain 2 prescalers (pclk1, pclk2) */
#define RCC_D3CFGR 0x20		/* domain 3 prescalers (pclk4) */
#define RCC_PLLCKSELR 0x28	/* PLL source mux + DIVM1..3 pre-dividers */
#define RCC_PLLCFGR 0x2C	/* PLL frac-enable + output divider enables */
#define RCC_PLL1DIVR 0x30	/* PLL1 N/P/Q/R dividers */
#define RCC_PLL1FRACR 0x34	/* PLL1 fractional value */
#define RCC_PLL2DIVR 0x38
#define RCC_PLL2FRACR 0x3C
#define RCC_PLL3DIVR 0x40
#define RCC_PLL3FRACR 0x44
#define RCC_D1CCIPR 0x4C	/* domain 1 kernel clock selection */
#define RCC_D2CCIP1R 0x50	/* domain 2 kernel clock selection 1 */
#define RCC_D2CCIP2R 0x54	/* domain 2 kernel clock selection 2 */
#define RCC_D3CCIPR 0x58	/* domain 3 kernel clock selection */
#define RCC_BDCR 0x70		/* backup domain control */
#define RCC_CSR 0x74		/* LSI on/ready (see stm32_oclk) */
#define RCC_AHB3ENR 0xD4	/* peripheral clock enable registers ... */
#define RCC_AHB1ENR 0xD8
#define RCC_AHB2ENR 0xDC
#define RCC_AHB4ENR 0xE0
#define RCC_APB3ENR 0xE4
#define RCC_APB1LENR 0xE8
#define RCC_APB1HENR 0xEC
#define RCC_APB2ENR 0xF0
#define RCC_APB4ENR 0xF4
/* Protects RCC register read-modify-write sequences */
static DEFINE_SPINLOCK(stm32rcc_lock);

/* ioremapped RCC base and the clk_hw table exposed to the framework */
static void __iomem *base;
static struct clk_hw **hws;

/* System clock parent */
static const char * const sys_src[] = {
	"hsi_ck", "csi_ck", "hse_ck", "pll1_p" };

static const char * const tracein_src[] = {
	"hsi_ck", "csi_ck", "hse_ck", "pll1_r" };

static const char * const per_src[] = {
	"hsi_ker", "csi_ker", "hse_ck", "disabled" };

static const char * const pll_src[] = {
	"hsi_ck", "csi_ck", "hse_ck", "no clock" };

static const char * const sdmmc_src[] = { "pll1_q", "pll2_r" };

static const char * const dsi_src[] = { "ck_dsi_phy", "pll2_q" };

static const char * const qspi_src[] = {
	"hclk", "pll1_q", "pll2_r", "per_ck" };

static const char * const fmc_src[] = {
	"hclk", "pll1_q", "pll2_r", "per_ck" };

/* Kernel clock parent */
static const char * const swp_src[] = { "pclk1", "hsi_ker" };

static const char * const fdcan_src[] = { "hse_ck", "pll1_q", "pll2_q" };

static const char * const dfsdm1_src[] = { "pclk2", "sys_ck" };

static const char * const spdifrx_src[] = {
	"pll1_q", "pll2_r", "pll3_r", "hsi_ker" };

/*
 * Deliberately non-const: the NULL slot is presumably filled in at probe
 * time (e.g. with an external I2S input) -- not visible in this chunk,
 * TODO confirm against the init code.
 */
static const char *spi_src1[5] = {
	"pll1_q", "pll2_p", "pll3_p", NULL, "per_ck" };

static const char * const spi_src2[] = {
	"pclk2", "pll2_q", "pll3_q", "hsi_ker", "csi_ker", "hse_ck" };

static const char * const spi_src3[] = {
	"pclk4", "pll2_q", "pll3_q", "hsi_ker", "csi_ker", "hse_ck" };

static const char * const lptim_src1[] = {
	"pclk1", "pll2_p", "pll3_r", "lse_ck", "lsi_ck", "per_ck" };

static const char * const lptim_src2[] = {
	"pclk4", "pll2_p", "pll3_r", "lse_ck", "lsi_ck", "per_ck" };

static const char * const cec_src[] = {"lse_ck", "lsi_ck", "csi_ker_div122" };

static const char * const usbotg_src[] = {"pll1_q", "pll3_q", "rc48_ck" };

/* i2c 1,2,3 src */
static const char * const i2c_src1[] = {
	"pclk1", "pll3_r", "hsi_ker", "csi_ker" };

static const char * const i2c_src2[] = {
	"pclk4", "pll3_r", "hsi_ker", "csi_ker" };

static const char * const rng_src[] = {
	"rc48_ck", "pll1_q", "lse_ck", "lsi_ck" };

/* usart 1,6 src */
static const char * const usart_src1[] = {
	"pclk2", "pll2_q", "pll3_q", "hsi_ker", "csi_ker", "lse_ck" };

/* usart 2,3,4,5,7,8 src */
static const char * const usart_src2[] = {
	"pclk1", "pll2_q", "pll3_q", "hsi_ker", "csi_ker", "lse_ck" };

/* Non-const for the same reason as spi_src1 (runtime-patched NULL slot) */
static const char *sai_src[5] = {
	"pll1_q", "pll2_p", "pll3_p", NULL, "per_ck" };

static const char * const adc_src[] = { "pll2_p", "pll3_r", "per_ck" };

/* lptim 2,3,4,5 src */
static const char * const lpuart1_src[] = {
	"pclk3", "pll2_q", "pll3_q", "csi_ker", "lse_ck" };

static const char * const hrtim_src[] = { "tim2_ker", "d1cpre" };

/* RTC clock parent */
static const char * const rtc_src[] = { "off", "lse_ck", "lsi_ck", "hse_1M" };

/* Micro-controller output clock parent */
static const char * const mco_src1[] = {
	"hsi_ck", "lse_ck", "hse_ck", "pll1_q", "rc48_ck" };

static const char * const mco_src2[] = {
	"sys_ck", "pll2_p", "hse_ck", "pll1_p", "csi_ck", "lsi_ck" };

/* LCD clock */
static const char * const ltdc_src[] = {"pll3_r"};
/* Gate clock with ready bit and backup domain management */
struct stm32_ready_gate {
	struct clk_gate gate;	/* standard gate: enable bit in gate.reg */
	u8 bit_rdy;		/* position of the hardware "ready" flag */
};

#define to_ready_gate_clk(_rgate) container_of(_rgate, struct stm32_ready_gate,\
		gate)

/* Max polling iterations while waiting on the ready bit (100 us each) */
#define RGATE_TIMEOUT 10000
  119. static int ready_gate_clk_enable(struct clk_hw *hw)
  120. {
  121. struct clk_gate *gate = to_clk_gate(hw);
  122. struct stm32_ready_gate *rgate = to_ready_gate_clk(gate);
  123. int bit_status;
  124. unsigned int timeout = RGATE_TIMEOUT;
  125. if (clk_gate_ops.is_enabled(hw))
  126. return 0;
  127. clk_gate_ops.enable(hw);
  128. /* We can't use readl_poll_timeout() because we can blocked if
  129. * someone enables this clock before clocksource changes.
  130. * Only jiffies counter is available. Jiffies are incremented by
  131. * interruptions and enable op does not allow to be interrupted.
  132. */
  133. do {
  134. bit_status = !(readl(gate->reg) & BIT(rgate->bit_rdy));
  135. if (bit_status)
  136. udelay(100);
  137. } while (bit_status && --timeout);
  138. return bit_status;
  139. }
  140. static void ready_gate_clk_disable(struct clk_hw *hw)
  141. {
  142. struct clk_gate *gate = to_clk_gate(hw);
  143. struct stm32_ready_gate *rgate = to_ready_gate_clk(gate);
  144. int bit_status;
  145. unsigned int timeout = RGATE_TIMEOUT;
  146. if (!clk_gate_ops.is_enabled(hw))
  147. return;
  148. clk_gate_ops.disable(hw);
  149. do {
  150. bit_status = !!(readl(gate->reg) & BIT(rgate->bit_rdy));
  151. if (bit_status)
  152. udelay(100);
  153. } while (bit_status && --timeout);
  154. }
/*
 * Gate ops that additionally wait on the hardware ready flag; the
 * enabled state itself is read with the generic gate helper.
 */
static const struct clk_ops ready_gate_clk_ops = {
	.enable = ready_gate_clk_enable,
	.disable = ready_gate_clk_disable,
	.is_enabled = clk_gate_is_enabled,
};
/*
 * Register a "ready gate" clock: enable bit @bit_idx in @reg, whose
 * enable/disable also poll the ready flag at @bit_rdy.
 *
 * Returns the registered clk_hw, or an ERR_PTR on allocation or
 * registration failure (the gate is freed in that case).
 */
static struct clk_hw *clk_register_ready_gate(struct device *dev,
		const char *name, const char *parent_name,
		void __iomem *reg, u8 bit_idx, u8 bit_rdy,
		unsigned long flags, spinlock_t *lock)
{
	struct stm32_ready_gate *rgate;
	struct clk_init_data init = { NULL };
	struct clk_hw *hw;
	int ret;

	rgate = kzalloc(sizeof(*rgate), GFP_KERNEL);
	if (!rgate)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &ready_gate_clk_ops;
	init.flags = flags;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	rgate->bit_rdy = bit_rdy;
	rgate->gate.lock = lock;
	rgate->gate.reg = reg;
	rgate->gate.bit_idx = bit_idx;
	rgate->gate.hw.init = &init;

	hw = &rgate->gate.hw;
	ret = clk_hw_register(dev, hw);
	if (ret) {
		kfree(rgate);
		hw = ERR_PTR(ret);
	}

	return hw;
}
/* Gate description: register offset (from RCC base) + enable bit */
struct gate_cfg {
	u32 offset;
	u8 bit_idx;
};

/* Mux or divider description: register offset + bit-field position/size */
struct muxdiv_cfg {
	u32 offset;
	u8 shift;
	u8 width;
};

/* Full description of one composite clock; any sub-part may be NULL */
struct composite_clk_cfg {
	struct gate_cfg *gate;
	struct muxdiv_cfg *mux;
	struct muxdiv_cfg *div;
	const char *name;
	const char * const *parent_name;
	int num_parents;
	u32 flags;
};

/* Ops + flags shared by one sub-element across a family of clocks */
struct composite_clk_gcfg_t {
	u8 flags;
	const struct clk_ops *ops;
};

/*
 * General config definition of a composite clock (only clock diviser for rate)
 */
struct composite_clk_gcfg {
	struct composite_clk_gcfg_t *mux;
	struct composite_clk_gcfg_t *div;
	struct composite_clk_gcfg_t *gate;
};

/* Designated-initializer helpers building an anonymous gcfg_t */
#define M_CFG_MUX(_mux_ops, _mux_flags)\
	.mux = &(struct composite_clk_gcfg_t) { _mux_flags, _mux_ops}

#define M_CFG_DIV(_rate_ops, _rate_flags)\
	.div = &(struct composite_clk_gcfg_t) {_rate_flags, _rate_ops}

#define M_CFG_GATE(_gate_ops, _gate_flags)\
	.gate = &(struct composite_clk_gcfg_t) { _gate_flags, _gate_ops}
  226. static struct clk_mux *_get_cmux(void __iomem *reg, u8 shift, u8 width,
  227. u32 flags, spinlock_t *lock)
  228. {
  229. struct clk_mux *mux;
  230. mux = kzalloc(sizeof(*mux), GFP_KERNEL);
  231. if (!mux)
  232. return ERR_PTR(-ENOMEM);
  233. mux->reg = reg;
  234. mux->shift = shift;
  235. mux->mask = (1 << width) - 1;
  236. mux->flags = flags;
  237. mux->lock = lock;
  238. return mux;
  239. }
  240. static struct clk_divider *_get_cdiv(void __iomem *reg, u8 shift, u8 width,
  241. u32 flags, spinlock_t *lock)
  242. {
  243. struct clk_divider *div;
  244. div = kzalloc(sizeof(*div), GFP_KERNEL);
  245. if (!div)
  246. return ERR_PTR(-ENOMEM);
  247. div->reg = reg;
  248. div->shift = shift;
  249. div->width = width;
  250. div->flags = flags;
  251. div->lock = lock;
  252. return div;
  253. }
  254. static struct clk_gate *_get_cgate(void __iomem *reg, u8 bit_idx, u32 flags,
  255. spinlock_t *lock)
  256. {
  257. struct clk_gate *gate;
  258. gate = kzalloc(sizeof(*gate), GFP_KERNEL);
  259. if (!gate)
  260. return ERR_PTR(-ENOMEM);
  261. gate->reg = reg;
  262. gate->bit_idx = bit_idx;
  263. gate->flags = flags;
  264. gate->lock = lock;
  265. return gate;
  266. }
/* Resolved hw pointers + ops for a composite clock's three sub-parts */
struct composite_cfg {
	struct clk_hw *mux_hw;
	struct clk_hw *div_hw;
	struct clk_hw *gate_hw;

	const struct clk_ops *mux_ops;
	const struct clk_ops *div_ops;
	const struct clk_ops *gate_ops;
};
/*
 * Build the mux/divider/gate parts of a composite clock.
 *
 * For each sub-element present in BOTH the general config (@gcfg) and
 * the per-clock config (@cfg), allocate the low-level struct and pick
 * either the ops supplied by @gcfg or the generic framework ops.
 * Results are returned through @composite; absent parts (or failed
 * allocations, silently skipped) leave NULL hw/ops.
 */
static void get_cfg_composite_div(const struct composite_clk_gcfg *gcfg,
		const struct composite_clk_cfg *cfg,
		struct composite_cfg *composite, spinlock_t *lock)
{
	struct clk_mux *mux = NULL;
	struct clk_divider *div = NULL;
	struct clk_gate *gate = NULL;
	const struct clk_ops *mux_ops, *div_ops, *gate_ops;
	struct clk_hw *mux_hw;
	struct clk_hw *div_hw;
	struct clk_hw *gate_hw;

	mux_ops = div_ops = gate_ops = NULL;
	mux_hw = div_hw = gate_hw = NULL;

	if (gcfg->mux && cfg->mux) {
		mux = _get_cmux(base + cfg->mux->offset,
				cfg->mux->shift,
				cfg->mux->width,
				gcfg->mux->flags, lock);

		if (!IS_ERR(mux)) {
			mux_hw = &mux->hw;
			mux_ops = gcfg->mux->ops ?
				gcfg->mux->ops : &clk_mux_ops;
		}
	}

	if (gcfg->div && cfg->div) {
		div = _get_cdiv(base + cfg->div->offset,
				cfg->div->shift,
				cfg->div->width,
				gcfg->div->flags, lock);

		if (!IS_ERR(div)) {
			div_hw = &div->hw;
			div_ops = gcfg->div->ops ?
				gcfg->div->ops : &clk_divider_ops;
		}
	}

	if (gcfg->gate && cfg->gate) {
		gate = _get_cgate(base + cfg->gate->offset,
				cfg->gate->bit_idx,
				gcfg->gate->flags, lock);

		if (!IS_ERR(gate)) {
			gate_hw = &gate->hw;
			gate_ops = gcfg->gate->ops ?
				gcfg->gate->ops : &clk_gate_ops;
		}
	}

	composite->mux_hw = mux_hw;
	composite->mux_ops = mux_ops;

	composite->div_hw = div_hw;
	composite->div_ops = div_ops;

	composite->gate_hw = gate_hw;
	composite->gate_ops = gate_ops;
}
/* Kernel Timer */
struct timer_ker {
	u8 dppre_shift;		/* position of the APB prescaler field in RCC_D2CFGR */
	struct clk_hw hw;
	spinlock_t *lock;
};

#define to_timer_ker(_hw) container_of(_hw, struct timer_ker, hw)
  334. static unsigned long timer_ker_recalc_rate(struct clk_hw *hw,
  335. unsigned long parent_rate)
  336. {
  337. struct timer_ker *clk_elem = to_timer_ker(hw);
  338. u32 timpre;
  339. u32 dppre_shift = clk_elem->dppre_shift;
  340. u32 prescaler;
  341. u32 mul;
  342. timpre = (readl(base + RCC_CFGR) >> 15) & 0x01;
  343. prescaler = (readl(base + RCC_D2CFGR) >> dppre_shift) & 0x03;
  344. mul = 2;
  345. if (prescaler < 4)
  346. mul = 1;
  347. else if (timpre && prescaler > 4)
  348. mul = 4;
  349. return parent_rate * mul;
  350. }
  351. static const struct clk_ops timer_ker_ops = {
  352. .recalc_rate = timer_ker_recalc_rate,
  353. };
  354. static struct clk_hw *clk_register_stm32_timer_ker(struct device *dev,
  355. const char *name, const char *parent_name,
  356. unsigned long flags,
  357. u8 dppre_shift,
  358. spinlock_t *lock)
  359. {
  360. struct timer_ker *element;
  361. struct clk_init_data init;
  362. struct clk_hw *hw;
  363. int err;
  364. element = kzalloc(sizeof(*element), GFP_KERNEL);
  365. if (!element)
  366. return ERR_PTR(-ENOMEM);
  367. init.name = name;
  368. init.ops = &timer_ker_ops;
  369. init.flags = flags;
  370. init.parent_names = &parent_name;
  371. init.num_parents = 1;
  372. element->hw.init = &init;
  373. element->lock = lock;
  374. element->dppre_shift = dppre_shift;
  375. hw = &element->hw;
  376. err = clk_hw_register(dev, hw);
  377. if (err) {
  378. kfree(element);
  379. return ERR_PTR(err);
  380. }
  381. return hw;
  382. }
/*
 * D1CPRE/HPRE encoding: values 0..7 bypass (/1); 8..15 divide by
 * 2, 4, 8, 16, 64, 128, 256, 512 (note: no /32 step).
 */
static const struct clk_div_table d1cpre_div_table[] = {
	{ 0, 1 }, { 1, 1 }, { 2, 1 }, { 3, 1},
	{ 4, 1 }, { 5, 1 }, { 6, 1 }, { 7, 1},
	{ 8, 2 }, { 9, 4 }, { 10, 8 }, { 11, 16 },
	{ 12, 64 }, { 13, 128 }, { 14, 256 },
	{ 15, 512 },
	{ 0 },
};

/* APB prescaler encoding: 0..3 bypass; 4..7 divide by 2, 4, 8, 16 */
static const struct clk_div_table ppre_div_table[] = {
	{ 0, 1 }, { 1, 1 }, { 2, 1 }, { 3, 1},
	{ 4, 2 }, { 5, 4 }, { 6, 8 }, { 7, 16 },
	{ 0 },
};
/*
 * Register the fixed core/bus clock tree: d1cpre and hclk dividers,
 * the CPU systick (d1cpre / 8), the APB1..4 prescalers and the two
 * timer kernel clocks. All divider fields live in the D1/D2/D3 CFGR
 * registers; d1cpre/hclk keep CLK_IGNORE_UNUSED since the CPU runs
 * from them.
 */
static void register_core_and_bus_clocks(void)
{
	/* CORE AND BUS */
	hws[SYS_D1CPRE] = clk_hw_register_divider_table(NULL, "d1cpre",
			"sys_ck", CLK_IGNORE_UNUSED, base + RCC_D1CFGR, 8, 4, 0,
			d1cpre_div_table, &stm32rcc_lock);

	hws[HCLK] = clk_hw_register_divider_table(NULL, "hclk", "d1cpre",
			CLK_IGNORE_UNUSED, base + RCC_D1CFGR, 0, 4, 0,
			d1cpre_div_table, &stm32rcc_lock);

	/* D1 DOMAIN */
	/* * CPU Systick */
	hws[CPU_SYSTICK] = clk_hw_register_fixed_factor(NULL, "systick",
			"d1cpre", 0, 1, 8);

	/* * APB3 peripheral */
	hws[PCLK3] = clk_hw_register_divider_table(NULL, "pclk3", "hclk", 0,
			base + RCC_D1CFGR, 4, 3, 0,
			ppre_div_table, &stm32rcc_lock);

	/* D2 DOMAIN */
	/* * APB1 peripheral */
	hws[PCLK1] = clk_hw_register_divider_table(NULL, "pclk1", "hclk", 0,
			base + RCC_D2CFGR, 4, 3, 0,
			ppre_div_table, &stm32rcc_lock);

	/* Timers prescaler clocks (D2PPRE1 field at shift 4) */
	clk_register_stm32_timer_ker(NULL, "tim1_ker", "pclk1", 0,
			4, &stm32rcc_lock);

	/* * APB2 peripheral */
	hws[PCLK2] = clk_hw_register_divider_table(NULL, "pclk2", "hclk", 0,
			base + RCC_D2CFGR, 8, 3, 0, ppre_div_table,
			&stm32rcc_lock);

	/* D2PPRE2 field at shift 8 */
	clk_register_stm32_timer_ker(NULL, "tim2_ker", "pclk2", 0, 8,
			&stm32rcc_lock);

	/* D3 DOMAIN */
	/* * APB4 peripheral */
	hws[PCLK4] = clk_hw_register_divider_table(NULL, "pclk4", "hclk", 0,
			base + RCC_D3CFGR, 4, 3, 0,
			ppre_div_table, &stm32rcc_lock);
}
  433. /* MUX clock configuration */
  434. struct stm32_mux_clk {
  435. const char *name;
  436. const char * const *parents;
  437. u8 num_parents;
  438. u32 offset;
  439. u8 shift;
  440. u8 width;
  441. u32 flags;
  442. };
  443. #define M_MCLOCF(_name, _parents, _mux_offset, _mux_shift, _mux_width, _flags)\
  444. {\
  445. .name = _name,\
  446. .parents = _parents,\
  447. .num_parents = ARRAY_SIZE(_parents),\
  448. .offset = _mux_offset,\
  449. .shift = _mux_shift,\
  450. .width = _mux_width,\
  451. .flags = _flags,\
  452. }
  453. #define M_MCLOC(_name, _parents, _mux_offset, _mux_shift, _mux_width)\
  454. M_MCLOCF(_name, _parents, _mux_offset, _mux_shift, _mux_width, 0)\
/* Simple source muxes registered at init */
static const struct stm32_mux_clk stm32_mclk[] __initconst = {
	M_MCLOC("per_ck", per_src, RCC_D1CCIPR, 28, 3),
	M_MCLOC("pllsrc", pll_src, RCC_PLLCKSELR, 0, 3),
	M_MCLOC("sys_ck", sys_src, RCC_CFGR, 0, 3),
	/* NOTE(review): same register/shift as sys_ck -- looks like it
	 * should address the TRACE mux field instead; confirm vs RM0433.
	 */
	M_MCLOC("tracein_ck", tracein_src, RCC_CFGR, 0, 3),
};
/* Oscillator clock configuration */
struct stm32_osc_clk {
	const char *name;
	const char *parent;
	u32 gate_offset;	/* register holding the oscillator ON bit */
	u8 bit_idx;		/* ON bit position */
	u8 bit_rdy;		/* matching ready bit position */
	u32 flags;
};

#define OSC_CLKF(_name, _parent, _gate_offset, _bit_idx, _bit_rdy, _flags)\
{\
	.name = _name,\
	.parent = _parent,\
	.gate_offset = _gate_offset,\
	.bit_idx = _bit_idx,\
	.bit_rdy = _bit_rdy,\
	.flags = _flags,\
}

#define OSC_CLK(_name, _parent, _gate_offset, _bit_idx, _bit_rdy)\
	OSC_CLKF(_name, _parent, _gate_offset, _bit_idx, _bit_rdy, 0)
/*
 * Internal oscillators, gated in RCC_CR (LSI in RCC_CSR) with a ready
 * bit; all keep CLK_IGNORE_UNUSED since the system clock may be fed
 * from them at boot.
 */
static const struct stm32_osc_clk stm32_oclk[] __initconst = {
	OSC_CLKF("hsi_ck", "hsidiv", RCC_CR, 0, 2, CLK_IGNORE_UNUSED),
	OSC_CLKF("hsi_ker", "hsidiv", RCC_CR, 1, 2, CLK_IGNORE_UNUSED),
	OSC_CLKF("csi_ck", "clk-csi", RCC_CR, 7, 8, CLK_IGNORE_UNUSED),
	OSC_CLKF("csi_ker", "clk-csi", RCC_CR, 9, 8, CLK_IGNORE_UNUSED),
	OSC_CLKF("rc48_ck", "clk-rc48", RCC_CR, 12, 13, CLK_IGNORE_UNUSED),
	OSC_CLKF("lsi_ck", "clk-lsi", RCC_CSR, 0, 1, CLK_IGNORE_UNUSED),
};
/* PLL configuration */
/* Per-PLL register layout */
struct st32h7_pll_cfg {
	u8 bit_idx;		/* PLLxON bit in RCC_CR (ready = bit_idx + 1) */
	u32 offset_divr;	/* RCC_PLLxDIVR offset */
	u8 bit_frac_en;		/* fractional-enable bit in RCC_PLLCFGR */
	u32 offset_frac;	/* RCC_PLLxFRACR offset */
	u8 divm;		/* DIVMx field position in RCC_PLLCKSELR */
};

struct stm32_pll_data {
	const char *name;
	const char *parent_name;
	unsigned long flags;
	const struct st32h7_pll_cfg *cfg;
};

static const struct st32h7_pll_cfg stm32h7_pll1 = {
	.bit_idx = 24,
	.offset_divr = RCC_PLL1DIVR,
	.bit_frac_en = 0,
	.offset_frac = RCC_PLL1FRACR,
	.divm = 4,
};

static const struct st32h7_pll_cfg stm32h7_pll2 = {
	.bit_idx = 26,
	.offset_divr = RCC_PLL2DIVR,
	.bit_frac_en = 4,
	.offset_frac = RCC_PLL2FRACR,
	.divm = 12,
};

static const struct st32h7_pll_cfg stm32h7_pll3 = {
	.bit_idx = 28,
	.offset_divr = RCC_PLL3DIVR,
	.bit_frac_en = 8,
	.offset_frac = RCC_PLL3FRACR,
	.divm = 20,
};

/* vco1 keeps CLK_IGNORE_UNUSED: the system clock can derive from PLL1 */
static const struct stm32_pll_data stm32_pll[] = {
	{ "vco1", "pllsrc", CLK_IGNORE_UNUSED, &stm32h7_pll1 },
	{ "vco2", "pllsrc", 0, &stm32h7_pll2 },
	{ "vco3", "pllsrc", 0, &stm32h7_pll3 },
};
/*
 * Fractional divider state for a PLL; the resulting rate is computed in
 * pll_fd_recalc_rate() from the M, N and FRAC fields described here.
 */
struct stm32_fractional_divider {
	void __iomem *mreg;	/* register holding the M pre-divider */
	u8 mshift;
	u8 mwidth;
	u32 mmask;		/* NOTE(review): never written/read in this chunk */
	void __iomem *nreg;	/* register holding the N multiplier */
	u8 nshift;
	u8 nwidth;

	void __iomem *freg_status;	/* fractional-enable register */
	u8 freg_bit;			/* fractional-enable bit */
	void __iomem *freg_value;	/* fractional value register */
	u8 fshift;
	u8 fwidth;

	u8 flags;
	struct clk_hw hw;
	spinlock_t *lock;
};

/* A PLL: ready-gated ON bit + fractional divider behind a single clk_hw */
struct stm32_pll_obj {
	spinlock_t *lock;
	struct stm32_fractional_divider div;
	struct stm32_ready_gate rgate;
	struct clk_hw hw;
};

#define to_pll(_hw) container_of(_hw, struct stm32_pll_obj, hw)
/*
 * The PLL on/off path reuses the ready-gate ops. The embedded rgate's
 * clk_hw is never registered itself, so __clk_hw_set_clk() first copies
 * the core/clk pointers from the PLL's own hw before delegating.
 */
static int pll_is_enabled(struct clk_hw *hw)
{
	struct stm32_pll_obj *clk_elem = to_pll(hw);
	struct clk_hw *_hw = &clk_elem->rgate.gate.hw;

	__clk_hw_set_clk(_hw, hw);

	return ready_gate_clk_ops.is_enabled(_hw);
}

/* Turn the PLL on and wait for its ready bit via the ready-gate ops */
static int pll_enable(struct clk_hw *hw)
{
	struct stm32_pll_obj *clk_elem = to_pll(hw);
	struct clk_hw *_hw = &clk_elem->rgate.gate.hw;

	__clk_hw_set_clk(_hw, hw);

	return ready_gate_clk_ops.enable(_hw);
}

/* Turn the PLL off and wait for its ready bit to drop */
static void pll_disable(struct clk_hw *hw)
{
	struct stm32_pll_obj *clk_elem = to_pll(hw);
	struct clk_hw *_hw = &clk_elem->rgate.gate.hw;

	__clk_hw_set_clk(_hw, hw);

	ready_gate_clk_ops.disable(_hw);
}
/* True when the PLL's fractional-enable bit is set in freg_status */
static int pll_frac_is_enabled(struct clk_hw *hw)
{
	struct stm32_pll_obj *clk_elem = to_pll(hw);
	struct stm32_fractional_divider *fd = &clk_elem->div;

	return (readl(fd->freg_status) >> fd->freg_bit) & 0x01;
}

/* Read the raw fractional field: fd->fwidth bits at fd->fshift */
static unsigned long pll_read_frac(struct clk_hw *hw)
{
	struct stm32_pll_obj *clk_elem = to_pll(hw);
	struct stm32_fractional_divider *fd = &clk_elem->div;

	return (readl(fd->freg_value) >> fd->fshift) &
		GENMASK(fd->fwidth - 1, 0);
}
/*
 * VCO rate: parent * N / M, plus parent * FRAC / (M * 8191) when the
 * fractional part is enabled. The N field is read +1 (hardware stores
 * the multiplier minus one); M is used as read. Falls back to the
 * parent rate when either field is zero.
 *
 * NOTE(review): a 13-bit fraction would normally use 2^13 = 8192 as
 * denominator; 8191 matches GENMASK(12, 0) -- confirm intended.
 */
static unsigned long pll_fd_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	struct stm32_pll_obj *clk_elem = to_pll(hw);
	struct stm32_fractional_divider *fd = &clk_elem->div;
	unsigned long m, n;
	u32 val, mask;
	u64 rate, rate1 = 0;

	val = readl(fd->mreg);
	mask = GENMASK(fd->mwidth - 1, 0) << fd->mshift;
	m = (val & mask) >> fd->mshift;

	val = readl(fd->nreg);
	mask = GENMASK(fd->nwidth - 1, 0) << fd->nshift;
	n = ((val & mask) >> fd->nshift) + 1;

	if (!n || !m)
		return parent_rate;

	rate = (u64)parent_rate * n;
	do_div(rate, m);

	if (pll_frac_is_enabled(hw)) {
		val = pll_read_frac(hw);
		rate1 = (u64)parent_rate * (u64)val;
		do_div(rate1, (m * 8191));
	}

	return rate + rate1;
}
/* PLL clk ops: ready-gated on/off + fractional rate readback */
static const struct clk_ops pll_ops = {
	.enable = pll_enable,
	.disable = pll_disable,
	.is_enabled = pll_is_enabled,
	.recalc_rate = pll_fd_recalc_rate,
};
/*
 * Register a PLL VCO clock, wiring together:
 *  - a ready-gated enable bit in RCC_CR (@cfg->bit_idx, ready at +1),
 *  - the M pre-divider field in RCC_PLLCKSELR (6 bits at @cfg->divm),
 *  - the N multiplier field in the PLL's DIVR register (9 bits),
 *  - the optional fractional part (enable bit in RCC_PLLCFGR, 13-bit
 *    value at shift 3 in the FRACR register).
 *
 * Returns the registered clk_hw or an ERR_PTR; @pll is freed on failure.
 */
static struct clk_hw *clk_register_stm32_pll(struct device *dev,
		const char *name,
		const char *parent,
		unsigned long flags,
		const struct st32h7_pll_cfg *cfg,
		spinlock_t *lock)
{
	struct stm32_pll_obj *pll;
	struct clk_init_data init = { NULL };
	struct clk_hw *hw;
	int ret;
	struct stm32_fractional_divider *div = NULL;
	struct stm32_ready_gate *rgate;

	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
	if (!pll)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &pll_ops;
	init.flags = flags;
	init.parent_names = &parent;
	init.num_parents = 1;
	pll->hw.init = &init;

	hw = &pll->hw;
	rgate = &pll->rgate;

	rgate->bit_rdy = cfg->bit_idx + 1;	/* PLLxRDY follows PLLxON */
	rgate->gate.lock = lock;
	rgate->gate.reg = base + RCC_CR;
	rgate->gate.bit_idx = cfg->bit_idx;

	div = &pll->div;
	div->flags = 0;
	div->mreg = base + RCC_PLLCKSELR;
	div->mshift = cfg->divm;
	div->mwidth = 6;
	div->nreg = base + cfg->offset_divr;
	div->nshift = 0;
	div->nwidth = 9;

	div->freg_status = base + RCC_PLLCFGR;
	div->freg_bit = cfg->bit_frac_en;
	div->freg_value = base + cfg->offset_frac;
	div->fshift = 3;
	div->fwidth = 13;

	div->lock = lock;

	ret = clk_hw_register(dev, hw);
	if (ret) {
		kfree(pll);
		hw = ERR_PTR(ret);
	}

	return hw;
}
/* ODF CLOCKS */
/*
 * PLL output-divider (ODF) ops. recalc/round are plain delegates to the
 * generic divider; only set_rate differs (PLL stopped around the write).
 */
static unsigned long odf_divider_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	return clk_divider_ops.recalc_rate(hw, parent_rate);
}

static long odf_divider_round_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long *prate)
{
	return clk_divider_ops.round_rate(hw, rate, prate);
}
/*
 * Set an output-divider rate with the parent PLL stopped, restoring it
 * afterwards if it was running -- presumably the hardware requires the
 * PLL off for ODF changes; confirm against the reference manual.
 */
static int odf_divider_set_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate)
{
	struct clk_hw *hwp;
	int pll_status;
	int ret;

	hwp = clk_hw_get_parent(hw);

	pll_status = pll_is_enabled(hwp);

	if (pll_status)
		pll_disable(hwp);

	ret = clk_divider_ops.set_rate(hw, rate, parent_rate);

	if (pll_status)
		pll_enable(hwp);

	return ret;
}

static const struct clk_ops odf_divider_ops = {
	.recalc_rate = odf_divider_recalc_rate,
	.round_rate = odf_divider_round_rate,
	.set_rate = odf_divider_set_rate,
};
/*
 * Enable an ODF gate with the parent PLL stopped (then restored),
 * mirroring odf_divider_set_rate.
 */
static int odf_gate_enable(struct clk_hw *hw)
{
	struct clk_hw *hwp;
	int pll_status;
	int ret;

	/* already enabled: nothing to do, leave the PLL running */
	if (clk_gate_ops.is_enabled(hw))
		return 0;

	hwp = clk_hw_get_parent(hw);

	pll_status = pll_is_enabled(hwp);

	if (pll_status)
		pll_disable(hwp);

	ret = clk_gate_ops.enable(hw);

	if (pll_status)
		pll_enable(hwp);

	return ret;
}
/* Disable an ODF gate with the parent PLL stopped, then restore it */
static void odf_gate_disable(struct clk_hw *hw)
{
	struct clk_hw *hwp;
	int pll_status;

	/* already disabled: nothing to do */
	if (!clk_gate_ops.is_enabled(hw))
		return;

	hwp = clk_hw_get_parent(hw);

	pll_status = pll_is_enabled(hwp);

	if (pll_status)
		pll_disable(hwp);

	clk_gate_ops.disable(hw);

	if (pll_status)
		pll_enable(hwp);
}

static const struct clk_ops odf_gate_ops = {
	.enable = odf_gate_enable,
	.disable = odf_gate_disable,
	.is_enabled = clk_gate_is_enabled,
};

/* ODF composite = divider (odf_divider_ops) + gate (odf_gate_ops) */
static struct composite_clk_gcfg odf_clk_gcfg = {
	M_CFG_DIV(&odf_divider_ops, 0),
	M_CFG_GATE(&odf_gate_ops, 0),
};
  737. #define M_ODF_F(_name, _parent, _gate_offset, _bit_idx, _rate_offset,\
  738. _rate_shift, _rate_width, _flags)\
  739. {\
  740. .mux = NULL,\
  741. .div = &(struct muxdiv_cfg) {_rate_offset, _rate_shift, _rate_width},\
  742. .gate = &(struct gate_cfg) {_gate_offset, _bit_idx },\
  743. .name = _name,\
  744. .parent_name = &(const char *) {_parent},\
  745. .num_parents = 1,\
  746. .flags = _flags,\
  747. }
  748. #define M_ODF(_name, _parent, _gate_offset, _bit_idx, _rate_offset,\
  749. _rate_shift, _rate_width)\
  750. M_ODF_F(_name, _parent, _gate_offset, _bit_idx, _rate_offset,\
  751. _rate_shift, _rate_width, 0)\
/*
 * P/Q/R output dividers of the three PLLs. PLL1 outputs keep
 * CLK_IGNORE_UNUSED since the system clock may derive from them.
 */
static const struct composite_clk_cfg stm32_odf[3][3] = {
	{
		M_ODF_F("pll1_p", "vco1", RCC_PLLCFGR, 16, RCC_PLL1DIVR, 9, 7,
				CLK_IGNORE_UNUSED),
		M_ODF_F("pll1_q", "vco1", RCC_PLLCFGR, 17, RCC_PLL1DIVR, 16, 7,
				CLK_IGNORE_UNUSED),
		M_ODF_F("pll1_r", "vco1", RCC_PLLCFGR, 18, RCC_PLL1DIVR, 24, 7,
				CLK_IGNORE_UNUSED),
	},
	{
		M_ODF("pll2_p", "vco2", RCC_PLLCFGR, 19, RCC_PLL2DIVR, 9, 7),
		M_ODF("pll2_q", "vco2", RCC_PLLCFGR, 20, RCC_PLL2DIVR, 16, 7),
		M_ODF("pll2_r", "vco2", RCC_PLLCFGR, 21, RCC_PLL2DIVR, 24, 7),
	},
	{
		M_ODF("pll3_p", "vco3", RCC_PLLCFGR, 22, RCC_PLL3DIVR, 9, 7),
		M_ODF("pll3_q", "vco3", RCC_PLLCFGR, 23, RCC_PLL3DIVR, 16, 7),
		M_ODF("pll3_r", "vco3", RCC_PLLCFGR, 24, RCC_PLL3DIVR, 24, 7),
	}
};
/* PERIF CLOCKS */
/* Simple peripheral gate: enable bit @bit_idx in an RCC_xxxENR register */
struct pclk_t {
	u32 gate_offset;	/* RCC_xxxENR offset from RCC base */
	u8 bit_idx;
	const char *name;
	const char *parent;
	u32 flags;
};

#define PER_CLKF(_gate_offset, _bit_idx, _name, _parent, _flags)\
{\
	.gate_offset = _gate_offset,\
	.bit_idx = _bit_idx,\
	.name = _name,\
	.parent = _parent,\
	.flags = _flags,\
}

#define PER_CLK(_gate_offset, _bit_idx, _name, _parent)\
	PER_CLKF(_gate_offset, _bit_idx, _name, _parent, 0)
/*
 * Peripheral bus clocks: one enable bit per entry in an RCC_xxxENR
 * register, clocked from the bus clock named in .parent.
 * Array order is significant: each entry is registered as
 * hws[PERIF_BANK + index] — do not reorder.
 */
static const struct pclk_t pclk[] = {
	/* AHB3 peripherals, on "hclk" */
	PER_CLK(RCC_AHB3ENR, 31, "d1sram1", "hclk"),
	PER_CLK(RCC_AHB3ENR, 30, "itcm", "hclk"),
	PER_CLK(RCC_AHB3ENR, 29, "dtcm2", "hclk"),
	PER_CLK(RCC_AHB3ENR, 28, "dtcm1", "hclk"),
	PER_CLK(RCC_AHB3ENR, 8, "flitf", "hclk"),
	PER_CLK(RCC_AHB3ENR, 5, "jpgdec", "hclk"),
	PER_CLK(RCC_AHB3ENR, 4, "dma2d", "hclk"),
	PER_CLK(RCC_AHB3ENR, 0, "mdma", "hclk"),
	/* AHB1 peripherals */
	PER_CLK(RCC_AHB1ENR, 28, "usb2ulpi", "hclk"),
	PER_CLK(RCC_AHB1ENR, 26, "usb1ulpi", "hclk"),
	PER_CLK(RCC_AHB1ENR, 17, "eth1rx", "hclk"),
	PER_CLK(RCC_AHB1ENR, 16, "eth1tx", "hclk"),
	PER_CLK(RCC_AHB1ENR, 15, "eth1mac", "hclk"),
	PER_CLK(RCC_AHB1ENR, 14, "art", "hclk"),
	PER_CLK(RCC_AHB1ENR, 1, "dma2", "hclk"),
	PER_CLK(RCC_AHB1ENR, 0, "dma1", "hclk"),
	/* AHB2 peripherals */
	PER_CLK(RCC_AHB2ENR, 31, "d2sram3", "hclk"),
	PER_CLK(RCC_AHB2ENR, 30, "d2sram2", "hclk"),
	PER_CLK(RCC_AHB2ENR, 29, "d2sram1", "hclk"),
	PER_CLK(RCC_AHB2ENR, 5, "hash", "hclk"),
	PER_CLK(RCC_AHB2ENR, 4, "crypt", "hclk"),
	PER_CLK(RCC_AHB2ENR, 0, "camitf", "hclk"),
	/* AHB4 peripherals, including the GPIO banks */
	PER_CLK(RCC_AHB4ENR, 28, "bkpram", "hclk"),
	PER_CLK(RCC_AHB4ENR, 25, "hsem", "hclk"),
	PER_CLK(RCC_AHB4ENR, 21, "bdma", "hclk"),
	PER_CLK(RCC_AHB4ENR, 19, "crc", "hclk"),
	PER_CLK(RCC_AHB4ENR, 10, "gpiok", "hclk"),
	PER_CLK(RCC_AHB4ENR, 9, "gpioj", "hclk"),
	PER_CLK(RCC_AHB4ENR, 8, "gpioi", "hclk"),
	PER_CLK(RCC_AHB4ENR, 7, "gpioh", "hclk"),
	PER_CLK(RCC_AHB4ENR, 6, "gpiog", "hclk"),
	PER_CLK(RCC_AHB4ENR, 5, "gpiof", "hclk"),
	PER_CLK(RCC_AHB4ENR, 4, "gpioe", "hclk"),
	PER_CLK(RCC_AHB4ENR, 3, "gpiod", "hclk"),
	PER_CLK(RCC_AHB4ENR, 2, "gpioc", "hclk"),
	PER_CLK(RCC_AHB4ENR, 1, "gpiob", "hclk"),
	PER_CLK(RCC_AHB4ENR, 0, "gpioa", "hclk"),
	/* APB3 */
	PER_CLK(RCC_APB3ENR, 6, "wwdg1", "pclk3"),
	/* APB1 (low); timers are fed from the tim1_ker timer kernel clock */
	PER_CLK(RCC_APB1LENR, 29, "dac12", "pclk1"),
	PER_CLK(RCC_APB1LENR, 11, "wwdg2", "pclk1"),
	PER_CLK(RCC_APB1LENR, 8, "tim14", "tim1_ker"),
	PER_CLK(RCC_APB1LENR, 7, "tim13", "tim1_ker"),
	PER_CLK(RCC_APB1LENR, 6, "tim12", "tim1_ker"),
	PER_CLK(RCC_APB1LENR, 5, "tim7", "tim1_ker"),
	PER_CLK(RCC_APB1LENR, 4, "tim6", "tim1_ker"),
	PER_CLK(RCC_APB1LENR, 3, "tim5", "tim1_ker"),
	PER_CLK(RCC_APB1LENR, 2, "tim4", "tim1_ker"),
	PER_CLK(RCC_APB1LENR, 1, "tim3", "tim1_ker"),
	PER_CLK(RCC_APB1LENR, 0, "tim2", "tim1_ker"),
	/* APB1 (high) */
	PER_CLK(RCC_APB1HENR, 5, "mdios", "pclk1"),
	PER_CLK(RCC_APB1HENR, 4, "opamp", "pclk1"),
	PER_CLK(RCC_APB1HENR, 1, "crs", "pclk1"),
	/* APB2; timers are fed from the tim2_ker timer kernel clock */
	PER_CLK(RCC_APB2ENR, 18, "tim17", "tim2_ker"),
	PER_CLK(RCC_APB2ENR, 17, "tim16", "tim2_ker"),
	PER_CLK(RCC_APB2ENR, 16, "tim15", "tim2_ker"),
	PER_CLK(RCC_APB2ENR, 1, "tim8", "tim2_ker"),
	PER_CLK(RCC_APB2ENR, 0, "tim1", "tim2_ker"),
	/* APB4 */
	PER_CLK(RCC_APB4ENR, 26, "tmpsens", "pclk4"),
	PER_CLK(RCC_APB4ENR, 16, "rtcapb", "pclk4"),
	PER_CLK(RCC_APB4ENR, 15, "vref", "pclk4"),
	PER_CLK(RCC_APB4ENR, 14, "comp12", "pclk4"),
	PER_CLK(RCC_APB4ENR, 1, "syscfg", "pclk4"),
};
  854. /* KERNEL CLOCKS */
/*
 * KER_CLKF() - composite_clk_cfg initializer for a kernel clock:
 * an enable bit (_gate_offset/_bit_idx) plus a parent mux field
 * (_mux_offset/_mux_shift/_mux_width). num_parents comes from
 * ARRAY_SIZE(_parent_name), so _parent_name must be a real array of
 * parent-name strings, not a pointer.
 */
#define KER_CLKF(_gate_offset, _bit_idx,\
		 _mux_offset, _mux_shift, _mux_width,\
		 _name, _parent_name,\
		 _flags) \
{ \
	.gate = &(struct gate_cfg) {_gate_offset, _bit_idx},\
	.mux = &(struct muxdiv_cfg) {_mux_offset, _mux_shift, _mux_width },\
	.name = _name, \
	.parent_name = _parent_name, \
	.num_parents = ARRAY_SIZE(_parent_name),\
	.flags = _flags,\
}
  867. #define KER_CLK(_gate_offset, _bit_idx, _mux_offset, _mux_shift, _mux_width,\
  868. _name, _parent_name) \
  869. KER_CLKF(_gate_offset, _bit_idx, _mux_offset, _mux_shift, _mux_width,\
  870. _name, _parent_name, 0)\
/*
 * KER_CLKF_NOMUX() - KER_CLKF() variant for kernel clocks with a single,
 * fixed parent: gate only, .mux left NULL, num_parents hard-wired to 1.
 */
#define KER_CLKF_NOMUX(_gate_offset, _bit_idx,\
		       _name, _parent_name,\
		       _flags) \
{ \
	.gate = &(struct gate_cfg) {_gate_offset, _bit_idx},\
	.mux = NULL,\
	.name = _name, \
	.parent_name = _parent_name, \
	.num_parents = 1,\
	.flags = _flags,\
}
/*
 * Kernel clocks: one composite (parent mux + enable gate) per peripheral
 * kernel input. Enable bits live in the RCC_xxxENR registers; mux fields
 * live in the RCC_DxCCIPnR kernel-clock-selection registers at the given
 * shift/width. Array order is significant: each entry is registered as
 * hws[KERN_BANK + index] — do not reorder.
 */
static const struct composite_clk_cfg kclk[] = {
	KER_CLK(RCC_AHB3ENR, 16, RCC_D1CCIPR, 16, 1, "sdmmc1", sdmmc_src),
	KER_CLKF(RCC_AHB3ENR, 14, RCC_D1CCIPR, 4, 2, "quadspi", qspi_src,
		 CLK_IGNORE_UNUSED),
	KER_CLKF(RCC_AHB3ENR, 12, RCC_D1CCIPR, 0, 2, "fmc", fmc_src,
		 CLK_IGNORE_UNUSED),
	/* usb1otg/usb2otg share one source-select field (shift 20) */
	KER_CLK(RCC_AHB1ENR, 27, RCC_D2CCIP2R, 20, 2, "usb2otg", usbotg_src),
	KER_CLK(RCC_AHB1ENR, 25, RCC_D2CCIP2R, 20, 2, "usb1otg", usbotg_src),
	KER_CLK(RCC_AHB1ENR, 5, RCC_D3CCIPR, 16, 2, "adc12", adc_src),
	KER_CLK(RCC_AHB2ENR, 9, RCC_D1CCIPR, 16, 1, "sdmmc2", sdmmc_src),
	KER_CLK(RCC_AHB2ENR, 6, RCC_D2CCIP2R, 8, 2, "rng", rng_src),
	KER_CLK(RCC_AHB4ENR, 24, RCC_D3CCIPR, 16, 2, "adc3", adc_src),
	KER_CLKF(RCC_APB3ENR, 4, RCC_D1CCIPR, 8, 1, "dsi", dsi_src,
		 CLK_SET_RATE_PARENT),
	KER_CLKF_NOMUX(RCC_APB3ENR, 3, "ltdc", ltdc_src, CLK_SET_RATE_PARENT),
	KER_CLK(RCC_APB1LENR, 31, RCC_D2CCIP2R, 0, 3, "usart8", usart_src2),
	KER_CLK(RCC_APB1LENR, 30, RCC_D2CCIP2R, 0, 3, "usart7", usart_src2),
	KER_CLK(RCC_APB1LENR, 27, RCC_D2CCIP2R, 22, 2, "hdmicec", cec_src),
	KER_CLK(RCC_APB1LENR, 23, RCC_D2CCIP2R, 12, 2, "i2c3", i2c_src1),
	KER_CLK(RCC_APB1LENR, 22, RCC_D2CCIP2R, 12, 2, "i2c2", i2c_src1),
	KER_CLK(RCC_APB1LENR, 21, RCC_D2CCIP2R, 12, 2, "i2c1", i2c_src1),
	KER_CLK(RCC_APB1LENR, 20, RCC_D2CCIP2R, 0, 3, "uart5", usart_src2),
	KER_CLK(RCC_APB1LENR, 19, RCC_D2CCIP2R, 0, 3, "uart4", usart_src2),
	KER_CLK(RCC_APB1LENR, 18, RCC_D2CCIP2R, 0, 3, "usart3", usart_src2),
	KER_CLK(RCC_APB1LENR, 17, RCC_D2CCIP2R, 0, 3, "usart2", usart_src2),
	KER_CLK(RCC_APB1LENR, 16, RCC_D2CCIP1R, 20, 2, "spdifrx", spdifrx_src),
	KER_CLK(RCC_APB1LENR, 15, RCC_D2CCIP1R, 16, 3, "spi3", spi_src1),
	KER_CLK(RCC_APB1LENR, 14, RCC_D2CCIP1R, 16, 3, "spi2", spi_src1),
	KER_CLK(RCC_APB1LENR, 9, RCC_D2CCIP2R, 28, 3, "lptim1", lptim_src1),
	KER_CLK(RCC_APB1HENR, 8, RCC_D2CCIP1R, 28, 2, "fdcan", fdcan_src),
	KER_CLK(RCC_APB1HENR, 2, RCC_D2CCIP1R, 31, 1, "swp", swp_src),
	KER_CLK(RCC_APB2ENR, 29, RCC_CFGR, 14, 1, "hrtim", hrtim_src),
	KER_CLK(RCC_APB2ENR, 28, RCC_D2CCIP1R, 24, 1, "dfsdm1", dfsdm1_src),
	KER_CLKF(RCC_APB2ENR, 24, RCC_D2CCIP1R, 6, 3, "sai3", sai_src,
		 CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT),
	KER_CLKF(RCC_APB2ENR, 23, RCC_D2CCIP1R, 6, 3, "sai2", sai_src,
		 CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT),
	KER_CLKF(RCC_APB2ENR, 22, RCC_D2CCIP1R, 0, 3, "sai1", sai_src,
		 CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT),
	KER_CLK(RCC_APB2ENR, 20, RCC_D2CCIP1R, 16, 3, "spi5", spi_src2),
	KER_CLK(RCC_APB2ENR, 13, RCC_D2CCIP1R, 16, 3, "spi4", spi_src2),
	KER_CLK(RCC_APB2ENR, 12, RCC_D2CCIP1R, 16, 3, "spi1", spi_src1),
	KER_CLK(RCC_APB2ENR, 5, RCC_D2CCIP2R, 3, 3, "usart6", usart_src1),
	KER_CLK(RCC_APB2ENR, 4, RCC_D2CCIP2R, 3, 3, "usart1", usart_src1),
	/* SAI4 sub-blocks A/B share enable bit 21 but have separate muxes */
	KER_CLK(RCC_APB4ENR, 21, RCC_D3CCIPR, 24, 3, "sai4b", sai_src),
	KER_CLK(RCC_APB4ENR, 21, RCC_D3CCIPR, 21, 3, "sai4a", sai_src),
	/* lptim3/4/5 share one source-select field (shift 13) */
	KER_CLK(RCC_APB4ENR, 12, RCC_D3CCIPR, 13, 3, "lptim5", lptim_src2),
	KER_CLK(RCC_APB4ENR, 11, RCC_D3CCIPR, 13, 3, "lptim4", lptim_src2),
	KER_CLK(RCC_APB4ENR, 10, RCC_D3CCIPR, 13, 3, "lptim3", lptim_src2),
	KER_CLK(RCC_APB4ENR, 9, RCC_D3CCIPR, 10, 3, "lptim2", lptim_src2),
	KER_CLK(RCC_APB4ENR, 7, RCC_D3CCIPR, 8, 2, "i2c4", i2c_src2),
	KER_CLK(RCC_APB4ENR, 5, RCC_D3CCIPR, 28, 3, "spi6", spi_src3),
	KER_CLK(RCC_APB4ENR, 3, RCC_D3CCIPR, 0, 3, "lpuart1", lpuart1_src),
};
/* Generator config for kernel clocks: default mux + gate ops, no hw flags */
static struct composite_clk_gcfg kernel_clk_cfg = {
	M_CFG_MUX(NULL, 0),
	M_CFG_GATE(NULL, 0),
};
/* RTC clock */

/*
 * RTC & LSE registers are protected against parasitic write access.
 * PWR_CR_DBP bit must be set to enable write access to RTC registers.
 */
/* STM32_PWR_CR register offset within the PWR syscon regmap */
#define PWR_CR 0x00
/* STM32_PWR_CR bit field: disable backup-domain write protection */
#define PWR_CR_DBP BIT(8)
/* Generator config for rtc_ck: default mux + gate ops, no hw flags */
static struct composite_clk_gcfg rtc_clk_cfg = {
	M_CFG_MUX(NULL, 0),
	M_CFG_GATE(NULL, 0),
};

/* rtc_ck: enable bit 15 and a 2-bit source mux at shift 8, both in RCC_BDCR */
static const struct composite_clk_cfg rtc_clk =
	KER_CLK(RCC_BDCR, 15, RCC_BDCR, 8, 2, "rtc_ck", rtc_src);
/* Micro-controller output clock */
/*
 * Generator config for MCO pads: source mux + divider; the divider is
 * one-based and a zero field is allowed (treated as divide-by-one).
 */
static struct composite_clk_gcfg mco_clk_cfg = {
	M_CFG_MUX(NULL, 0),
	M_CFG_DIV(NULL, CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO),
};
/*
 * M_MCO_F() - composite_clk_cfg initializer for a micro-controller output:
 * a source mux (_mux_offset/_mux_shift/_mux_width) and a divider
 * (_rate_offset/_rate_shift/_rate_width), no gate. _parents must be an
 * array of parent-name strings (ARRAY_SIZE is taken on it).
 */
#define M_MCO_F(_name, _parents, _mux_offset, _mux_shift, _mux_width,\
		_rate_offset, _rate_shift, _rate_width,\
		_flags)\
{\
	.mux = &(struct muxdiv_cfg) {_mux_offset, _mux_shift, _mux_width },\
	.div = &(struct muxdiv_cfg) {_rate_offset, _rate_shift, _rate_width},\
	.gate = NULL,\
	.name = _name,\
	.parent_name = _parents,\
	.num_parents = ARRAY_SIZE(_parents),\
	.flags = _flags,\
}
/* MCO1/MCO2 outputs: mux and divider fields both live in RCC_CFGR */
static const struct composite_clk_cfg mco_clk[] = {
	M_MCO_F("mco1", mco_src1, RCC_CFGR, 22, 4, RCC_CFGR, 18, 4, 0),
	M_MCO_F("mco2", mco_src2, RCC_CFGR, 29, 3, RCC_CFGR, 25, 4, 0),
};
  976. static void __init stm32h7_rcc_init(struct device_node *np)
  977. {
  978. struct clk_hw_onecell_data *clk_data;
  979. struct composite_cfg c_cfg;
  980. int n;
  981. const char *hse_clk, *lse_clk, *i2s_clk;
  982. struct regmap *pdrm;
  983. clk_data = kzalloc(sizeof(*clk_data) +
  984. sizeof(*clk_data->hws) * STM32H7_MAX_CLKS,
  985. GFP_KERNEL);
  986. if (!clk_data)
  987. return;
  988. clk_data->num = STM32H7_MAX_CLKS;
  989. hws = clk_data->hws;
  990. for (n = 0; n < STM32H7_MAX_CLKS; n++)
  991. hws[n] = ERR_PTR(-ENOENT);
  992. /* get RCC base @ from DT */
  993. base = of_iomap(np, 0);
  994. if (!base) {
  995. pr_err("%s: unable to map resource", np->name);
  996. goto err_free_clks;
  997. }
  998. pdrm = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
  999. if (IS_ERR(pdrm))
  1000. pr_warn("%s: Unable to get syscfg\n", __func__);
  1001. else
  1002. /* In any case disable backup domain write protection
  1003. * and will never be enabled.
  1004. * Needed by LSE & RTC clocks.
  1005. */
  1006. regmap_update_bits(pdrm, PWR_CR, PWR_CR_DBP, PWR_CR_DBP);
  1007. /* Put parent names from DT */
  1008. hse_clk = of_clk_get_parent_name(np, 0);
  1009. lse_clk = of_clk_get_parent_name(np, 1);
  1010. i2s_clk = of_clk_get_parent_name(np, 2);
  1011. sai_src[3] = i2s_clk;
  1012. spi_src1[3] = i2s_clk;
  1013. /* Register Internal oscillators */
  1014. clk_hw_register_fixed_rate(NULL, "clk-hsi", NULL, 0, 64000000);
  1015. clk_hw_register_fixed_rate(NULL, "clk-csi", NULL, 0, 4000000);
  1016. clk_hw_register_fixed_rate(NULL, "clk-lsi", NULL, 0, 32000);
  1017. clk_hw_register_fixed_rate(NULL, "clk-rc48", NULL, 0, 48000);
  1018. /* This clock is coming from outside. Frequencies unknown */
  1019. hws[CK_DSI_PHY] = clk_hw_register_fixed_rate(NULL, "ck_dsi_phy", NULL,
  1020. 0, 0);
  1021. hws[HSI_DIV] = clk_hw_register_divider(NULL, "hsidiv", "clk-hsi", 0,
  1022. base + RCC_CR, 3, 2, CLK_DIVIDER_POWER_OF_TWO,
  1023. &stm32rcc_lock);
  1024. hws[HSE_1M] = clk_hw_register_divider(NULL, "hse_1M", "hse_ck", 0,
  1025. base + RCC_CFGR, 8, 6, CLK_DIVIDER_ONE_BASED |
  1026. CLK_DIVIDER_ALLOW_ZERO,
  1027. &stm32rcc_lock);
  1028. /* Mux system clocks */
  1029. for (n = 0; n < ARRAY_SIZE(stm32_mclk); n++)
  1030. hws[MCLK_BANK + n] = clk_hw_register_mux(NULL,
  1031. stm32_mclk[n].name,
  1032. stm32_mclk[n].parents,
  1033. stm32_mclk[n].num_parents,
  1034. stm32_mclk[n].flags,
  1035. stm32_mclk[n].offset + base,
  1036. stm32_mclk[n].shift,
  1037. stm32_mclk[n].width,
  1038. 0,
  1039. &stm32rcc_lock);
  1040. register_core_and_bus_clocks();
  1041. /* Oscillary clocks */
  1042. for (n = 0; n < ARRAY_SIZE(stm32_oclk); n++)
  1043. hws[OSC_BANK + n] = clk_register_ready_gate(NULL,
  1044. stm32_oclk[n].name,
  1045. stm32_oclk[n].parent,
  1046. stm32_oclk[n].gate_offset + base,
  1047. stm32_oclk[n].bit_idx,
  1048. stm32_oclk[n].bit_rdy,
  1049. stm32_oclk[n].flags,
  1050. &stm32rcc_lock);
  1051. hws[HSE_CK] = clk_register_ready_gate(NULL,
  1052. "hse_ck",
  1053. hse_clk,
  1054. RCC_CR + base,
  1055. 16, 17,
  1056. 0,
  1057. &stm32rcc_lock);
  1058. hws[LSE_CK] = clk_register_ready_gate(NULL,
  1059. "lse_ck",
  1060. lse_clk,
  1061. RCC_BDCR + base,
  1062. 0, 1,
  1063. 0,
  1064. &stm32rcc_lock);
  1065. hws[CSI_KER_DIV122 + n] = clk_hw_register_fixed_factor(NULL,
  1066. "csi_ker_div122", "csi_ker", 0, 1, 122);
  1067. /* PLLs */
  1068. for (n = 0; n < ARRAY_SIZE(stm32_pll); n++) {
  1069. int odf;
  1070. /* Register the VCO */
  1071. clk_register_stm32_pll(NULL, stm32_pll[n].name,
  1072. stm32_pll[n].parent_name, stm32_pll[n].flags,
  1073. stm32_pll[n].cfg,
  1074. &stm32rcc_lock);
  1075. /* Register the 3 output dividers */
  1076. for (odf = 0; odf < 3; odf++) {
  1077. int idx = n * 3 + odf;
  1078. get_cfg_composite_div(&odf_clk_gcfg, &stm32_odf[n][odf],
  1079. &c_cfg, &stm32rcc_lock);
  1080. hws[ODF_BANK + idx] = clk_hw_register_composite(NULL,
  1081. stm32_odf[n][odf].name,
  1082. stm32_odf[n][odf].parent_name,
  1083. stm32_odf[n][odf].num_parents,
  1084. c_cfg.mux_hw, c_cfg.mux_ops,
  1085. c_cfg.div_hw, c_cfg.div_ops,
  1086. c_cfg.gate_hw, c_cfg.gate_ops,
  1087. stm32_odf[n][odf].flags);
  1088. }
  1089. }
  1090. /* Peripheral clocks */
  1091. for (n = 0; n < ARRAY_SIZE(pclk); n++)
  1092. hws[PERIF_BANK + n] = clk_hw_register_gate(NULL, pclk[n].name,
  1093. pclk[n].parent,
  1094. pclk[n].flags, base + pclk[n].gate_offset,
  1095. pclk[n].bit_idx, pclk[n].flags, &stm32rcc_lock);
  1096. /* Kernel clocks */
  1097. for (n = 0; n < ARRAY_SIZE(kclk); n++) {
  1098. get_cfg_composite_div(&kernel_clk_cfg, &kclk[n], &c_cfg,
  1099. &stm32rcc_lock);
  1100. hws[KERN_BANK + n] = clk_hw_register_composite(NULL,
  1101. kclk[n].name,
  1102. kclk[n].parent_name,
  1103. kclk[n].num_parents,
  1104. c_cfg.mux_hw, c_cfg.mux_ops,
  1105. c_cfg.div_hw, c_cfg.div_ops,
  1106. c_cfg.gate_hw, c_cfg.gate_ops,
  1107. kclk[n].flags);
  1108. }
  1109. /* RTC clock (default state is off) */
  1110. clk_hw_register_fixed_rate(NULL, "off", NULL, 0, 0);
  1111. get_cfg_composite_div(&rtc_clk_cfg, &rtc_clk, &c_cfg, &stm32rcc_lock);
  1112. hws[RTC_CK] = clk_hw_register_composite(NULL,
  1113. rtc_clk.name,
  1114. rtc_clk.parent_name,
  1115. rtc_clk.num_parents,
  1116. c_cfg.mux_hw, c_cfg.mux_ops,
  1117. c_cfg.div_hw, c_cfg.div_ops,
  1118. c_cfg.gate_hw, c_cfg.gate_ops,
  1119. rtc_clk.flags);
  1120. /* Micro-controller clocks */
  1121. for (n = 0; n < ARRAY_SIZE(mco_clk); n++) {
  1122. get_cfg_composite_div(&mco_clk_cfg, &mco_clk[n], &c_cfg,
  1123. &stm32rcc_lock);
  1124. hws[MCO_BANK + n] = clk_hw_register_composite(NULL,
  1125. mco_clk[n].name,
  1126. mco_clk[n].parent_name,
  1127. mco_clk[n].num_parents,
  1128. c_cfg.mux_hw, c_cfg.mux_ops,
  1129. c_cfg.div_hw, c_cfg.div_ops,
  1130. c_cfg.gate_hw, c_cfg.gate_ops,
  1131. mco_clk[n].flags);
  1132. }
  1133. of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
  1134. return;
  1135. err_free_clks:
  1136. kfree(clk_data);
  1137. }
/*
 * The RCC node is a clock and reset controller, and these
 * functionalities are supported by different drivers that
 * match the same compatible strings.
 */
CLK_OF_DECLARE_DRIVER(stm32h7_rcc, "st,stm32h743-rcc", stm32h7_rcc_init);