cgu.c 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787
  1. /*
  2. * Ingenic SoC CGU driver
  3. *
  4. * Copyright (c) 2013-2015 Imagination Technologies
  5. * Author: Paul Burton <paul.burton@mips.com>
  6. *
  7. * This program is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU General Public License as
  9. * published by the Free Software Foundation; either version 2 of
  10. * the License, or (at your option) any later version.
  11. *
  12. * This program is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. */
  17. #include <linux/bitops.h>
  18. #include <linux/clk.h>
  19. #include <linux/clk-provider.h>
  20. #include <linux/clkdev.h>
  21. #include <linux/delay.h>
  22. #include <linux/math64.h>
  23. #include <linux/of.h>
  24. #include <linux/of_address.h>
  25. #include <linux/slab.h>
  26. #include <linux/spinlock.h>
  27. #include "cgu.h"
  28. #define MHZ (1000 * 1000)
  29. /**
  30. * ingenic_cgu_gate_get() - get the value of clock gate register bit
  31. * @cgu: reference to the CGU whose registers should be read
  32. * @info: info struct describing the gate bit
  33. *
  34. * Retrieves the state of the clock gate bit described by info. The
  35. * caller must hold cgu->lock.
  36. *
  37. * Return: true if the gate bit is set, else false.
  38. */
  39. static inline bool
  40. ingenic_cgu_gate_get(struct ingenic_cgu *cgu,
  41. const struct ingenic_cgu_gate_info *info)
  42. {
  43. return !!(readl(cgu->base + info->reg) & BIT(info->bit))
  44. ^ info->clear_to_gate;
  45. }
  46. /**
  47. * ingenic_cgu_gate_set() - set the value of clock gate register bit
  48. * @cgu: reference to the CGU whose registers should be modified
  49. * @info: info struct describing the gate bit
  50. * @val: non-zero to gate a clock, otherwise zero
  51. *
  52. * Sets the given gate bit in order to gate or ungate a clock.
  53. *
  54. * The caller must hold cgu->lock.
  55. */
  56. static inline void
  57. ingenic_cgu_gate_set(struct ingenic_cgu *cgu,
  58. const struct ingenic_cgu_gate_info *info, bool val)
  59. {
  60. u32 clkgr = readl(cgu->base + info->reg);
  61. if (val ^ info->clear_to_gate)
  62. clkgr |= BIT(info->bit);
  63. else
  64. clkgr &= ~BIT(info->bit);
  65. writel(clkgr, cgu->base + info->reg);
  66. }
  67. /*
  68. * PLL operations
  69. */
  70. static unsigned long
  71. ingenic_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
  72. {
  73. struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
  74. struct ingenic_cgu *cgu = ingenic_clk->cgu;
  75. const struct ingenic_cgu_clk_info *clk_info;
  76. const struct ingenic_cgu_pll_info *pll_info;
  77. unsigned m, n, od_enc, od;
  78. bool bypass, enable;
  79. unsigned long flags;
  80. u32 ctl;
  81. clk_info = &cgu->clock_info[ingenic_clk->idx];
  82. BUG_ON(clk_info->type != CGU_CLK_PLL);
  83. pll_info = &clk_info->pll;
  84. spin_lock_irqsave(&cgu->lock, flags);
  85. ctl = readl(cgu->base + pll_info->reg);
  86. spin_unlock_irqrestore(&cgu->lock, flags);
  87. m = (ctl >> pll_info->m_shift) & GENMASK(pll_info->m_bits - 1, 0);
  88. m += pll_info->m_offset;
  89. n = (ctl >> pll_info->n_shift) & GENMASK(pll_info->n_bits - 1, 0);
  90. n += pll_info->n_offset;
  91. od_enc = ctl >> pll_info->od_shift;
  92. od_enc &= GENMASK(pll_info->od_bits - 1, 0);
  93. bypass = !pll_info->no_bypass_bit &&
  94. !!(ctl & BIT(pll_info->bypass_bit));
  95. enable = !!(ctl & BIT(pll_info->enable_bit));
  96. if (bypass)
  97. return parent_rate;
  98. for (od = 0; od < pll_info->od_max; od++) {
  99. if (pll_info->od_encoding[od] == od_enc)
  100. break;
  101. }
  102. BUG_ON(od == pll_info->od_max);
  103. od++;
  104. return div_u64((u64)parent_rate * m, n * od);
  105. }
  106. static unsigned long
  107. ingenic_pll_calc(const struct ingenic_cgu_clk_info *clk_info,
  108. unsigned long rate, unsigned long parent_rate,
  109. unsigned *pm, unsigned *pn, unsigned *pod)
  110. {
  111. const struct ingenic_cgu_pll_info *pll_info;
  112. unsigned m, n, od;
  113. pll_info = &clk_info->pll;
  114. od = 1;
  115. /*
  116. * The frequency after the input divider must be between 10 and 50 MHz.
  117. * The highest divider yields the best resolution.
  118. */
  119. n = parent_rate / (10 * MHZ);
  120. n = min_t(unsigned, n, 1 << clk_info->pll.n_bits);
  121. n = max_t(unsigned, n, pll_info->n_offset);
  122. m = (rate / MHZ) * od * n / (parent_rate / MHZ);
  123. m = min_t(unsigned, m, 1 << clk_info->pll.m_bits);
  124. m = max_t(unsigned, m, pll_info->m_offset);
  125. if (pm)
  126. *pm = m;
  127. if (pn)
  128. *pn = n;
  129. if (pod)
  130. *pod = od;
  131. return div_u64((u64)parent_rate * m, n * od);
  132. }
  133. static inline const struct ingenic_cgu_clk_info *to_clk_info(
  134. struct ingenic_clk *ingenic_clk)
  135. {
  136. struct ingenic_cgu *cgu = ingenic_clk->cgu;
  137. const struct ingenic_cgu_clk_info *clk_info;
  138. clk_info = &cgu->clock_info[ingenic_clk->idx];
  139. BUG_ON(clk_info->type != CGU_CLK_PLL);
  140. return clk_info;
  141. }
  142. static long
  143. ingenic_pll_round_rate(struct clk_hw *hw, unsigned long req_rate,
  144. unsigned long *prate)
  145. {
  146. struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
  147. const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
  148. return ingenic_pll_calc(clk_info, req_rate, *prate, NULL, NULL, NULL);
  149. }
  150. static int
  151. ingenic_pll_set_rate(struct clk_hw *hw, unsigned long req_rate,
  152. unsigned long parent_rate)
  153. {
  154. struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
  155. struct ingenic_cgu *cgu = ingenic_clk->cgu;
  156. const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
  157. const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
  158. unsigned long rate, flags;
  159. unsigned int m, n, od;
  160. u32 ctl;
  161. rate = ingenic_pll_calc(clk_info, req_rate, parent_rate,
  162. &m, &n, &od);
  163. if (rate != req_rate)
  164. pr_info("ingenic-cgu: request '%s' rate %luHz, actual %luHz\n",
  165. clk_info->name, req_rate, rate);
  166. spin_lock_irqsave(&cgu->lock, flags);
  167. ctl = readl(cgu->base + pll_info->reg);
  168. ctl &= ~(GENMASK(pll_info->m_bits - 1, 0) << pll_info->m_shift);
  169. ctl |= (m - pll_info->m_offset) << pll_info->m_shift;
  170. ctl &= ~(GENMASK(pll_info->n_bits - 1, 0) << pll_info->n_shift);
  171. ctl |= (n - pll_info->n_offset) << pll_info->n_shift;
  172. ctl &= ~(GENMASK(pll_info->od_bits - 1, 0) << pll_info->od_shift);
  173. ctl |= pll_info->od_encoding[od - 1] << pll_info->od_shift;
  174. writel(ctl, cgu->base + pll_info->reg);
  175. spin_unlock_irqrestore(&cgu->lock, flags);
  176. return 0;
  177. }
  178. static int ingenic_pll_enable(struct clk_hw *hw)
  179. {
  180. struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
  181. struct ingenic_cgu *cgu = ingenic_clk->cgu;
  182. const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
  183. const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
  184. const unsigned int timeout = 100;
  185. unsigned long flags;
  186. unsigned int i;
  187. u32 ctl;
  188. spin_lock_irqsave(&cgu->lock, flags);
  189. ctl = readl(cgu->base + pll_info->reg);
  190. ctl &= ~BIT(pll_info->bypass_bit);
  191. ctl |= BIT(pll_info->enable_bit);
  192. writel(ctl, cgu->base + pll_info->reg);
  193. /* wait for the PLL to stabilise */
  194. for (i = 0; i < timeout; i++) {
  195. ctl = readl(cgu->base + pll_info->reg);
  196. if (ctl & BIT(pll_info->stable_bit))
  197. break;
  198. mdelay(1);
  199. }
  200. spin_unlock_irqrestore(&cgu->lock, flags);
  201. if (i == timeout)
  202. return -EBUSY;
  203. return 0;
  204. }
  205. static void ingenic_pll_disable(struct clk_hw *hw)
  206. {
  207. struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
  208. struct ingenic_cgu *cgu = ingenic_clk->cgu;
  209. const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
  210. const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
  211. unsigned long flags;
  212. u32 ctl;
  213. spin_lock_irqsave(&cgu->lock, flags);
  214. ctl = readl(cgu->base + pll_info->reg);
  215. ctl &= ~BIT(pll_info->enable_bit);
  216. writel(ctl, cgu->base + pll_info->reg);
  217. spin_unlock_irqrestore(&cgu->lock, flags);
  218. }
  219. static int ingenic_pll_is_enabled(struct clk_hw *hw)
  220. {
  221. struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
  222. struct ingenic_cgu *cgu = ingenic_clk->cgu;
  223. const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
  224. const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
  225. unsigned long flags;
  226. u32 ctl;
  227. spin_lock_irqsave(&cgu->lock, flags);
  228. ctl = readl(cgu->base + pll_info->reg);
  229. spin_unlock_irqrestore(&cgu->lock, flags);
  230. return !!(ctl & BIT(pll_info->enable_bit));
  231. }
/* clk_ops used by all clocks of type CGU_CLK_PLL */
static const struct clk_ops ingenic_pll_ops = {
	.recalc_rate = ingenic_pll_recalc_rate,
	.round_rate = ingenic_pll_round_rate,
	.set_rate = ingenic_pll_set_rate,

	.enable = ingenic_pll_enable,
	.disable = ingenic_pll_disable,
	.is_enabled = ingenic_pll_is_enabled,
};
  240. /*
  241. * Operations for all non-PLL clocks
  242. */
  243. static u8 ingenic_clk_get_parent(struct clk_hw *hw)
  244. {
  245. struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
  246. struct ingenic_cgu *cgu = ingenic_clk->cgu;
  247. const struct ingenic_cgu_clk_info *clk_info;
  248. u32 reg;
  249. u8 i, hw_idx, idx = 0;
  250. clk_info = &cgu->clock_info[ingenic_clk->idx];
  251. if (clk_info->type & CGU_CLK_MUX) {
  252. reg = readl(cgu->base + clk_info->mux.reg);
  253. hw_idx = (reg >> clk_info->mux.shift) &
  254. GENMASK(clk_info->mux.bits - 1, 0);
  255. /*
  256. * Convert the hardware index to the parent index by skipping
  257. * over any -1's in the parents array.
  258. */
  259. for (i = 0; i < hw_idx; i++) {
  260. if (clk_info->parents[i] != -1)
  261. idx++;
  262. }
  263. }
  264. return idx;
  265. }
  266. static int ingenic_clk_set_parent(struct clk_hw *hw, u8 idx)
  267. {
  268. struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
  269. struct ingenic_cgu *cgu = ingenic_clk->cgu;
  270. const struct ingenic_cgu_clk_info *clk_info;
  271. unsigned long flags;
  272. u8 curr_idx, hw_idx, num_poss;
  273. u32 reg, mask;
  274. clk_info = &cgu->clock_info[ingenic_clk->idx];
  275. if (clk_info->type & CGU_CLK_MUX) {
  276. /*
  277. * Convert the parent index to the hardware index by adding
  278. * 1 for any -1 in the parents array preceding the given
  279. * index. That is, we want the index of idx'th entry in
  280. * clk_info->parents which does not equal -1.
  281. */
  282. hw_idx = curr_idx = 0;
  283. num_poss = 1 << clk_info->mux.bits;
  284. for (; hw_idx < num_poss; hw_idx++) {
  285. if (clk_info->parents[hw_idx] == -1)
  286. continue;
  287. if (curr_idx == idx)
  288. break;
  289. curr_idx++;
  290. }
  291. /* idx should always be a valid parent */
  292. BUG_ON(curr_idx != idx);
  293. mask = GENMASK(clk_info->mux.bits - 1, 0);
  294. mask <<= clk_info->mux.shift;
  295. spin_lock_irqsave(&cgu->lock, flags);
  296. /* write the register */
  297. reg = readl(cgu->base + clk_info->mux.reg);
  298. reg &= ~mask;
  299. reg |= hw_idx << clk_info->mux.shift;
  300. writel(reg, cgu->base + clk_info->mux.reg);
  301. spin_unlock_irqrestore(&cgu->lock, flags);
  302. return 0;
  303. }
  304. return idx ? -EINVAL : 0;
  305. }
  306. static unsigned long
  307. ingenic_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
  308. {
  309. struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
  310. struct ingenic_cgu *cgu = ingenic_clk->cgu;
  311. const struct ingenic_cgu_clk_info *clk_info;
  312. unsigned long rate = parent_rate;
  313. u32 div_reg, div;
  314. clk_info = &cgu->clock_info[ingenic_clk->idx];
  315. if (clk_info->type & CGU_CLK_DIV) {
  316. div_reg = readl(cgu->base + clk_info->div.reg);
  317. div = (div_reg >> clk_info->div.shift) &
  318. GENMASK(clk_info->div.bits - 1, 0);
  319. div += 1;
  320. div *= clk_info->div.div;
  321. rate /= div;
  322. } else if (clk_info->type & CGU_CLK_FIXDIV) {
  323. rate /= clk_info->fixdiv.div;
  324. }
  325. return rate;
  326. }
  327. static unsigned
  328. ingenic_clk_calc_div(const struct ingenic_cgu_clk_info *clk_info,
  329. unsigned long parent_rate, unsigned long req_rate)
  330. {
  331. unsigned div;
  332. /* calculate the divide */
  333. div = DIV_ROUND_UP(parent_rate, req_rate);
  334. /* and impose hardware constraints */
  335. div = min_t(unsigned, div, 1 << clk_info->div.bits);
  336. div = max_t(unsigned, div, 1);
  337. /*
  338. * If the divider value itself must be divided before being written to
  339. * the divider register, we must ensure we don't have any bits set that
  340. * would be lost as a result of doing so.
  341. */
  342. div /= clk_info->div.div;
  343. div *= clk_info->div.div;
  344. return div;
  345. }
  346. static long
  347. ingenic_clk_round_rate(struct clk_hw *hw, unsigned long req_rate,
  348. unsigned long *parent_rate)
  349. {
  350. struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
  351. struct ingenic_cgu *cgu = ingenic_clk->cgu;
  352. const struct ingenic_cgu_clk_info *clk_info;
  353. long rate = *parent_rate;
  354. clk_info = &cgu->clock_info[ingenic_clk->idx];
  355. if (clk_info->type & CGU_CLK_DIV)
  356. rate /= ingenic_clk_calc_div(clk_info, *parent_rate, req_rate);
  357. else if (clk_info->type & CGU_CLK_FIXDIV)
  358. rate /= clk_info->fixdiv.div;
  359. return rate;
  360. }
/*
 * ingenic_clk_set_rate() - program the clock's divider to hit req_rate.
 *
 * Only clocks with a programmable divider (CGU_CLK_DIV) can change rate
 * here. The computed divider must yield exactly req_rate or the request
 * is rejected.
 *
 * Return: 0 on success, -EINVAL for an unachievable rate or a clock
 * with no divider, -EBUSY if the hardware's busy bit never cleared.
 */
static int
ingenic_clk_set_rate(struct clk_hw *hw, unsigned long req_rate,
		     unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	const unsigned timeout = 100;
	unsigned long rate, flags;
	unsigned div, i;
	u32 reg, mask;
	int ret = 0;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_DIV) {
		div = ingenic_clk_calc_div(clk_info, parent_rate, req_rate);
		rate = parent_rate / div;

		/* only exact rates are accepted */
		if (rate != req_rate)
			return -EINVAL;

		spin_lock_irqsave(&cgu->lock, flags);
		reg = readl(cgu->base + clk_info->div.reg);

		/* update the divide; the field stores (div / div.div) - 1 */
		mask = GENMASK(clk_info->div.bits - 1, 0);
		reg &= ~(mask << clk_info->div.shift);
		reg |= ((div / clk_info->div.div) - 1) << clk_info->div.shift;

		/* clear the stop bit */
		if (clk_info->div.stop_bit != -1)
			reg &= ~BIT(clk_info->div.stop_bit);

		/* set the change enable bit */
		if (clk_info->div.ce_bit != -1)
			reg |= BIT(clk_info->div.ce_bit);

		/* update the hardware */
		writel(reg, cgu->base + clk_info->div.reg);

		/*
		 * wait for the change to take effect; note this polls with
		 * the spinlock held and interrupts disabled (up to ~100ms)
		 */
		if (clk_info->div.busy_bit != -1) {
			for (i = 0; i < timeout; i++) {
				reg = readl(cgu->base + clk_info->div.reg);
				if (!(reg & BIT(clk_info->div.busy_bit)))
					break;
				mdelay(1);
			}
			if (i == timeout)
				ret = -EBUSY;
		}

		spin_unlock_irqrestore(&cgu->lock, flags);
		return ret;
	}

	return -EINVAL;
}
  409. static int ingenic_clk_enable(struct clk_hw *hw)
  410. {
  411. struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
  412. struct ingenic_cgu *cgu = ingenic_clk->cgu;
  413. const struct ingenic_cgu_clk_info *clk_info;
  414. unsigned long flags;
  415. clk_info = &cgu->clock_info[ingenic_clk->idx];
  416. if (clk_info->type & CGU_CLK_GATE) {
  417. /* ungate the clock */
  418. spin_lock_irqsave(&cgu->lock, flags);
  419. ingenic_cgu_gate_set(cgu, &clk_info->gate, false);
  420. spin_unlock_irqrestore(&cgu->lock, flags);
  421. if (clk_info->gate.delay_us)
  422. udelay(clk_info->gate.delay_us);
  423. }
  424. return 0;
  425. }
  426. static void ingenic_clk_disable(struct clk_hw *hw)
  427. {
  428. struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
  429. struct ingenic_cgu *cgu = ingenic_clk->cgu;
  430. const struct ingenic_cgu_clk_info *clk_info;
  431. unsigned long flags;
  432. clk_info = &cgu->clock_info[ingenic_clk->idx];
  433. if (clk_info->type & CGU_CLK_GATE) {
  434. /* gate the clock */
  435. spin_lock_irqsave(&cgu->lock, flags);
  436. ingenic_cgu_gate_set(cgu, &clk_info->gate, true);
  437. spin_unlock_irqrestore(&cgu->lock, flags);
  438. }
  439. }
  440. static int ingenic_clk_is_enabled(struct clk_hw *hw)
  441. {
  442. struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
  443. struct ingenic_cgu *cgu = ingenic_clk->cgu;
  444. const struct ingenic_cgu_clk_info *clk_info;
  445. unsigned long flags;
  446. int enabled = 1;
  447. clk_info = &cgu->clock_info[ingenic_clk->idx];
  448. if (clk_info->type & CGU_CLK_GATE) {
  449. spin_lock_irqsave(&cgu->lock, flags);
  450. enabled = !ingenic_cgu_gate_get(cgu, &clk_info->gate);
  451. spin_unlock_irqrestore(&cgu->lock, flags);
  452. }
  453. return enabled;
  454. }
/* clk_ops for all non-PLL, non-custom clocks (mux/div/gate/fixdiv) */
static const struct clk_ops ingenic_clk_ops = {
	.get_parent = ingenic_clk_get_parent,
	.set_parent = ingenic_clk_set_parent,

	.recalc_rate = ingenic_clk_recalc_rate,
	.round_rate = ingenic_clk_round_rate,
	.set_rate = ingenic_clk_set_rate,

	.enable = ingenic_clk_enable,
	.disable = ingenic_clk_disable,
	.is_enabled = ingenic_clk_is_enabled,
};
  465. /*
  466. * Setup functions.
  467. */
  468. static int ingenic_register_clock(struct ingenic_cgu *cgu, unsigned idx)
  469. {
  470. const struct ingenic_cgu_clk_info *clk_info = &cgu->clock_info[idx];
  471. struct clk_init_data clk_init;
  472. struct ingenic_clk *ingenic_clk = NULL;
  473. struct clk *clk, *parent;
  474. const char *parent_names[4];
  475. unsigned caps, i, num_possible;
  476. int err = -EINVAL;
  477. BUILD_BUG_ON(ARRAY_SIZE(clk_info->parents) > ARRAY_SIZE(parent_names));
  478. if (clk_info->type == CGU_CLK_EXT) {
  479. clk = of_clk_get_by_name(cgu->np, clk_info->name);
  480. if (IS_ERR(clk)) {
  481. pr_err("%s: no external clock '%s' provided\n",
  482. __func__, clk_info->name);
  483. err = -ENODEV;
  484. goto out;
  485. }
  486. err = clk_register_clkdev(clk, clk_info->name, NULL);
  487. if (err) {
  488. clk_put(clk);
  489. goto out;
  490. }
  491. cgu->clocks.clks[idx] = clk;
  492. return 0;
  493. }
  494. if (!clk_info->type) {
  495. pr_err("%s: no clock type specified for '%s'\n", __func__,
  496. clk_info->name);
  497. goto out;
  498. }
  499. ingenic_clk = kzalloc(sizeof(*ingenic_clk), GFP_KERNEL);
  500. if (!ingenic_clk) {
  501. err = -ENOMEM;
  502. goto out;
  503. }
  504. ingenic_clk->hw.init = &clk_init;
  505. ingenic_clk->cgu = cgu;
  506. ingenic_clk->idx = idx;
  507. clk_init.name = clk_info->name;
  508. clk_init.flags = 0;
  509. clk_init.parent_names = parent_names;
  510. caps = clk_info->type;
  511. if (caps & (CGU_CLK_MUX | CGU_CLK_CUSTOM)) {
  512. clk_init.num_parents = 0;
  513. if (caps & CGU_CLK_MUX)
  514. num_possible = 1 << clk_info->mux.bits;
  515. else
  516. num_possible = ARRAY_SIZE(clk_info->parents);
  517. for (i = 0; i < num_possible; i++) {
  518. if (clk_info->parents[i] == -1)
  519. continue;
  520. parent = cgu->clocks.clks[clk_info->parents[i]];
  521. parent_names[clk_init.num_parents] =
  522. __clk_get_name(parent);
  523. clk_init.num_parents++;
  524. }
  525. BUG_ON(!clk_init.num_parents);
  526. BUG_ON(clk_init.num_parents > ARRAY_SIZE(parent_names));
  527. } else {
  528. BUG_ON(clk_info->parents[0] == -1);
  529. clk_init.num_parents = 1;
  530. parent = cgu->clocks.clks[clk_info->parents[0]];
  531. parent_names[0] = __clk_get_name(parent);
  532. }
  533. if (caps & CGU_CLK_CUSTOM) {
  534. clk_init.ops = clk_info->custom.clk_ops;
  535. caps &= ~CGU_CLK_CUSTOM;
  536. if (caps) {
  537. pr_err("%s: custom clock may not be combined with type 0x%x\n",
  538. __func__, caps);
  539. goto out;
  540. }
  541. } else if (caps & CGU_CLK_PLL) {
  542. clk_init.ops = &ingenic_pll_ops;
  543. clk_init.flags |= CLK_SET_RATE_GATE;
  544. caps &= ~CGU_CLK_PLL;
  545. if (caps) {
  546. pr_err("%s: PLL may not be combined with type 0x%x\n",
  547. __func__, caps);
  548. goto out;
  549. }
  550. } else {
  551. clk_init.ops = &ingenic_clk_ops;
  552. }
  553. /* nothing to do for gates or fixed dividers */
  554. caps &= ~(CGU_CLK_GATE | CGU_CLK_FIXDIV);
  555. if (caps & CGU_CLK_MUX) {
  556. if (!(caps & CGU_CLK_MUX_GLITCHFREE))
  557. clk_init.flags |= CLK_SET_PARENT_GATE;
  558. caps &= ~(CGU_CLK_MUX | CGU_CLK_MUX_GLITCHFREE);
  559. }
  560. if (caps & CGU_CLK_DIV) {
  561. caps &= ~CGU_CLK_DIV;
  562. } else {
  563. /* pass rate changes to the parent clock */
  564. clk_init.flags |= CLK_SET_RATE_PARENT;
  565. }
  566. if (caps) {
  567. pr_err("%s: unknown clock type 0x%x\n", __func__, caps);
  568. goto out;
  569. }
  570. clk = clk_register(NULL, &ingenic_clk->hw);
  571. if (IS_ERR(clk)) {
  572. pr_err("%s: failed to register clock '%s'\n", __func__,
  573. clk_info->name);
  574. err = PTR_ERR(clk);
  575. goto out;
  576. }
  577. err = clk_register_clkdev(clk, clk_info->name, NULL);
  578. if (err)
  579. goto out;
  580. cgu->clocks.clks[idx] = clk;
  581. out:
  582. if (err)
  583. kfree(ingenic_clk);
  584. return err;
  585. }
  586. struct ingenic_cgu *
  587. ingenic_cgu_new(const struct ingenic_cgu_clk_info *clock_info,
  588. unsigned num_clocks, struct device_node *np)
  589. {
  590. struct ingenic_cgu *cgu;
  591. cgu = kzalloc(sizeof(*cgu), GFP_KERNEL);
  592. if (!cgu)
  593. goto err_out;
  594. cgu->base = of_iomap(np, 0);
  595. if (!cgu->base) {
  596. pr_err("%s: failed to map CGU registers\n", __func__);
  597. goto err_out_free;
  598. }
  599. cgu->np = np;
  600. cgu->clock_info = clock_info;
  601. cgu->clocks.clk_num = num_clocks;
  602. spin_lock_init(&cgu->lock);
  603. return cgu;
  604. err_out_free:
  605. kfree(cgu);
  606. err_out:
  607. return NULL;
  608. }
  609. int ingenic_cgu_register_clocks(struct ingenic_cgu *cgu)
  610. {
  611. unsigned i;
  612. int err;
  613. cgu->clocks.clks = kcalloc(cgu->clocks.clk_num, sizeof(struct clk *),
  614. GFP_KERNEL);
  615. if (!cgu->clocks.clks) {
  616. err = -ENOMEM;
  617. goto err_out;
  618. }
  619. for (i = 0; i < cgu->clocks.clk_num; i++) {
  620. err = ingenic_register_clock(cgu, i);
  621. if (err)
  622. goto err_out_unregister;
  623. }
  624. err = of_clk_add_provider(cgu->np, of_clk_src_onecell_get,
  625. &cgu->clocks);
  626. if (err)
  627. goto err_out_unregister;
  628. return 0;
  629. err_out_unregister:
  630. for (i = 0; i < cgu->clocks.clk_num; i++) {
  631. if (!cgu->clocks.clks[i])
  632. continue;
  633. if (cgu->clock_info[i].type & CGU_CLK_EXT)
  634. clk_put(cgu->clocks.clks[i]);
  635. else
  636. clk_unregister(cgu->clocks.clks[i]);
  637. }
  638. kfree(cgu->clocks.clks);
  639. err_out:
  640. return err;
  641. }