clk-rcg2.c 22 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/kernel.h>
  6. #include <linux/bitops.h>
  7. #include <linux/err.h>
  8. #include <linux/bug.h>
  9. #include <linux/export.h>
  10. #include <linux/clk-provider.h>
  11. #include <linux/delay.h>
  12. #include <linux/regmap.h>
  13. #include <linux/math64.h>
  14. #include <asm/div64.h>
  15. #include "clk-rcg.h"
  16. #include "common.h"
  17. #define CMD_REG 0x0
  18. #define CMD_UPDATE BIT(0)
  19. #define CMD_ROOT_EN BIT(1)
  20. #define CMD_DIRTY_CFG BIT(4)
  21. #define CMD_DIRTY_N BIT(5)
  22. #define CMD_DIRTY_M BIT(6)
  23. #define CMD_DIRTY_D BIT(7)
  24. #define CMD_ROOT_OFF BIT(31)
  25. #define CFG_REG 0x4
  26. #define CFG_SRC_DIV_SHIFT 0
  27. #define CFG_SRC_SEL_SHIFT 8
  28. #define CFG_SRC_SEL_MASK (0x7 << CFG_SRC_SEL_SHIFT)
  29. #define CFG_MODE_SHIFT 12
  30. #define CFG_MODE_MASK (0x3 << CFG_MODE_SHIFT)
  31. #define CFG_MODE_DUAL_EDGE (0x2 << CFG_MODE_SHIFT)
  32. #define CFG_HW_CLK_CTRL_MASK BIT(20)
  33. #define M_REG 0x8
  34. #define N_REG 0xc
  35. #define D_REG 0x10
/* Rounding policy used when searching the frequency table for a rate. */
enum freq_policy {
	FLOOR,	/* pick the highest table entry not above the request */
	CEIL,	/* pick the lowest table entry not below the request */
};
  40. static int clk_rcg2_is_enabled(struct clk_hw *hw)
  41. {
  42. struct clk_rcg2 *rcg = to_clk_rcg2(hw);
  43. u32 cmd;
  44. int ret;
  45. ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
  46. if (ret)
  47. return ret;
  48. return (cmd & CMD_ROOT_OFF) == 0;
  49. }
  50. static u8 clk_rcg2_get_parent(struct clk_hw *hw)
  51. {
  52. struct clk_rcg2 *rcg = to_clk_rcg2(hw);
  53. int num_parents = clk_hw_get_num_parents(hw);
  54. u32 cfg;
  55. int i, ret;
  56. ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
  57. if (ret)
  58. goto err;
  59. cfg &= CFG_SRC_SEL_MASK;
  60. cfg >>= CFG_SRC_SEL_SHIFT;
  61. for (i = 0; i < num_parents; i++)
  62. if (cfg == rcg->parent_map[i].cfg)
  63. return i;
  64. err:
  65. pr_debug("%s: Clock %s has invalid parent, using default.\n",
  66. __func__, clk_hw_get_name(hw));
  67. return 0;
  68. }
  69. static int update_config(struct clk_rcg2 *rcg)
  70. {
  71. int count, ret;
  72. u32 cmd;
  73. struct clk_hw *hw = &rcg->clkr.hw;
  74. const char *name = clk_hw_get_name(hw);
  75. ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
  76. CMD_UPDATE, CMD_UPDATE);
  77. if (ret)
  78. return ret;
  79. /* Wait for update to take effect */
  80. for (count = 500; count > 0; count--) {
  81. ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
  82. if (ret)
  83. return ret;
  84. if (!(cmd & CMD_UPDATE))
  85. return 0;
  86. udelay(1);
  87. }
  88. WARN(1, "%s: rcg didn't update its configuration.", name);
  89. return 0;
  90. }
  91. static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
  92. {
  93. struct clk_rcg2 *rcg = to_clk_rcg2(hw);
  94. int ret;
  95. u32 cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
  96. ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
  97. CFG_SRC_SEL_MASK, cfg);
  98. if (ret)
  99. return ret;
  100. return update_config(rcg);
  101. }
  102. /*
  103. * Calculate m/n:d rate
  104. *
  105. * parent_rate m
  106. * rate = ----------- x ---
  107. * hid_div n
  108. */
  109. static unsigned long
  110. calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
  111. {
  112. if (hid_div) {
  113. rate *= 2;
  114. rate /= hid_div + 1;
  115. }
  116. if (mode) {
  117. u64 tmp = rate;
  118. tmp *= m;
  119. do_div(tmp, n);
  120. rate = tmp;
  121. }
  122. return rate;
  123. }
  124. static unsigned long
  125. clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
  126. {
  127. struct clk_rcg2 *rcg = to_clk_rcg2(hw);
  128. u32 cfg, hid_div, m = 0, n = 0, mode = 0, mask;
  129. regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
  130. if (rcg->mnd_width) {
  131. mask = BIT(rcg->mnd_width) - 1;
  132. regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + M_REG, &m);
  133. m &= mask;
  134. regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + N_REG, &n);
  135. n = ~n;
  136. n &= mask;
  137. n += m;
  138. mode = cfg & CFG_MODE_MASK;
  139. mode >>= CFG_MODE_SHIFT;
  140. }
  141. mask = BIT(rcg->hid_width) - 1;
  142. hid_div = cfg >> CFG_SRC_DIV_SHIFT;
  143. hid_div &= mask;
  144. return calc_rate(parent_rate, m, n, mode, hid_div);
  145. }
  146. static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
  147. struct clk_rate_request *req,
  148. enum freq_policy policy)
  149. {
  150. unsigned long clk_flags, rate = req->rate;
  151. struct clk_hw *p;
  152. struct clk_rcg2 *rcg = to_clk_rcg2(hw);
  153. int index;
  154. switch (policy) {
  155. case FLOOR:
  156. f = qcom_find_freq_floor(f, rate);
  157. break;
  158. case CEIL:
  159. f = qcom_find_freq(f, rate);
  160. break;
  161. default:
  162. return -EINVAL;
  163. };
  164. if (!f)
  165. return -EINVAL;
  166. index = qcom_find_src_index(hw, rcg->parent_map, f->src);
  167. if (index < 0)
  168. return index;
  169. clk_flags = clk_hw_get_flags(hw);
  170. p = clk_hw_get_parent_by_index(hw, index);
  171. if (clk_flags & CLK_SET_RATE_PARENT) {
  172. rate = f->freq;
  173. if (f->pre_div) {
  174. rate /= 2;
  175. rate *= f->pre_div + 1;
  176. }
  177. if (f->n) {
  178. u64 tmp = rate;
  179. tmp = tmp * f->n;
  180. do_div(tmp, f->m);
  181. rate = tmp;
  182. }
  183. } else {
  184. rate = clk_hw_get_rate(p);
  185. }
  186. req->best_parent_hw = p;
  187. req->best_parent_rate = rate;
  188. req->rate = f->freq;
  189. return 0;
  190. }
  191. static int clk_rcg2_determine_rate(struct clk_hw *hw,
  192. struct clk_rate_request *req)
  193. {
  194. struct clk_rcg2 *rcg = to_clk_rcg2(hw);
  195. return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, CEIL);
  196. }
  197. static int clk_rcg2_determine_floor_rate(struct clk_hw *hw,
  198. struct clk_rate_request *req)
  199. {
  200. struct clk_rcg2 *rcg = to_clk_rcg2(hw);
  201. return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, FLOOR);
  202. }
/*
 * Program the RCG's M/N/D and CFG registers for frequency table entry @f
 * WITHOUT triggering an update cycle (callers latch via update_config()).
 * Returns 0 or a negative errno from parent lookup / register access.
 */
static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
	u32 cfg, mask;
	struct clk_hw *hw = &rcg->clkr.hw;
	int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src);

	if (index < 0)
		return index;

	/* Only MND-capable RCGs with a fractional entry program M/N/D. */
	if (rcg->mnd_width && f->n) {
		mask = BIT(rcg->mnd_width) - 1;
		ret = regmap_update_bits(rcg->clkr.regmap,
				rcg->cmd_rcgr + M_REG, mask, f->m);
		if (ret)
			return ret;

		/* N is stored as ~(n - m), D as ~n (hardware encoding). */
		ret = regmap_update_bits(rcg->clkr.regmap,
				rcg->cmd_rcgr + N_REG, mask, ~(f->n - f->m));
		if (ret)
			return ret;

		ret = regmap_update_bits(rcg->clkr.regmap,
				rcg->cmd_rcgr + D_REG, mask, ~f->n);
		if (ret)
			return ret;
	}

	/* Rewrite divider, source select, mode and hw-clk-ctrl in one go. */
	mask = BIT(rcg->hid_width) - 1;
	mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK | CFG_HW_CLK_CTRL_MASK;
	cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
	cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
	/* m != n means a true fraction, which needs dual-edge mode. */
	if (rcg->mnd_width && f->n && (f->m != f->n))
		cfg |= CFG_MODE_DUAL_EDGE;
	return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
					mask, cfg);
}
  234. static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
  235. {
  236. int ret;
  237. ret = __clk_rcg2_configure(rcg, f);
  238. if (ret)
  239. return ret;
  240. return update_config(rcg);
  241. }
  242. static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
  243. enum freq_policy policy)
  244. {
  245. struct clk_rcg2 *rcg = to_clk_rcg2(hw);
  246. const struct freq_tbl *f;
  247. switch (policy) {
  248. case FLOOR:
  249. f = qcom_find_freq_floor(rcg->freq_tbl, rate);
  250. break;
  251. case CEIL:
  252. f = qcom_find_freq(rcg->freq_tbl, rate);
  253. break;
  254. default:
  255. return -EINVAL;
  256. };
  257. if (!f)
  258. return -EINVAL;
  259. return clk_rcg2_configure(rcg, f);
  260. }
  261. static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
  262. unsigned long parent_rate)
  263. {
  264. return __clk_rcg2_set_rate(hw, rate, CEIL);
  265. }
  266. static int clk_rcg2_set_floor_rate(struct clk_hw *hw, unsigned long rate,
  267. unsigned long parent_rate)
  268. {
  269. return __clk_rcg2_set_rate(hw, rate, FLOOR);
  270. }
  271. static int clk_rcg2_set_rate_and_parent(struct clk_hw *hw,
  272. unsigned long rate, unsigned long parent_rate, u8 index)
  273. {
  274. return __clk_rcg2_set_rate(hw, rate, CEIL);
  275. }
  276. static int clk_rcg2_set_floor_rate_and_parent(struct clk_hw *hw,
  277. unsigned long rate, unsigned long parent_rate, u8 index)
  278. {
  279. return __clk_rcg2_set_rate(hw, rate, FLOOR);
  280. }
/* Standard RCG2 clock ops: frequency-table based, CEIL rounding. */
const struct clk_ops clk_rcg2_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_rate,
	.set_rate = clk_rcg2_set_rate,
	.set_rate_and_parent = clk_rcg2_set_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_ops);
/* As clk_rcg2_ops, but rates round DOWN to the nearest table entry. */
const struct clk_ops clk_rcg2_floor_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_floor_rate,
	.set_rate = clk_rcg2_set_floor_rate,
	.set_rate_and_parent = clk_rcg2_set_floor_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops);
/* A num/den fraction applied to a parent rate (m/n of the RCG). */
struct frac_entry {
	int num;	/* numerator (programmed as m) */
	int den;	/* denominator (programmed as n) */
};
/* Pixel-clock fractions for a 675 MHz parent; zero entry terminates. */
static const struct frac_entry frac_table_675m[] = { /* link rate of 270M */
	{ 52, 295 },	/* 119 M */
	{ 11, 57 },	/* 130.25 M */
	{ 63, 307 },	/* 138.50 M */
	{ 11, 50 },	/* 148.50 M */
	{ 47, 206 },	/* 154 M */
	{ 31, 100 },	/* 205.25 M */
	{ 107, 269 },	/* 268.50 M */
	{ },
};
  315. static struct frac_entry frac_table_810m[] = { /* Link rate of 162M */
  316. { 31, 211 }, /* 119 M */
  317. { 32, 199 }, /* 130.25 M */
  318. { 63, 307 }, /* 138.50 M */
  319. { 11, 60 }, /* 148.50 M */
  320. { 50, 263 }, /* 154 M */
  321. { 31, 120 }, /* 205.25 M */
  322. { 119, 359 }, /* 268.50 M */
  323. { },
  324. };
/*
 * Set an eDP pixel clock rate by scanning the fraction table that matches
 * the parent rate (810 MHz table, else the 675 MHz one) for an entry
 * whose implied source rate is within +/-100 kHz of the actual parent.
 * The current HID divider is preserved from hardware; only m/n change.
 * Returns -EINVAL if no fraction fits.
 */
static int clk_edp_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = *rcg->freq_tbl;	/* template: src copied from table */
	const struct frac_entry *frac;
	int delta = 100000;	/* allowed parent-rate mismatch, in Hz */
	s64 src_rate = parent_rate;
	s64 request;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div;

	if (src_rate == 810000000)
		frac = frac_table_810m;
	else
		frac = frac_table_675m;

	for (; frac->num; frac++) {
		/* Source rate this fraction would need: rate * den / num. */
		request = rate;
		request *= frac->den;
		request = div_s64(request, frac->num);
		if ((src_rate < (request - delta)) ||
		    (src_rate > (request + delta)))
			continue;

		/* Keep the divider currently programmed in CFG. */
		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
				&hid_div);
		f.pre_div = hid_div;
		f.pre_div >>= CFG_SRC_DIV_SHIFT;
		f.pre_div &= mask;
		f.m = frac->num;
		f.n = frac->den;

		return clk_rcg2_configure(rcg, &f);
	}

	return -EINVAL;
}
  358. static int clk_edp_pixel_set_rate_and_parent(struct clk_hw *hw,
  359. unsigned long rate, unsigned long parent_rate, u8 index)
  360. {
  361. /* Parent index is set statically in frequency table */
  362. return clk_edp_pixel_set_rate(hw, rate, parent_rate);
  363. }
/*
 * determine_rate for eDP pixel clocks: force the parent named by the
 * first frequency table entry, then find a fraction (from the table
 * matching the parent's rate) within +/-100 kHz and report the exact
 * achievable rate via calc_rate().  Returns -EINVAL if nothing fits.
 */
static int clk_edp_pixel_determine_rate(struct clk_hw *hw,
					struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f = rcg->freq_tbl;
	const struct frac_entry *frac;
	int delta = 100000;	/* allowed parent-rate mismatch, in Hz */
	s64 request;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div;
	int index = qcom_find_src_index(hw, rcg->parent_map, f->src);

	/* Force the correct parent */
	req->best_parent_hw = clk_hw_get_parent_by_index(hw, index);
	req->best_parent_rate = clk_hw_get_rate(req->best_parent_hw);

	if (req->best_parent_rate == 810000000)
		frac = frac_table_810m;
	else
		frac = frac_table_675m;

	for (; frac->num; frac++) {
		/* Source rate this fraction would need: rate * den / num. */
		request = req->rate;
		request *= frac->den;
		request = div_s64(request, frac->num);
		if ((req->best_parent_rate < (request - delta)) ||
		    (req->best_parent_rate > (request + delta)))
			continue;

		/* Report rate using the divider currently in hardware. */
		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
				&hid_div);
		hid_div >>= CFG_SRC_DIV_SHIFT;
		hid_div &= mask;

		req->rate = calc_rate(req->best_parent_rate,
				      frac->num, frac->den,
				      !!frac->den, hid_div);
		return 0;
	}

	return -EINVAL;
}
/* Ops for eDP pixel clocks: fraction-table rate selection, fixed parent. */
const struct clk_ops clk_edp_pixel_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_edp_pixel_set_rate,
	.set_rate_and_parent = clk_edp_pixel_set_rate_and_parent,
	.determine_rate = clk_edp_pixel_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_edp_pixel_ops);
/*
 * determine_rate for DSI byte clocks: force the table's parent, round the
 * parent toward the request, then pick the half-integer divider
 * (2*parent/rate, encoded as 2*div - 1) capped at the HID field width.
 * Rejects a zero rate request.
 */
static int clk_byte_determine_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f = rcg->freq_tbl;
	int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
	unsigned long parent_rate, div;
	u32 mask = BIT(rcg->hid_width) - 1;
	struct clk_hw *p;

	if (req->rate == 0)
		return -EINVAL;

	req->best_parent_hw = p = clk_hw_get_parent_by_index(hw, index);
	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, req->rate);

	div = DIV_ROUND_UP((2 * parent_rate), req->rate) - 1;
	div = min_t(u32, div, mask);

	req->rate = calc_rate(parent_rate, 0, 0, 0, div);

	return 0;
}
  428. static int clk_byte_set_rate(struct clk_hw *hw, unsigned long rate,
  429. unsigned long parent_rate)
  430. {
  431. struct clk_rcg2 *rcg = to_clk_rcg2(hw);
  432. struct freq_tbl f = *rcg->freq_tbl;
  433. unsigned long div;
  434. u32 mask = BIT(rcg->hid_width) - 1;
  435. div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
  436. div = min_t(u32, div, mask);
  437. f.pre_div = div;
  438. return clk_rcg2_configure(rcg, &f);
  439. }
  440. static int clk_byte_set_rate_and_parent(struct clk_hw *hw,
  441. unsigned long rate, unsigned long parent_rate, u8 index)
  442. {
  443. /* Parent index is set statically in frequency table */
  444. return clk_byte_set_rate(hw, rate, parent_rate);
  445. }
/* Ops for DSI byte clocks with a fixed (table-specified) parent. */
const struct clk_ops clk_byte_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_byte_set_rate,
	.set_rate_and_parent = clk_byte_set_rate_and_parent,
	.determine_rate = clk_byte_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_byte_ops);
  456. static int clk_byte2_determine_rate(struct clk_hw *hw,
  457. struct clk_rate_request *req)
  458. {
  459. struct clk_rcg2 *rcg = to_clk_rcg2(hw);
  460. unsigned long parent_rate, div;
  461. u32 mask = BIT(rcg->hid_width) - 1;
  462. struct clk_hw *p;
  463. unsigned long rate = req->rate;
  464. if (rate == 0)
  465. return -EINVAL;
  466. p = req->best_parent_hw;
  467. req->best_parent_rate = parent_rate = clk_hw_round_rate(p, rate);
  468. div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
  469. div = min_t(u32, div, mask);
  470. req->rate = calc_rate(parent_rate, 0, 0, 0, div);
  471. return 0;
  472. }
/*
 * Program a byte2 clock: derive the half-integer divider for @rate, then
 * read the currently selected source from CFG and reuse it (the parent is
 * chosen at runtime, not by the frequency table).  Returns -EINVAL if the
 * hardware source value is not in the parent map.
 */
static int clk_byte2_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	unsigned long div;
	int i, num_parents = clk_hw_get_num_parents(hw);
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 cfg;

	/* Half-integer divider, encoded as 2*div - 1, clamped to field. */
	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	f.pre_div = div;

	/* Carry over the source currently programmed in hardware. */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++) {
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			return clk_rcg2_configure(rcg, &f);
		}
	}

	return -EINVAL;
}
  496. static int clk_byte2_set_rate_and_parent(struct clk_hw *hw,
  497. unsigned long rate, unsigned long parent_rate, u8 index)
  498. {
  499. /* Read the hardware to determine parent during set_rate */
  500. return clk_byte2_set_rate(hw, rate, parent_rate);
  501. }
/* Ops for byte2 clocks: parent chosen at runtime, divider from request. */
const struct clk_ops clk_byte2_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_byte2_set_rate,
	.set_rate_and_parent = clk_byte2_set_rate_and_parent,
	.determine_rate = clk_byte2_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_byte2_ops);
/* Candidate m/n fractions tried for pixel clocks; zero entry terminates. */
static const struct frac_entry frac_table_pixel[] = {
	{ 3, 8 },
	{ 2, 9 },
	{ 4, 9 },
	{ 1, 1 },
	{ }
};
  519. static int clk_pixel_determine_rate(struct clk_hw *hw,
  520. struct clk_rate_request *req)
  521. {
  522. unsigned long request, src_rate;
  523. int delta = 100000;
  524. const struct frac_entry *frac = frac_table_pixel;
  525. for (; frac->num; frac++) {
  526. request = (req->rate * frac->den) / frac->num;
  527. src_rate = clk_hw_round_rate(req->best_parent_hw, request);
  528. if ((src_rate < (request - delta)) ||
  529. (src_rate > (request + delta)))
  530. continue;
  531. req->best_parent_rate = src_rate;
  532. req->rate = (src_rate * frac->num) / frac->den;
  533. return 0;
  534. }
  535. return -EINVAL;
  536. }
/*
 * Program a pixel clock: keep the source currently selected in hardware,
 * keep the current HID divider, and pick the first fraction from
 * frac_table_pixel whose implied source rate is within +/-100 kHz of
 * @parent_rate.  Returns -EINVAL if no fraction fits.
 */
static int clk_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	const struct frac_entry *frac = frac_table_pixel;
	unsigned long request;
	int delta = 100000;	/* allowed parent-rate mismatch, in Hz */
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div, cfg;
	int i, num_parents = clk_hw_get_num_parents(hw);

	/* Reuse the source currently programmed in the CFG register. */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++)
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			break;
		}

	for (; frac->num; frac++) {
		/* Source rate this fraction would need. */
		request = (rate * frac->den) / frac->num;

		if ((parent_rate < (request - delta)) ||
		    (parent_rate > (request + delta)))
			continue;

		/* Preserve the divider currently in hardware. */
		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
				&hid_div);
		f.pre_div = hid_div;
		f.pre_div >>= CFG_SRC_DIV_SHIFT;
		f.pre_div &= mask;
		f.m = frac->num;
		f.n = frac->den;

		return clk_rcg2_configure(rcg, &f);
	}
	return -EINVAL;
}
  572. static int clk_pixel_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
  573. unsigned long parent_rate, u8 index)
  574. {
  575. return clk_pixel_set_rate(hw, rate, parent_rate);
  576. }
/* Ops for display pixel clocks using the generic fraction table. */
const struct clk_ops clk_pixel_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_pixel_set_rate,
	.set_rate_and_parent = clk_pixel_set_rate_and_parent,
	.determine_rate = clk_pixel_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_pixel_ops);
/*
 * determine_rate for the GFX3D clock.  Parent indices 0/2/3/4 are XO,
 * PLL9, PLL2 and PLL8 respectively (fixed by the parent map).  Rates at
 * or above PLL9's fixed rate are served from PLL9 directly; otherwise
 * the clock ping-pongs between PLL8 and PLL2 so the PLL not currently in
 * use can be reprogrammed to the new rate — this always forces a parent
 * switch, so set_rate_and_parent() is always the op invoked.
 */
static int clk_gfx3d_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct clk_rate_request parent_req = { };
	struct clk_hw *p2, *p8, *p9, *xo;
	unsigned long p9_rate;
	int ret;

	xo = clk_hw_get_parent_by_index(hw, 0);
	if (req->rate == clk_hw_get_rate(xo)) {
		/* Requesting the crystal rate: just mux to XO. */
		req->best_parent_hw = xo;
		return 0;
	}

	p9 = clk_hw_get_parent_by_index(hw, 2);
	p2 = clk_hw_get_parent_by_index(hw, 3);
	p8 = clk_hw_get_parent_by_index(hw, 4);

	/* PLL9 is a fixed rate PLL */
	p9_rate = clk_hw_get_rate(p9);

	parent_req.rate = req->rate = min(req->rate, p9_rate);
	if (req->rate == p9_rate) {
		req->rate = req->best_parent_rate = p9_rate;
		req->best_parent_hw = p9;
		return 0;
	}

	if (req->best_parent_hw == p9) {
		/* Are we going back to a previously used rate? */
		if (clk_hw_get_rate(p8) == req->rate)
			req->best_parent_hw = p8;
		else
			req->best_parent_hw = p2;
	} else if (req->best_parent_hw == p8) {
		req->best_parent_hw = p2;
	} else {
		req->best_parent_hw = p8;
	}

	/* Ask the chosen PLL what rate it can actually deliver. */
	ret = __clk_determine_rate(req->best_parent_hw, &parent_req);
	if (ret)
		return ret;

	req->rate = req->best_parent_rate = parent_req.rate;

	return 0;
}
  627. static int clk_gfx3d_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
  628. unsigned long parent_rate, u8 index)
  629. {
  630. struct clk_rcg2 *rcg = to_clk_rcg2(hw);
  631. u32 cfg;
  632. int ret;
  633. /* Just mux it, we don't use the division or m/n hardware */
  634. cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
  635. ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
  636. if (ret)
  637. return ret;
  638. return update_config(rcg);
  639. }
/* Intentionally a no-op; see the comment below. */
static int clk_gfx3d_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	/*
	 * We should never get here; clk_gfx3d_determine_rate() should always
	 * make us use a different parent than what we're currently using, so
	 * clk_gfx3d_set_rate_and_parent() should always be called.
	 */
	return 0;
}
/* Ops for the GFX3D clock, which ping-pongs between source PLLs. */
const struct clk_ops clk_gfx3d_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_gfx3d_set_rate,
	.set_rate_and_parent = clk_gfx3d_set_rate_and_parent,
	.determine_rate = clk_gfx3d_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_gfx3d_ops);
  660. static int clk_rcg2_set_force_enable(struct clk_hw *hw)
  661. {
  662. struct clk_rcg2 *rcg = to_clk_rcg2(hw);
  663. const char *name = clk_hw_get_name(hw);
  664. int ret, count;
  665. ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
  666. CMD_ROOT_EN, CMD_ROOT_EN);
  667. if (ret)
  668. return ret;
  669. /* wait for RCG to turn ON */
  670. for (count = 500; count > 0; count--) {
  671. if (clk_rcg2_is_enabled(hw))
  672. return 0;
  673. udelay(1);
  674. }
  675. pr_err("%s: RCG did not turn on\n", name);
  676. return -ETIMEDOUT;
  677. }
  678. static int clk_rcg2_clear_force_enable(struct clk_hw *hw)
  679. {
  680. struct clk_rcg2 *rcg = to_clk_rcg2(hw);
  681. return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
  682. CMD_ROOT_EN, 0);
  683. }
  684. static int
  685. clk_rcg2_shared_force_enable_clear(struct clk_hw *hw, const struct freq_tbl *f)
  686. {
  687. struct clk_rcg2 *rcg = to_clk_rcg2(hw);
  688. int ret;
  689. ret = clk_rcg2_set_force_enable(hw);
  690. if (ret)
  691. return ret;
  692. ret = clk_rcg2_configure(rcg, f);
  693. if (ret)
  694. return ret;
  695. return clk_rcg2_clear_force_enable(hw);
  696. }
/*
 * set_rate for shared RCGs.  If the clock is currently disabled, only the
 * CFG/M/N/D registers are written (no update cycle) — the configuration
 * takes effect later in clk_rcg2_shared_enable().  If the clock is
 * running, it is reconfigured under a temporary force-enable.
 */
static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
				    unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f;

	f = qcom_find_freq(rcg->freq_tbl, rate);
	if (!f)
		return -EINVAL;

	/*
	 * In case clock is disabled, update the CFG, M, N and D registers
	 * and don't hit the update bit of CMD register.
	 */
	if (!__clk_is_enabled(hw->clk))
		return __clk_rcg2_configure(rcg, f);

	return clk_rcg2_shared_force_enable_clear(hw, f);
}
  713. static int clk_rcg2_shared_set_rate_and_parent(struct clk_hw *hw,
  714. unsigned long rate, unsigned long parent_rate, u8 index)
  715. {
  716. return clk_rcg2_shared_set_rate(hw, rate, parent_rate);
  717. }
  718. static int clk_rcg2_shared_enable(struct clk_hw *hw)
  719. {
  720. struct clk_rcg2 *rcg = to_clk_rcg2(hw);
  721. int ret;
  722. /*
  723. * Set the update bit because required configuration has already
  724. * been written in clk_rcg2_shared_set_rate()
  725. */
  726. ret = clk_rcg2_set_force_enable(hw);
  727. if (ret)
  728. return ret;
  729. ret = update_config(rcg);
  730. if (ret)
  731. return ret;
  732. return clk_rcg2_clear_force_enable(hw);
  733. }
/*
 * Disable a shared RCG by parking it on the always-on safe source, while
 * preserving the current CFG contents so the rate can be restored on the
 * next enable.
 */
static void clk_rcg2_shared_disable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cfg;

	/*
	 * Store current configuration as switching to safe source would clear
	 * the SRC and DIV of CFG register
	 */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);

	/*
	 * Park the RCG at a safe configuration - sourced off of safe source.
	 * Force enable and disable the RCG while configuring it to safeguard
	 * against any update signal coming from the downstream clock.
	 * The current parent is still prepared and enabled at this point, and
	 * the safe source is always on while application processor subsystem
	 * is online. Therefore, the RCG can safely switch its parent.
	 */
	clk_rcg2_set_force_enable(hw);

	regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
		     rcg->safe_src_index << CFG_SRC_SEL_SHIFT);

	update_config(rcg);

	clk_rcg2_clear_force_enable(hw);

	/* Write back the stored configuration corresponding to current rate */
	regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
}
/*
 * Ops for RCGs shared with other (non-HLOS) masters: enable/disable park
 * the clock on a safe source instead of gating the root outright.
 */
const struct clk_ops clk_rcg2_shared_ops = {
	.enable = clk_rcg2_shared_enable,
	.disable = clk_rcg2_shared_disable,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_rate,
	.set_rate = clk_rcg2_shared_set_rate,
	.set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);