// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/bug.h>
#include <linux/export.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/regmap.h>
#include <linux/math64.h>
#include <linux/slab.h>

#include <asm/div64.h>

#include "clk-rcg.h"
#include "common.h"
#define CMD_REG                 0x0
#define CMD_UPDATE              BIT(0)
#define CMD_ROOT_EN             BIT(1)
#define CMD_DIRTY_CFG           BIT(4)
#define CMD_DIRTY_N             BIT(5)
#define CMD_DIRTY_M             BIT(6)
#define CMD_DIRTY_D             BIT(7)
#define CMD_ROOT_OFF            BIT(31)

#define CFG_REG                 0x4
#define CFG_SRC_DIV_SHIFT       0
#define CFG_SRC_SEL_SHIFT       8
#define CFG_SRC_SEL_MASK        (0x7 << CFG_SRC_SEL_SHIFT)
#define CFG_MODE_SHIFT          12
#define CFG_MODE_MASK           (0x3 << CFG_MODE_SHIFT)
#define CFG_MODE_DUAL_EDGE      (0x2 << CFG_MODE_SHIFT)
#define CFG_HW_CLK_CTRL_MASK    BIT(20)

#define M_REG                   0x8
#define N_REG                   0xc
#define D_REG                   0x10

/* Dynamic Frequency Scaling */
#define MAX_PERF_LEVEL          8
#define SE_CMD_DFSR_OFFSET      0x14
#define SE_CMD_DFS_EN           BIT(0)
#define SE_PERF_DFSR(level)     (0x1c + 0x4 * (level))
#define SE_PERF_M_DFSR(level)   (0x5c + 0x4 * (level))
#define SE_PERF_N_DFSR(level)   (0x9c + 0x4 * (level))
enum freq_policy {
        FLOOR,
        CEIL,
};
static int clk_rcg2_is_enabled(struct clk_hw *hw)
{
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);
        u32 cmd;
        int ret;

        ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
        if (ret)
                return ret;

        return (cmd & CMD_ROOT_OFF) == 0;
}
static u8 clk_rcg2_get_parent(struct clk_hw *hw)
{
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);
        int num_parents = clk_hw_get_num_parents(hw);
        u32 cfg;
        int i, ret;

        ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
        if (ret)
                goto err;

        cfg &= CFG_SRC_SEL_MASK;
        cfg >>= CFG_SRC_SEL_SHIFT;

        for (i = 0; i < num_parents; i++)
                if (cfg == rcg->parent_map[i].cfg)
                        return i;

err:
        pr_debug("%s: Clock %s has invalid parent, using default.\n",
                 __func__, clk_hw_get_name(hw));
        return 0;
}
static int update_config(struct clk_rcg2 *rcg)
{
        int count, ret;
        u32 cmd;
        struct clk_hw *hw = &rcg->clkr.hw;
        const char *name = clk_hw_get_name(hw);

        ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
                                 CMD_UPDATE, CMD_UPDATE);
        if (ret)
                return ret;

        /* Wait for update to take effect */
        for (count = 500; count > 0; count--) {
                ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
                if (ret)
                        return ret;
                if (!(cmd & CMD_UPDATE))
                        return 0;
                udelay(1);
        }

        WARN(1, "%s: rcg didn't update its configuration.", name);
        return 0;
}
static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
{
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);
        int ret;
        u32 cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;

        ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
                                 CFG_SRC_SEL_MASK, cfg);
        if (ret)
                return ret;

        return update_config(rcg);
}
/*
 * Calculate m/n:d rate
 *
 *          parent_rate     m
 *   rate = ----------- x  ---
 *            hid_div       n
 */
static unsigned long
calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
{
        if (hid_div) {
                rate *= 2;
                rate /= hid_div + 1;
        }

        if (mode) {
                u64 tmp = rate;

                tmp *= m;
                do_div(tmp, n);
                rate = tmp;
        }

        return rate;
}
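
/*
 * Worked example for calc_rate() (illustrative numbers, not from any
 * particular SoC): with parent_rate = 800000000, a CFG divider field
 * hid_div = 3 and a dual-edge M/N of m = 1, n = 2, the result is
 *
 *   800000000 * 2 / (3 + 1) = 400000000   (half-integer divider of 2.0)
 *   400000000 * 1 / 2       = 200000000   (M/N scaling)
 */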
static unsigned long
clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);
        u32 cfg, hid_div, m = 0, n = 0, mode = 0, mask;

        regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);

        if (rcg->mnd_width) {
                mask = BIT(rcg->mnd_width) - 1;
                regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + M_REG, &m);
                m &= mask;
                regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + N_REG, &n);
                n = ~n;
                n &= mask;
                n += m;
                mode = cfg & CFG_MODE_MASK;
                mode >>= CFG_MODE_SHIFT;
        }

        mask = BIT(rcg->hid_width) - 1;
        hid_div = cfg >> CFG_SRC_DIV_SHIFT;
        hid_div &= mask;

        return calc_rate(parent_rate, m, n, mode, hid_div);
}
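
/*
 * Note on the N register decode above: the hardware holds ~(n - m), so
 * n is recovered as (~N_REG & mask) + m. For example, with mnd_width = 8,
 * m = 1 and n = 2 the N register reads 0xfe, and (~0xfe & 0xff) + 1 = 2.
 */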
static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
                                    struct clk_rate_request *req,
                                    enum freq_policy policy)
{
        unsigned long clk_flags, rate = req->rate;
        struct clk_hw *p;
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);
        int index;

        switch (policy) {
        case FLOOR:
                f = qcom_find_freq_floor(f, rate);
                break;
        case CEIL:
                f = qcom_find_freq(f, rate);
                break;
        default:
                return -EINVAL;
        }

        if (!f)
                return -EINVAL;

        index = qcom_find_src_index(hw, rcg->parent_map, f->src);
        if (index < 0)
                return index;

        clk_flags = clk_hw_get_flags(hw);
        p = clk_hw_get_parent_by_index(hw, index);
        if (clk_flags & CLK_SET_RATE_PARENT) {
                rate = f->freq;
                if (f->pre_div) {
                        rate /= 2;
                        rate *= f->pre_div + 1;
                }

                if (f->n) {
                        u64 tmp = rate;

                        tmp = tmp * f->n;
                        do_div(tmp, f->m);
                        rate = tmp;
                }
        } else {
                rate = clk_hw_get_rate(p);
        }
        req->best_parent_hw = p;
        req->best_parent_rate = rate;
        req->rate = f->freq;

        return 0;
}
static int clk_rcg2_determine_rate(struct clk_hw *hw,
                                   struct clk_rate_request *req)
{
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);

        return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, CEIL);
}

static int clk_rcg2_determine_floor_rate(struct clk_hw *hw,
                                         struct clk_rate_request *req)
{
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);

        return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, FLOOR);
}
static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
        u32 cfg, mask;
        struct clk_hw *hw = &rcg->clkr.hw;
        int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src);

        if (index < 0)
                return index;

        if (rcg->mnd_width && f->n) {
                mask = BIT(rcg->mnd_width) - 1;
                ret = regmap_update_bits(rcg->clkr.regmap,
                                         rcg->cmd_rcgr + M_REG, mask, f->m);
                if (ret)
                        return ret;

                ret = regmap_update_bits(rcg->clkr.regmap,
                                         rcg->cmd_rcgr + N_REG, mask,
                                         ~(f->n - f->m));
                if (ret)
                        return ret;

                ret = regmap_update_bits(rcg->clkr.regmap,
                                         rcg->cmd_rcgr + D_REG, mask, ~f->n);
                if (ret)
                        return ret;
        }

        mask = BIT(rcg->hid_width) - 1;
        mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK | CFG_HW_CLK_CTRL_MASK;
        cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
        cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
        if (rcg->mnd_width && f->n && (f->m != f->n))
                cfg |= CFG_MODE_DUAL_EDGE;

        return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
                                  mask, cfg);
}
static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
        int ret;

        ret = __clk_rcg2_configure(rcg, f);
        if (ret)
                return ret;

        return update_config(rcg);
}
static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
                               enum freq_policy policy)
{
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);
        const struct freq_tbl *f;

        switch (policy) {
        case FLOOR:
                f = qcom_find_freq_floor(rcg->freq_tbl, rate);
                break;
        case CEIL:
                f = qcom_find_freq(rcg->freq_tbl, rate);
                break;
        default:
                return -EINVAL;
        }

        if (!f)
                return -EINVAL;

        return clk_rcg2_configure(rcg, f);
}
static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
                             unsigned long parent_rate)
{
        return __clk_rcg2_set_rate(hw, rate, CEIL);
}

static int clk_rcg2_set_floor_rate(struct clk_hw *hw, unsigned long rate,
                                   unsigned long parent_rate)
{
        return __clk_rcg2_set_rate(hw, rate, FLOOR);
}

static int clk_rcg2_set_rate_and_parent(struct clk_hw *hw,
                unsigned long rate, unsigned long parent_rate, u8 index)
{
        return __clk_rcg2_set_rate(hw, rate, CEIL);
}

static int clk_rcg2_set_floor_rate_and_parent(struct clk_hw *hw,
                unsigned long rate, unsigned long parent_rate, u8 index)
{
        return __clk_rcg2_set_rate(hw, rate, FLOOR);
}
const struct clk_ops clk_rcg2_ops = {
        .is_enabled = clk_rcg2_is_enabled,
        .get_parent = clk_rcg2_get_parent,
        .set_parent = clk_rcg2_set_parent,
        .recalc_rate = clk_rcg2_recalc_rate,
        .determine_rate = clk_rcg2_determine_rate,
        .set_rate = clk_rcg2_set_rate,
        .set_rate_and_parent = clk_rcg2_set_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_ops);

const struct clk_ops clk_rcg2_floor_ops = {
        .is_enabled = clk_rcg2_is_enabled,
        .get_parent = clk_rcg2_get_parent,
        .set_parent = clk_rcg2_set_parent,
        .recalc_rate = clk_rcg2_recalc_rate,
        .determine_rate = clk_rcg2_determine_floor_rate,
        .set_rate = clk_rcg2_set_floor_rate,
        .set_rate_and_parent = clk_rcg2_set_floor_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops);
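
/*
 * Usage sketch (hypothetical driver code, not part of this file): a
 * clock-controller driver declares a frequency table and a struct
 * clk_rcg2 and points clkr.hw.init->ops at clk_rcg2_ops. P_XO, P_GPLL0,
 * gcc_parent_map_0, gcc_parent_names_0 and the F() frequency-table
 * macro are assumed to follow the usual qcom clock-driver conventions.
 *
 *      static const struct freq_tbl ftbl_example_clk_src[] = {
 *              F(19200000, P_XO, 1, 0, 0),
 *              F(100000000, P_GPLL0, 6, 0, 0),
 *              { }
 *      };
 *
 *      static struct clk_rcg2 example_clk_src = {
 *              .cmd_rcgr = 0x1000,
 *              .mnd_width = 8,
 *              .hid_width = 5,
 *              .parent_map = gcc_parent_map_0,
 *              .freq_tbl = ftbl_example_clk_src,
 *              .clkr.hw.init = &(struct clk_init_data){
 *                      .name = "example_clk_src",
 *                      .parent_names = gcc_parent_names_0,
 *                      .num_parents = 2,
 *                      .ops = &clk_rcg2_ops,
 *              },
 *      };
 */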
struct frac_entry {
        int num;
        int den;
};

static const struct frac_entry frac_table_675m[] = {    /* link rate of 270M */
        { 52, 295 },    /* 119 M */
        { 11, 57 },     /* 130.25 M */
        { 63, 307 },    /* 138.50 M */
        { 11, 50 },     /* 148.50 M */
        { 47, 206 },    /* 154 M */
        { 31, 100 },    /* 205.25 M */
        { 107, 269 },   /* 268.50 M */
        { },
};

static struct frac_entry frac_table_810m[] = { /* Link rate of 162M */
        { 31, 211 },    /* 119 M */
        { 32, 199 },    /* 130.25 M */
        { 63, 307 },    /* 138.50 M */
        { 11, 60 },     /* 148.50 M */
        { 50, 263 },    /* 154 M */
        { 31, 120 },    /* 205.25 M */
        { 119, 359 },   /* 268.50 M */
        { },
};
static int clk_edp_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
                                  unsigned long parent_rate)
{
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);
        struct freq_tbl f = *rcg->freq_tbl;
        const struct frac_entry *frac;
        int delta = 100000;
        s64 src_rate = parent_rate;
        s64 request;
        u32 mask = BIT(rcg->hid_width) - 1;
        u32 hid_div;

        if (src_rate == 810000000)
                frac = frac_table_810m;
        else
                frac = frac_table_675m;

        for (; frac->num; frac++) {
                request = rate;
                request *= frac->den;
                request = div_s64(request, frac->num);
                if ((src_rate < (request - delta)) ||
                    (src_rate > (request + delta)))
                        continue;

                regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
                            &hid_div);
                f.pre_div = hid_div;
                f.pre_div >>= CFG_SRC_DIV_SHIFT;
                f.pre_div &= mask;
                f.m = frac->num;
                f.n = frac->den;

                return clk_rcg2_configure(rcg, &f);
        }

        return -EINVAL;
}

static int clk_edp_pixel_set_rate_and_parent(struct clk_hw *hw,
                unsigned long rate, unsigned long parent_rate, u8 index)
{
        /* Parent index is set statically in frequency table */
        return clk_edp_pixel_set_rate(hw, rate, parent_rate);
}
static int clk_edp_pixel_determine_rate(struct clk_hw *hw,
                                        struct clk_rate_request *req)
{
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);
        const struct freq_tbl *f = rcg->freq_tbl;
        const struct frac_entry *frac;
        int delta = 100000;
        s64 request;
        u32 mask = BIT(rcg->hid_width) - 1;
        u32 hid_div;
        int index = qcom_find_src_index(hw, rcg->parent_map, f->src);

        /* Force the correct parent */
        req->best_parent_hw = clk_hw_get_parent_by_index(hw, index);
        req->best_parent_rate = clk_hw_get_rate(req->best_parent_hw);

        if (req->best_parent_rate == 810000000)
                frac = frac_table_810m;
        else
                frac = frac_table_675m;

        for (; frac->num; frac++) {
                request = req->rate;
                request *= frac->den;
                request = div_s64(request, frac->num);
                if ((req->best_parent_rate < (request - delta)) ||
                    (req->best_parent_rate > (request + delta)))
                        continue;

                regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
                            &hid_div);
                hid_div >>= CFG_SRC_DIV_SHIFT;
                hid_div &= mask;

                req->rate = calc_rate(req->best_parent_rate,
                                      frac->num, frac->den,
                                      !!frac->den, hid_div);
                return 0;
        }

        return -EINVAL;
}
const struct clk_ops clk_edp_pixel_ops = {
        .is_enabled = clk_rcg2_is_enabled,
        .get_parent = clk_rcg2_get_parent,
        .set_parent = clk_rcg2_set_parent,
        .recalc_rate = clk_rcg2_recalc_rate,
        .set_rate = clk_edp_pixel_set_rate,
        .set_rate_and_parent = clk_edp_pixel_set_rate_and_parent,
        .determine_rate = clk_edp_pixel_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_edp_pixel_ops);
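
/*
 * Worked example for the eDP fractional search above: to produce a
 * 148.5 MHz pixel clock from an 810 MHz parent, the {11, 60} entry of
 * frac_table_810m matches, since 148500000 * 60 / 11 = 810000000 falls
 * within the +/-100 kHz delta; the RCG is then programmed with m = 11,
 * n = 60.
 */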
static int clk_byte_determine_rate(struct clk_hw *hw,
                                   struct clk_rate_request *req)
{
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);
        const struct freq_tbl *f = rcg->freq_tbl;
        int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
        unsigned long parent_rate, div;
        u32 mask = BIT(rcg->hid_width) - 1;
        struct clk_hw *p;

        if (req->rate == 0)
                return -EINVAL;

        req->best_parent_hw = p = clk_hw_get_parent_by_index(hw, index);
        req->best_parent_rate = parent_rate = clk_hw_round_rate(p, req->rate);

        div = DIV_ROUND_UP((2 * parent_rate), req->rate) - 1;
        div = min_t(u32, div, mask);

        req->rate = calc_rate(parent_rate, 0, 0, 0, div);

        return 0;
}
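
/*
 * Worked example for the byte-clock divider math above: with
 * parent_rate = 750000000 and a requested rate of 187500000,
 * div = DIV_ROUND_UP(1500000000, 187500000) - 1 = 7, and
 * calc_rate(750000000, 0, 0, 0, 7) = 750000000 * 2 / 8 = 187500000,
 * i.e. a half-integer divider of 4.0.
 */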
static int clk_byte_set_rate(struct clk_hw *hw, unsigned long rate,
                             unsigned long parent_rate)
{
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);
        struct freq_tbl f = *rcg->freq_tbl;
        unsigned long div;
        u32 mask = BIT(rcg->hid_width) - 1;

        div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
        div = min_t(u32, div, mask);

        f.pre_div = div;

        return clk_rcg2_configure(rcg, &f);
}

static int clk_byte_set_rate_and_parent(struct clk_hw *hw,
                unsigned long rate, unsigned long parent_rate, u8 index)
{
        /* Parent index is set statically in frequency table */
        return clk_byte_set_rate(hw, rate, parent_rate);
}
const struct clk_ops clk_byte_ops = {
        .is_enabled = clk_rcg2_is_enabled,
        .get_parent = clk_rcg2_get_parent,
        .set_parent = clk_rcg2_set_parent,
        .recalc_rate = clk_rcg2_recalc_rate,
        .set_rate = clk_byte_set_rate,
        .set_rate_and_parent = clk_byte_set_rate_and_parent,
        .determine_rate = clk_byte_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_byte_ops);
static int clk_byte2_determine_rate(struct clk_hw *hw,
                                    struct clk_rate_request *req)
{
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);
        unsigned long parent_rate, div;
        u32 mask = BIT(rcg->hid_width) - 1;
        struct clk_hw *p;
        unsigned long rate = req->rate;

        if (rate == 0)
                return -EINVAL;

        p = req->best_parent_hw;
        req->best_parent_rate = parent_rate = clk_hw_round_rate(p, rate);

        div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
        div = min_t(u32, div, mask);

        req->rate = calc_rate(parent_rate, 0, 0, 0, div);

        return 0;
}
static int clk_byte2_set_rate(struct clk_hw *hw, unsigned long rate,
                              unsigned long parent_rate)
{
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);
        struct freq_tbl f = { 0 };
        unsigned long div;
        int i, num_parents = clk_hw_get_num_parents(hw);
        u32 mask = BIT(rcg->hid_width) - 1;
        u32 cfg;

        div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
        div = min_t(u32, div, mask);

        f.pre_div = div;

        regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
        cfg &= CFG_SRC_SEL_MASK;
        cfg >>= CFG_SRC_SEL_SHIFT;

        for (i = 0; i < num_parents; i++) {
                if (cfg == rcg->parent_map[i].cfg) {
                        f.src = rcg->parent_map[i].src;
                        return clk_rcg2_configure(rcg, &f);
                }
        }

        return -EINVAL;
}

static int clk_byte2_set_rate_and_parent(struct clk_hw *hw,
                unsigned long rate, unsigned long parent_rate, u8 index)
{
        /* Read the hardware to determine parent during set_rate */
        return clk_byte2_set_rate(hw, rate, parent_rate);
}
const struct clk_ops clk_byte2_ops = {
        .is_enabled = clk_rcg2_is_enabled,
        .get_parent = clk_rcg2_get_parent,
        .set_parent = clk_rcg2_set_parent,
        .recalc_rate = clk_rcg2_recalc_rate,
        .set_rate = clk_byte2_set_rate,
        .set_rate_and_parent = clk_byte2_set_rate_and_parent,
        .determine_rate = clk_byte2_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_byte2_ops);
static const struct frac_entry frac_table_pixel[] = {
        { 3, 8 },
        { 2, 9 },
        { 4, 9 },
        { 1, 1 },
        { }
};
static int clk_pixel_determine_rate(struct clk_hw *hw,
                                    struct clk_rate_request *req)
{
        unsigned long request, src_rate;
        int delta = 100000;
        const struct frac_entry *frac = frac_table_pixel;

        for (; frac->num; frac++) {
                request = (req->rate * frac->den) / frac->num;

                src_rate = clk_hw_round_rate(req->best_parent_hw, request);
                if ((src_rate < (request - delta)) ||
                    (src_rate > (request + delta)))
                        continue;

                req->best_parent_rate = src_rate;
                req->rate = (src_rate * frac->num) / frac->den;
                return 0;
        }

        return -EINVAL;
}
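
/*
 * Worked example for the fractional search above: for req->rate =
 * 150000000 the {3, 8} entry asks the parent for 150000000 * 8 / 3 =
 * 400000000; if the parent can round to 400 MHz within the +/-100 kHz
 * delta, the pixel rate becomes 400000000 * 3 / 8 = 150000000.
 */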
static int clk_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
                              unsigned long parent_rate)
{
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);
        struct freq_tbl f = { 0 };
        const struct frac_entry *frac = frac_table_pixel;
        unsigned long request;
        int delta = 100000;
        u32 mask = BIT(rcg->hid_width) - 1;
        u32 hid_div, cfg;
        int i, num_parents = clk_hw_get_num_parents(hw);

        regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
        cfg &= CFG_SRC_SEL_MASK;
        cfg >>= CFG_SRC_SEL_SHIFT;

        for (i = 0; i < num_parents; i++)
                if (cfg == rcg->parent_map[i].cfg) {
                        f.src = rcg->parent_map[i].src;
                        break;
                }

        for (; frac->num; frac++) {
                request = (rate * frac->den) / frac->num;

                if ((parent_rate < (request - delta)) ||
                    (parent_rate > (request + delta)))
                        continue;

                regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
                            &hid_div);
                f.pre_div = hid_div;
                f.pre_div >>= CFG_SRC_DIV_SHIFT;
                f.pre_div &= mask;
                f.m = frac->num;
                f.n = frac->den;

                return clk_rcg2_configure(rcg, &f);
        }

        return -EINVAL;
}

static int clk_pixel_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
                unsigned long parent_rate, u8 index)
{
        return clk_pixel_set_rate(hw, rate, parent_rate);
}
const struct clk_ops clk_pixel_ops = {
        .is_enabled = clk_rcg2_is_enabled,
        .get_parent = clk_rcg2_get_parent,
        .set_parent = clk_rcg2_set_parent,
        .recalc_rate = clk_rcg2_recalc_rate,
        .set_rate = clk_pixel_set_rate,
        .set_rate_and_parent = clk_pixel_set_rate_and_parent,
        .determine_rate = clk_pixel_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_pixel_ops);
static int clk_gfx3d_determine_rate(struct clk_hw *hw,
                                    struct clk_rate_request *req)
{
        struct clk_rate_request parent_req = { };
        struct clk_hw *p2, *p8, *p9, *xo;
        unsigned long p9_rate;
        int ret;

        xo = clk_hw_get_parent_by_index(hw, 0);
        if (req->rate == clk_hw_get_rate(xo)) {
                req->best_parent_hw = xo;
                return 0;
        }

        p9 = clk_hw_get_parent_by_index(hw, 2);
        p2 = clk_hw_get_parent_by_index(hw, 3);
        p8 = clk_hw_get_parent_by_index(hw, 4);

        /* PLL9 is a fixed rate PLL */
        p9_rate = clk_hw_get_rate(p9);

        parent_req.rate = req->rate = min(req->rate, p9_rate);
        if (req->rate == p9_rate) {
                req->rate = req->best_parent_rate = p9_rate;
                req->best_parent_hw = p9;
                return 0;
        }

        if (req->best_parent_hw == p9) {
                /* Are we going back to a previously used rate? */
                if (clk_hw_get_rate(p8) == req->rate)
                        req->best_parent_hw = p8;
                else
                        req->best_parent_hw = p2;
        } else if (req->best_parent_hw == p8) {
                req->best_parent_hw = p2;
        } else {
                req->best_parent_hw = p8;
        }

        ret = __clk_determine_rate(req->best_parent_hw, &parent_req);
        if (ret)
                return ret;

        req->rate = req->best_parent_rate = parent_req.rate;

        return 0;
}
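
/*
 * The parent selection above deliberately ping-pongs between PLL2 and
 * PLL8: whichever of the two currently feeds the RCG, the other one is
 * retuned to the new rate and then switched in, so a rate change never
 * reprograms the PLL the GPU is actively clocked from.
 */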
static int clk_gfx3d_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
                unsigned long parent_rate, u8 index)
{
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);
        u32 cfg;
        int ret;

        /* Just mux it, we don't use the division or m/n hardware */
        cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
        ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
        if (ret)
                return ret;

        return update_config(rcg);
}

static int clk_gfx3d_set_rate(struct clk_hw *hw, unsigned long rate,
                              unsigned long parent_rate)
{
        /*
         * We should never get here; clk_gfx3d_determine_rate() should always
         * make us use a different parent than what we're currently using, so
         * clk_gfx3d_set_rate_and_parent() should always be called.
         */
        return 0;
}
const struct clk_ops clk_gfx3d_ops = {
        .is_enabled = clk_rcg2_is_enabled,
        .get_parent = clk_rcg2_get_parent,
        .set_parent = clk_rcg2_set_parent,
        .recalc_rate = clk_rcg2_recalc_rate,
        .set_rate = clk_gfx3d_set_rate,
        .set_rate_and_parent = clk_gfx3d_set_rate_and_parent,
        .determine_rate = clk_gfx3d_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_gfx3d_ops);
static int clk_rcg2_set_force_enable(struct clk_hw *hw)
{
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);
        const char *name = clk_hw_get_name(hw);
        int ret, count;

        ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
                                 CMD_ROOT_EN, CMD_ROOT_EN);
        if (ret)
                return ret;

        /* wait for RCG to turn ON */
        for (count = 500; count > 0; count--) {
                if (clk_rcg2_is_enabled(hw))
                        return 0;

                udelay(1);
        }

        pr_err("%s: RCG did not turn on\n", name);
        return -ETIMEDOUT;
}

static int clk_rcg2_clear_force_enable(struct clk_hw *hw)
{
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);

        return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
                                  CMD_ROOT_EN, 0);
}
static int
clk_rcg2_shared_force_enable_clear(struct clk_hw *hw, const struct freq_tbl *f)
{
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);
        int ret;

        ret = clk_rcg2_set_force_enable(hw);
        if (ret)
                return ret;

        ret = clk_rcg2_configure(rcg, f);
        if (ret)
                return ret;

        return clk_rcg2_clear_force_enable(hw);
}
static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
                                    unsigned long parent_rate)
{
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);
        const struct freq_tbl *f;

        f = qcom_find_freq(rcg->freq_tbl, rate);
        if (!f)
                return -EINVAL;

        /*
         * In case clock is disabled, update the CFG, M, N and D registers
         * and don't hit the update bit of CMD register.
         */
        if (!__clk_is_enabled(hw->clk))
                return __clk_rcg2_configure(rcg, f);

        return clk_rcg2_shared_force_enable_clear(hw, f);
}

static int clk_rcg2_shared_set_rate_and_parent(struct clk_hw *hw,
                unsigned long rate, unsigned long parent_rate, u8 index)
{
        return clk_rcg2_shared_set_rate(hw, rate, parent_rate);
}
static int clk_rcg2_shared_enable(struct clk_hw *hw)
{
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);
        int ret;

        /*
         * Set the update bit because required configuration has already
         * been written in clk_rcg2_shared_set_rate()
         */
        ret = clk_rcg2_set_force_enable(hw);
        if (ret)
                return ret;

        ret = update_config(rcg);
        if (ret)
                return ret;

        return clk_rcg2_clear_force_enable(hw);
}
static void clk_rcg2_shared_disable(struct clk_hw *hw)
{
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);
        u32 cfg;

        /*
         * Store current configuration as switching to safe source would clear
         * the SRC and DIV of CFG register
         */
        regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);

        /*
         * Park the RCG at a safe configuration - sourced off of safe source.
         * Force enable and disable the RCG while configuring it to safeguard
         * against any update signal coming from the downstream clock.
         * The current parent is still prepared and enabled at this point, and
         * the safe source is always on while application processor subsystem
         * is online. Therefore, the RCG can safely switch its parent.
         */
        clk_rcg2_set_force_enable(hw);

        regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
                     rcg->safe_src_index << CFG_SRC_SEL_SHIFT);

        update_config(rcg);

        clk_rcg2_clear_force_enable(hw);

        /* Write back the stored configuration corresponding to current rate */
        regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
}
const struct clk_ops clk_rcg2_shared_ops = {
        .enable = clk_rcg2_shared_enable,
        .disable = clk_rcg2_shared_disable,
        .get_parent = clk_rcg2_get_parent,
        .set_parent = clk_rcg2_set_parent,
        .recalc_rate = clk_rcg2_recalc_rate,
        .determine_rate = clk_rcg2_determine_rate,
        .set_rate = clk_rcg2_shared_set_rate,
        .set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);
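
/*
 * Usage sketch (hypothetical driver code, not part of this file): a
 * shared RCG is declared like a regular one, plus a safe_src_index
 * naming the always-on parent to park on while disabled. The parent
 * map, names and offsets below are placeholders.
 *
 *      static struct clk_rcg2 example_shared_clk_src = {
 *              .cmd_rcgr = 0x48044,
 *              .hid_width = 5,
 *              .safe_src_index = 0,
 *              .parent_map = example_parent_map,
 *              .freq_tbl = ftbl_example_shared_clk_src,
 *              .clkr.hw.init = &(struct clk_init_data){
 *                      .name = "example_shared_clk_src",
 *                      .parent_names = example_parent_names,
 *                      .num_parents = 2,
 *                      .ops = &clk_rcg2_shared_ops,
 *              },
 *      };
 */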
/* Common APIs to be used for DFS based RCGR */
static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l,
                                       struct freq_tbl *f)
{
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);
        struct clk_hw *p;
        unsigned long prate = 0;
        u32 val, mask, cfg, mode, src;
        int i, num_parents;

        regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(l), &cfg);

        mask = BIT(rcg->hid_width) - 1;
        f->pre_div = 1;
        if (cfg & mask)
                f->pre_div = cfg & mask;

        /*
         * Extract the source select into its own variable; cfg is still
         * needed intact below to read out the mode field.
         */
        src = cfg & CFG_SRC_SEL_MASK;
        src >>= CFG_SRC_SEL_SHIFT;

        num_parents = clk_hw_get_num_parents(hw);
        for (i = 0; i < num_parents; i++) {
                if (src == rcg->parent_map[i].cfg) {
                        f->src = rcg->parent_map[i].src;
                        p = clk_hw_get_parent_by_index(&rcg->clkr.hw, i);
                        prate = clk_hw_get_rate(p);
                }
        }

        mode = cfg & CFG_MODE_MASK;
        mode >>= CFG_MODE_SHIFT;
        if (mode) {
                mask = BIT(rcg->mnd_width) - 1;
                regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_M_DFSR(l),
                            &val);
                val &= mask;
                f->m = val;

                regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_N_DFSR(l),
                            &val);
                val = ~val;
                val &= mask;
                val += f->m;
                f->n = val;
        }

        f->freq = calc_rate(prate, f->m, f->n, mode, f->pre_div);
}
static int clk_rcg2_dfs_populate_freq_table(struct clk_rcg2 *rcg)
{
        struct freq_tbl *freq_tbl;
        int i;

        /* Allocate space for 1 extra since table is NULL terminated */
        freq_tbl = kcalloc(MAX_PERF_LEVEL + 1, sizeof(*freq_tbl), GFP_KERNEL);
        if (!freq_tbl)
                return -ENOMEM;
        rcg->freq_tbl = freq_tbl;

        for (i = 0; i < MAX_PERF_LEVEL; i++)
                clk_rcg2_dfs_populate_freq(&rcg->clkr.hw, i, freq_tbl + i);

        return 0;
}
static int clk_rcg2_dfs_determine_rate(struct clk_hw *hw,
                                       struct clk_rate_request *req)
{
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);
        int ret;

        if (!rcg->freq_tbl) {
                ret = clk_rcg2_dfs_populate_freq_table(rcg);
                if (ret) {
                        pr_err("Failed to update DFS tables for %s\n",
                               clk_hw_get_name(hw));
                        return ret;
                }
        }

        return clk_rcg2_determine_rate(hw, req);
}
static unsigned long
clk_rcg2_dfs_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);
        u32 level, mask, cfg, m = 0, n = 0, mode, pre_div;

        regmap_read(rcg->clkr.regmap,
                    rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &level);
        level &= GENMASK(4, 1);
        level >>= 1;

        if (rcg->freq_tbl)
                return rcg->freq_tbl[level].freq;

        /*
         * Assume that parent_rate is actually the parent because
         * we can't do any better at figuring it out when the table
         * hasn't been populated yet. We only populate the table
         * in determine_rate because we can't guarantee the parents
         * will be registered with the framework until then.
         */
        regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(level),
                    &cfg);

        mask = BIT(rcg->hid_width) - 1;
        pre_div = 1;
        if (cfg & mask)
                pre_div = cfg & mask;

        mode = cfg & CFG_MODE_MASK;
        mode >>= CFG_MODE_SHIFT;
        if (mode) {
                mask = BIT(rcg->mnd_width) - 1;
                regmap_read(rcg->clkr.regmap,
                            rcg->cmd_rcgr + SE_PERF_M_DFSR(level), &m);
                m &= mask;

                regmap_read(rcg->clkr.regmap,
                            rcg->cmd_rcgr + SE_PERF_N_DFSR(level), &n);
                n = ~n;
                n &= mask;
                n += m;
        }

        return calc_rate(parent_rate, m, n, mode, pre_div);
}
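
/*
 * Example of the perf-level decode above: if the SE_CMD_DFSR register
 * reads 0x0b (0b01011), bits [4:1] give level = 0b0101 = 5, so the rate
 * comes from DFS performance bank 5.
 */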
static const struct clk_ops clk_rcg2_dfs_ops = {
        .is_enabled = clk_rcg2_is_enabled,
        .get_parent = clk_rcg2_get_parent,
        .determine_rate = clk_rcg2_dfs_determine_rate,
        .recalc_rate = clk_rcg2_dfs_recalc_rate,
};
static int clk_rcg2_enable_dfs(const struct clk_rcg_dfs_data *data,
                               struct regmap *regmap)
{
        struct clk_rcg2 *rcg = data->rcg;
        struct clk_init_data *init = data->init;
        u32 val;
        int ret;

        ret = regmap_read(regmap, rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &val);
        if (ret)
                return -EINVAL;

        if (!(val & SE_CMD_DFS_EN))
                return 0;

        /*
         * Rate changes with consumer writing a register in
         * their own I/O region
         */
        init->flags |= CLK_GET_RATE_NOCACHE;
        init->ops = &clk_rcg2_dfs_ops;

        rcg->freq_tbl = NULL;

        pr_debug("DFS registered for clk %s\n", init->name);

        return 0;
}
int qcom_cc_register_rcg_dfs(struct regmap *regmap,
                             const struct clk_rcg_dfs_data *rcgs, size_t len)
{
        int i, ret;

        for (i = 0; i < len; i++) {
                ret = clk_rcg2_enable_dfs(&rcgs[i], regmap);
                if (ret) {
                        const char *name = rcgs[i].init->name;

                        pr_err("DFS register failed for clk %s\n", name);
                        return ret;
                }
        }

        return 0;
}
EXPORT_SYMBOL_GPL(qcom_cc_register_rcg_dfs);
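
/*
 * Usage sketch (hypothetical driver code, not part of this file): a
 * clock-controller driver hands its DFS-capable RCGs to
 * qcom_cc_register_rcg_dfs() at probe time; each entry pairs a struct
 * clk_rcg2 with its mutable clk_init_data so the ops can be swapped to
 * clk_rcg2_dfs_ops when the DFS enable bit is found set.
 *
 *      static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
 *              { .rcg = &example_uart_clk_src, .init = &example_uart_init },
 *      };
 *
 *      ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
 *                                     ARRAY_SIZE(gcc_dfs_clocks));
 */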