gk20a.c

/*
 * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Shamelessly ripped off from ChromeOS's gk20a/clk_pllg.c
 *
 */
#include "priv.h"
#include "gk20a.h"

#include <core/tegra.h>
#include <subdev/timer.h>

static const u8 _pl_to_div[] = {
/* PL:   0, 1, 2, 3, 4, 5, 6,  7,  8,  9, 10, 11, 12, 13, 14 */
/* p: */ 1, 2, 3, 4, 5, 6, 8, 10, 12, 16, 12, 16, 20, 24, 32,
};

static u32 pl_to_div(u32 pl)
{
        if (pl >= ARRAY_SIZE(_pl_to_div))
                return 1;

        return _pl_to_div[pl];
}

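/*
 * Inverse of _pl_to_div[]: return the smallest PL whose divider is at
 * least "div", saturating at the largest PL.
 */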
static u32 div_to_pl(u32 div)
{
        u32 pl;

        for (pl = 0; pl < ARRAY_SIZE(_pl_to_div) - 1; pl++) {
                if (_pl_to_div[pl] >= div)
                        return pl;
        }

        return ARRAY_SIZE(_pl_to_div) - 1;
}

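/*
 * PLL parameter limits, all frequencies in kHz (cf. the /KHZ conversions
 * in gk20a_pllg_calc_mnp()): the VCO may run between 1 GHz and 2.064 GHz,
 * and the divided reference u = ref/M must stay between 12 and 38 MHz.
 */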
static const struct gk20a_clk_pllg_params gk20a_pllg_params = {
        .min_vco = 1000000, .max_vco = 2064000,
        .min_u = 12000, .max_u = 38000,
        .min_m = 1, .max_m = 255,
        .min_n = 8, .max_n = 255,
        .min_pl = 1, .max_pl = 32,
};

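/*
 * M, N and PL share the single GPCPLL_COEFF register; MASK() and the
 * GPCPLL_COEFF_*_SHIFT/_WIDTH constants come from gk20a.h.
 */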
void
gk20a_pllg_read_mnp(struct gk20a_clk *clk, struct gk20a_pll *pll)
{
        struct nvkm_device *device = clk->base.subdev.device;
        u32 val;

        val = nvkm_rd32(device, GPCPLL_COEFF);
        pll->m = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
        pll->n = (val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH);
        pll->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
}

void
gk20a_pllg_write_mnp(struct gk20a_clk *clk, const struct gk20a_pll *pll)
{
        struct nvkm_device *device = clk->base.subdev.device;
        u32 val;

        val = (pll->m & MASK(GPCPLL_COEFF_M_WIDTH)) << GPCPLL_COEFF_M_SHIFT;
        val |= (pll->n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT;
        val |= (pll->pl & MASK(GPCPLL_COEFF_P_WIDTH)) << GPCPLL_COEFF_P_SHIFT;
        nvkm_wr32(device, GPCPLL_COEFF, val);
}

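/*
 * Output rate in Hz: VCO = (parent / M) * N, post-divided by div(PL).
 * The final /2 converts gpc2clk (the PLL output, cf. the GPC2CLK register
 * names) to the GPC clock, which appears to run at half that rate. E.g. a
 * 19.2 MHz parent with M=1, N=100 and div(PL)=2 gives a 1920 MHz VCO and
 * a 480 MHz GPC clock.
 */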
u32
gk20a_pllg_calc_rate(struct gk20a_clk *clk, struct gk20a_pll *pll)
{
        u32 rate;
        u32 divider;

        rate = clk->parent_rate * pll->n;
        divider = pll->m * clk->pl_to_div(pll->pl);

        return rate / divider / 2;
}

int
gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate,
                    struct gk20a_pll *pll)
{
        struct nvkm_subdev *subdev = &clk->base.subdev;
        u32 target_clk_f, ref_clk_f, target_freq;
        u32 min_vco_f, max_vco_f;
        u32 low_pl, high_pl, best_pl;
        u32 target_vco_f;
        u32 best_m, best_n;
        u32 best_delta = ~0;
        u32 pl;

        target_clk_f = rate * 2 / KHZ;
        ref_clk_f = clk->parent_rate / KHZ;

        target_vco_f = target_clk_f + target_clk_f / 50;
        max_vco_f = max(clk->params->max_vco, target_vco_f);
        min_vco_f = clk->params->min_vco;
        best_m = clk->params->max_m;
        best_n = clk->params->min_n;
        best_pl = clk->params->min_pl;

        /* min_pl <= high_pl <= max_pl */
        high_pl = (max_vco_f + target_vco_f - 1) / target_vco_f;
        high_pl = min(high_pl, clk->params->max_pl);
        high_pl = max(high_pl, clk->params->min_pl);
        high_pl = clk->div_to_pl(high_pl);

        /* min_pl <= low_pl <= max_pl */
        low_pl = min_vco_f / target_vco_f;
        low_pl = min(low_pl, clk->params->max_pl);
        low_pl = max(low_pl, clk->params->min_pl);
        low_pl = clk->div_to_pl(low_pl);

        nvkm_debug(subdev, "low_PL %d(div%d), high_PL %d(div%d)", low_pl,
                   clk->pl_to_div(low_pl), high_pl, clk->pl_to_div(high_pl));
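
        /*
         * Exhaustive search, preferring the lowest VCO: target_vco_f starts
         * 2% above the request, and for each PL the VCO must run at
         * target * div(PL). M is scanned until the divided reference
         * u_f = ref/M drops below min_u, and N is bracketed between the
         * floor and ceiling of target_vco_f * M / ref_clk_f; the (M, N, PL)
         * whose divided-down output lands closest to the target wins.
         */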
        /* Select lowest possible VCO */
        for (pl = low_pl; pl <= high_pl; pl++) {
                u32 m, n, n2;

                target_vco_f = target_clk_f * clk->pl_to_div(pl);

                for (m = clk->params->min_m; m <= clk->params->max_m; m++) {
                        u32 u_f = ref_clk_f / m;

                        if (u_f < clk->params->min_u)
                                break;
                        if (u_f > clk->params->max_u)
                                continue;

                        n = (target_vco_f * m) / ref_clk_f;
                        n2 = ((target_vco_f * m) + (ref_clk_f - 1)) /
                             ref_clk_f;

                        if (n > clk->params->max_n)
                                break;

                        for (; n <= n2; n++) {
                                u32 vco_f;

                                if (n < clk->params->min_n)
                                        continue;
                                if (n > clk->params->max_n)
                                        break;

                                vco_f = ref_clk_f * n / m;

                                if (vco_f >= min_vco_f && vco_f <= max_vco_f) {
                                        u32 delta, lwv;

                                        lwv = (vco_f +
                                               (clk->pl_to_div(pl) / 2)) /
                                              clk->pl_to_div(pl);
                                        delta = abs(lwv - target_clk_f);

                                        if (delta < best_delta) {
                                                best_delta = delta;
                                                best_m = m;
                                                best_n = n;
                                                best_pl = pl;

                                                if (best_delta == 0)
                                                        goto found_match;
                                        }
                                }
                        }
                }
        }

found_match:
        WARN_ON(best_delta == ~0);

        if (best_delta != 0)
                nvkm_debug(subdev,
                           "no best match for target @ %dMHz on gpc_pll",
                           target_clk_f / KHZ);

        pll->m = best_m;
        pll->n = best_n;
        pll->pl = best_pl;

        target_freq = gk20a_pllg_calc_rate(clk, pll);

        nvkm_debug(subdev,
                   "actual target freq %d KHz, M %d, N %d, PL %d(div%d)\n",
                   target_freq / KHZ, pll->m, pll->n, pll->pl,
                   clk->pl_to_div(pll->pl));

        return 0;
}

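/*
 * Slide NDIV to a new value while the PLL keeps running: slowdown mode is
 * entered, the new coefficient is written, and the hardware then ramps to
 * it dynamically instead of glitching. Only NDIV can be changed this way;
 * M and PL require a full reprogram.
 */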
static int
gk20a_pllg_slide(struct gk20a_clk *clk, u32 n)
{
        struct nvkm_subdev *subdev = &clk->base.subdev;
        struct nvkm_device *device = subdev->device;
        struct gk20a_pll pll;
        int ret = 0;

        /* get old coefficients */
        gk20a_pllg_read_mnp(clk, &pll);
        /* do nothing if NDIV is the same */
        if (n == pll.n)
                return 0;

        /* pll slowdown mode */
        nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
                  BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT),
                  BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT));

        /* new ndiv ready for ramp */
        pll.n = n;
        udelay(1);
        gk20a_pllg_write_mnp(clk, &pll);

        /* dynamic ramp to new ndiv */
        udelay(1);
        nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
                  BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT),
                  BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT));

        /* wait for ramping to complete */
        if (nvkm_wait_usec(device, 500, GPC_BCAST_NDIV_SLOWDOWN_DEBUG,
                GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK,
                GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK) < 0)
                ret = -ETIMEDOUT;

        /* exit slowdown mode */
        nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
                  BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT) |
                  BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0);
        nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);

        return ret;
}

static int
gk20a_pllg_enable(struct gk20a_clk *clk)
{
        struct nvkm_device *device = clk->base.subdev.device;
        u32 val;

        nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
        nvkm_rd32(device, GPCPLL_CFG);

        /* enable lock detection */
        val = nvkm_rd32(device, GPCPLL_CFG);
        if (val & GPCPLL_CFG_LOCK_DET_OFF) {
                val &= ~GPCPLL_CFG_LOCK_DET_OFF;
                nvkm_wr32(device, GPCPLL_CFG, val);
        }

        /* wait for lock */
        if (nvkm_wait_usec(device, 300, GPCPLL_CFG, GPCPLL_CFG_LOCK,
                           GPCPLL_CFG_LOCK) < 0)
                return -ETIMEDOUT;

        /* switch to VCO mode */
        nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT),
                  BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));

        return 0;
}

static void
gk20a_pllg_disable(struct gk20a_clk *clk)
{
        struct nvkm_device *device = clk->base.subdev.device;

        /* put PLL in bypass before disabling it */
        nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);

        nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
        nvkm_rd32(device, GPCPLL_CFG);
}

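/*
 * Full reprogram: the PLL is bypassed and disabled before M, N and PL are
 * all rewritten. The output divider is temporarily set to 1:2 so that the
 * VCO-to-bypass frequency jump is halved on either side of the switch.
 */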
static int
gk20a_pllg_program_mnp(struct gk20a_clk *clk, const struct gk20a_pll *pll)
{
        struct nvkm_subdev *subdev = &clk->base.subdev;
        struct nvkm_device *device = subdev->device;
        struct gk20a_pll cur_pll;
        int ret;

        gk20a_pllg_read_mnp(clk, &cur_pll);

        /* split VCO-to-bypass jump in half by setting out divider 1:2 */
        nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
                  GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
        /* Intentional 2nd write to assure linear divider operation */
        nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
                  GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
        nvkm_rd32(device, GPC2CLK_OUT);
        udelay(2);

        gk20a_pllg_disable(clk);

        gk20a_pllg_write_mnp(clk, pll);

        ret = gk20a_pllg_enable(clk);
        if (ret)
                return ret;

        /* restore out divider 1:1 */
        udelay(2);
        nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
                  GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
        /* Intentional 2nd write to assure linear divider operation */
        nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
                  GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
        nvkm_rd32(device, GPC2CLK_OUT);

        return 0;
}

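/*
 * Preferred reprogramming path. If the PLL is running, slide down to
 * NDIV_LO first (gk20a_pllg_n_lo(), from gk20a.h, presumably yields the N
 * that puts the VCO at min_vco for a given M), reprogram MNP at that low
 * rate, then slide back up to the target NDIV.
 */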
static int
gk20a_pllg_program_mnp_slide(struct gk20a_clk *clk, const struct gk20a_pll *pll)
{
        struct gk20a_pll cur_pll;
        int ret;

        if (gk20a_pllg_is_enabled(clk)) {
                gk20a_pllg_read_mnp(clk, &cur_pll);

                /* just do NDIV slide if there is no change to M and PL */
                if (pll->m == cur_pll.m && pll->pl == cur_pll.pl)
                        return gk20a_pllg_slide(clk, pll->n);

                /* slide down to current NDIV_LO */
                cur_pll.n = gk20a_pllg_n_lo(clk, &cur_pll);
                ret = gk20a_pllg_slide(clk, cur_pll.n);
                if (ret)
                        return ret;
        }

        /* program MNP with the new clock parameters and new NDIV_LO */
        cur_pll = *pll;
        cur_pll.n = gk20a_pllg_n_lo(clk, &cur_pll);
        ret = gk20a_pllg_program_mnp(clk, &cur_pll);
        if (ret)
                return ret;

        /* slide up to new NDIV */
        return gk20a_pllg_slide(clk, pll->n);
}

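/*
 * Supported pstates. The gpc domain values are GPC clock rates in kHz
 * (72-852 MHz); "voltage" appears to be an index into the voltage table
 * rather than a value in microvolts.
 */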
static struct nvkm_pstate
gk20a_pstates[] = {
        {
                .base = {
                        .domain[nv_clk_src_gpc] = 72000,
                        .voltage = 0,
                },
        },
        {
                .base = {
                        .domain[nv_clk_src_gpc] = 108000,
                        .voltage = 1,
                },
        },
        {
                .base = {
                        .domain[nv_clk_src_gpc] = 180000,
                        .voltage = 2,
                },
        },
        {
                .base = {
                        .domain[nv_clk_src_gpc] = 252000,
                        .voltage = 3,
                },
        },
        {
                .base = {
                        .domain[nv_clk_src_gpc] = 324000,
                        .voltage = 4,
                },
        },
        {
                .base = {
                        .domain[nv_clk_src_gpc] = 396000,
                        .voltage = 5,
                },
        },
        {
                .base = {
                        .domain[nv_clk_src_gpc] = 468000,
                        .voltage = 6,
                },
        },
        {
                .base = {
                        .domain[nv_clk_src_gpc] = 540000,
                        .voltage = 7,
                },
        },
        {
                .base = {
                        .domain[nv_clk_src_gpc] = 612000,
                        .voltage = 8,
                },
        },
        {
                .base = {
                        .domain[nv_clk_src_gpc] = 648000,
                        .voltage = 9,
                },
        },
        {
                .base = {
                        .domain[nv_clk_src_gpc] = 684000,
                        .voltage = 10,
                },
        },
        {
                .base = {
                        .domain[nv_clk_src_gpc] = 708000,
                        .voltage = 11,
                },
        },
        {
                .base = {
                        .domain[nv_clk_src_gpc] = 756000,
                        .voltage = 12,
                },
        },
        {
                .base = {
                        .domain[nv_clk_src_gpc] = 804000,
                        .voltage = 13,
                },
        },
        {
                .base = {
                        .domain[nv_clk_src_gpc] = 852000,
                        .voltage = 14,
                },
        },
};

int
gk20a_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
        struct gk20a_clk *clk = gk20a_clk(base);
        struct nvkm_subdev *subdev = &clk->base.subdev;
        struct nvkm_device *device = subdev->device;
        struct gk20a_pll pll;

        switch (src) {
        case nv_clk_src_crystal:
                return device->crystal;
        case nv_clk_src_gpc:
                gk20a_pllg_read_mnp(clk, &pll);
                return gk20a_pllg_calc_rate(clk, &pll) / GK20A_CLK_GPC_MDIV;
        default:
                nvkm_error(subdev, "invalid clock source %d\n", src);
                return -EINVAL;
        }
}

int
gk20a_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
        struct gk20a_clk *clk = gk20a_clk(base);

        return gk20a_pllg_calc_mnp(clk, cstate->domain[nv_clk_src_gpc] *
                                        GK20A_CLK_GPC_MDIV, &clk->pll);
}

int
gk20a_clk_prog(struct nvkm_clk *base)
{
        struct gk20a_clk *clk = gk20a_clk(base);
        int ret;

        ret = gk20a_pllg_program_mnp_slide(clk, &clk->pll);
        if (ret)
                ret = gk20a_pllg_program_mnp(clk, &clk->pll);

        return ret;
}

void
gk20a_clk_tidy(struct nvkm_clk *base)
{
}

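/*
 * Program the dynamic ramp step coefficients for the NDIV slide. The
 * values depend on the parent rate, presumably so the ramp proceeds at a
 * comparable pace whatever the reference clock is.
 */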
int
gk20a_clk_setup_slide(struct gk20a_clk *clk)
{
        struct nvkm_subdev *subdev = &clk->base.subdev;
        struct nvkm_device *device = subdev->device;
        u32 step_a, step_b;

        switch (clk->parent_rate) {
        case 12000000:
        case 12800000:
        case 13000000:
                step_a = 0x2b;
                step_b = 0x0b;
                break;
        case 19200000:
                step_a = 0x12;
                step_b = 0x08;
                break;
        case 38400000:
                step_a = 0x04;
                step_b = 0x05;
                break;
        default:
                nvkm_error(subdev, "invalid parent clock rate %u KHz",
                           clk->parent_rate / KHZ);
                return -EINVAL;
        }

        nvkm_mask(device, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
                  step_a << GPCPLL_CFG2_PLL_STEPA_SHIFT);
        nvkm_mask(device, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
                  step_b << GPCPLL_CFG3_PLL_STEPB_SHIFT);

        return 0;
}

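/*
 * Teardown: slide back to the minimum VCO before disabling the PLL, then
 * assert IDDQ, which presumably powers down the PLL's analog circuitry
 * until init deasserts it again.
 */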
void
gk20a_clk_fini(struct nvkm_clk *base)
{
        struct nvkm_device *device = base->subdev.device;
        struct gk20a_clk *clk = gk20a_clk(base);

        /* slide to VCO min */
        if (gk20a_pllg_is_enabled(clk)) {
                struct gk20a_pll pll;
                u32 n_lo;

                gk20a_pllg_read_mnp(clk, &pll);
                n_lo = gk20a_pllg_n_lo(clk, &pll);
                gk20a_pllg_slide(clk, n_lo);
        }

        gk20a_pllg_disable(clk);

        /* set IDDQ */
        nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 1);
}

static int
gk20a_clk_init(struct nvkm_clk *base)
{
        struct gk20a_clk *clk = gk20a_clk(base);
        struct nvkm_subdev *subdev = &clk->base.subdev;
        struct nvkm_device *device = subdev->device;
        int ret;

        /* get out from IDDQ */
        nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 0);
        nvkm_rd32(device, GPCPLL_CFG);
        udelay(5);

        nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK,
                  GPC2CLK_OUT_INIT_VAL);

        ret = gk20a_clk_setup_slide(clk);
        if (ret)
                return ret;

        /* Start with lowest frequency */
        base->func->calc(base, &base->func->pstates[0].base);
        ret = base->func->prog(&clk->base);
        if (ret) {
                nvkm_error(subdev, "cannot initialize clock\n");
                return ret;
        }

        return 0;
}

static const struct nvkm_clk_func
gk20a_clk = {
        .init = gk20a_clk_init,
        .fini = gk20a_clk_fini,
        .read = gk20a_clk_read,
        .calc = gk20a_clk_calc,
        .prog = gk20a_clk_prog,
        .tidy = gk20a_clk_tidy,
        .pstates = gk20a_pstates,
        .nr_pstates = ARRAY_SIZE(gk20a_pstates),
        .domains = {
                { nv_clk_src_crystal, 0xff },
                { nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
                { nv_clk_src_max }
        }
};

int
gk20a_clk_ctor(struct nvkm_device *device, int index,
               const struct nvkm_clk_func *func,
               const struct gk20a_clk_pllg_params *params,
               struct gk20a_clk *clk)
{
        struct nvkm_device_tegra *tdev = device->func->tegra(device);
        int ret;
        int i;

        /* Finish initializing the pstates */
        for (i = 0; i < func->nr_pstates; i++) {
                INIT_LIST_HEAD(&func->pstates[i].list);
                func->pstates[i].pstate = i + 1;
        }

        clk->params = params;
        clk->parent_rate = clk_get_rate(tdev->clk);

        ret = nvkm_clk_ctor(func, device, index, true, &clk->base);
        if (ret)
                return ret;

        nvkm_debug(&clk->base.subdev, "parent clock rate: %d KHz\n",
                   clk->parent_rate / KHZ);

        return 0;
}

int
gk20a_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
{
        struct gk20a_clk *clk;
        int ret;

        clk = kzalloc(sizeof(*clk), GFP_KERNEL);
        if (!clk)
                return -ENOMEM;
        *pclk = &clk->base;

        ret = gk20a_clk_ctor(device, index, &gk20a_clk, &gk20a_pllg_params,
                             clk);

        clk->pl_to_div = pl_to_div;
        clk->div_to_pl = div_to_pl;

        return ret;
}