/* mcp77.c — MCP77 clock subdev implementation */
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#define mcp77_clk(p) container_of((p), struct mcp77_clk, base)
#include "gt215.h"
#include "pll.h"

#include <subdev/bios.h>
#include <subdev/bios/pll.h>
#include <subdev/timer.h>
/* Per-device state for the MCP77 clock subdev: the base nvkm_clk plus
 * the clock-source selections and PLL register values that are computed
 * by mcp77_clk_calc() and written to hardware by mcp77_clk_prog().
 */
struct mcp77_clk {
	struct nvkm_clk base;
	enum nv_clk_src csrc, ssrc, vsrc;	/* core / shader / vdec sources */
	u32 cctrl, sctrl;			/* core / shader PLL control words */
	u32 ccoef, scoef;			/* core / shader PLL coefficients (N << 8 | M) */
	u32 cpost, spost;			/* core / shader post-divider register values */
	u32 vdiv;				/* vdec clock divider register value */
};
  38. static u32
  39. read_div(struct mcp77_clk *clk)
  40. {
  41. struct nvkm_device *device = clk->base.subdev.device;
  42. return nvkm_rd32(device, 0x004600);
  43. }
/* Compute the current output frequency of the PLL whose registers start
 * at 'base' (0x4020 = shader PLL, 0x4028 = core PLL), derived from the
 * PCIe reference clock.  Returns 0 when the PLL is disabled.
 */
static u32
read_pll(struct mcp77_clk *clk, u32 base)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 ctrl = nvkm_rd32(device, base + 0);
	u32 coef = nvkm_rd32(device, base + 4);
	u32 ref = nvkm_clk_read(&clk->base, nv_clk_src_href);
	u32 post_div = 0;
	u32 clock = 0;
	int N1, M1;

	/* The post-divider lives in a separate register for each PLL;
	 * for 0x4020 the field is a shift count, for 0x4028 it is used
	 * directly as a divisor. */
	switch (base){
	case 0x4020:
		post_div = 1 << ((nvkm_rd32(device, 0x4070) & 0x000f0000) >> 16);
		break;
	case 0x4028:
		/* NOTE(review): this raw field could read back as 0, which
		 * would divide by zero below — presumably never happens on
		 * real hardware once the PLL is enabled; verify. */
		post_div = (nvkm_rd32(device, 0x4040) & 0x000f0000) >> 16;
		break;
	default:
		break;
	}

	N1 = (coef & 0x0000ff00) >> 8;
	M1 = (coef & 0x000000ff);
	/* Only report a frequency if the PLL is enabled (ctrl bit 31)
	 * and the M divider is non-zero. */
	if ((ctrl & 0x80000000) && M1) {
		clock = ref * N1 / M1;
		clock = clock / post_div;
	}
	return clock;
}
/* Read the current frequency of clock source 'src'.
 *
 * Most sources are decoded from the 0xc054 mux register; core and
 * shader additionally read their PLL post-divider fields.  No memory
 * clock is reported on this chipset (nv_clk_src_mem returns 0).
 * Returns 0 (after a debug message) for unknown sources.
 */
static int
mcp77_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
	struct mcp77_clk *clk = mcp77_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mast = nvkm_rd32(device, 0x00c054);	/* clock source mux */
	u32 P = 0;

	switch (src) {
	case nv_clk_src_crystal:
		return device->crystal;
	case nv_clk_src_href:
		return 100000; /* PCIE reference clock */
	case nv_clk_src_hclkm4:
		return nvkm_clk_read(&clk->base, nv_clk_src_href) * 4;
	case nv_clk_src_hclkm2d3:
		return nvkm_clk_read(&clk->base, nv_clk_src_href) * 2 / 3;
	case nv_clk_src_host:
		switch (mast & 0x000c0000) {
		case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm2d3);
		case 0x00040000: break;	/* unknown encoding; fall back to debug below */
		case 0x00080000: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm4);
		case 0x000c0000: return nvkm_clk_read(&clk->base, nv_clk_src_cclk);
		}
		break;
	case nv_clk_src_core:
		/* P is the core PLL's right-shift post divider. */
		P = (nvkm_rd32(device, 0x004028) & 0x00070000) >> 16;

		switch (mast & 0x00000003) {
		case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
		case 0x00000001: return 0;
		case 0x00000002: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm4) >> P;
		case 0x00000003: return read_pll(clk, 0x004028) >> P;
		}
		break;
	case nv_clk_src_cclk:
		/* cclk only differs from core when both mux bits 24-25 and
		 * bit 9 are set; otherwise it mirrors the core clock. */
		if ((mast & 0x03000000) != 0x03000000)
			return nvkm_clk_read(&clk->base, nv_clk_src_core);

		if ((mast & 0x00000200) == 0x00000000)
			return nvkm_clk_read(&clk->base, nv_clk_src_core);

		switch (mast & 0x00000c00) {
		case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_href);
		case 0x00000400: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm4);
		case 0x00000800: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm2d3);
		default: return 0;
		}
	case nv_clk_src_shader:
		/* P is the shader PLL's right-shift post divider. */
		P = (nvkm_rd32(device, 0x004020) & 0x00070000) >> 16;
		switch (mast & 0x00000030) {
		case 0x00000000:
			if (mast & 0x00000040)
				return nvkm_clk_read(&clk->base, nv_clk_src_href) >> P;
			return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
		case 0x00000010: break;	/* unknown encoding */
		case 0x00000020: return read_pll(clk, 0x004028) >> P;
		case 0x00000030: return read_pll(clk, 0x004020) >> P;
		}
		break;
	case nv_clk_src_mem:
		return 0;
		break;
	case nv_clk_src_vdec:
		P = (read_div(clk) & 0x00000700) >> 8;

		switch (mast & 0x00400000) {
		case 0x00400000:
			return nvkm_clk_read(&clk->base, nv_clk_src_core) >> P;
			break;
		default:
			return 500000 >> P;
			break;
		}
		break;
	default:
		break;
	}

	nvkm_debug(subdev, "unknown clock source %d %08x\n", src, mast);
	return 0;
}
  149. static u32
  150. calc_pll(struct mcp77_clk *clk, u32 reg,
  151. u32 clock, int *N, int *M, int *P)
  152. {
  153. struct nvkm_subdev *subdev = &clk->base.subdev;
  154. struct nvbios_pll pll;
  155. int ret;
  156. ret = nvbios_pll_parse(subdev->device->bios, reg, &pll);
  157. if (ret)
  158. return 0;
  159. pll.vco2.max_freq = 0;
  160. pll.refclk = nvkm_clk_read(&clk->base, nv_clk_src_href);
  161. if (!pll.refclk)
  162. return 0;
  163. return nv04_pll_calc(subdev, &pll, clock, N, M, NULL, NULL, P);
  164. }
  165. static inline u32
  166. calc_P(u32 src, u32 target, int *div)
  167. {
  168. u32 clk0 = src, clk1 = src;
  169. for (*div = 0; *div <= 7; (*div)++) {
  170. if (clk0 <= target) {
  171. clk1 = clk0 << (*div ? 1 : 0);
  172. break;
  173. }
  174. clk0 >>= 1;
  175. }
  176. if (target - clk0 <= clk1 - target)
  177. return clk0;
  178. (*div)--;
  179. return clk1;
  180. }
  181. static int
  182. mcp77_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
  183. {
  184. struct mcp77_clk *clk = mcp77_clk(base);
  185. const int shader = cstate->domain[nv_clk_src_shader];
  186. const int core = cstate->domain[nv_clk_src_core];
  187. const int vdec = cstate->domain[nv_clk_src_vdec];
  188. struct nvkm_subdev *subdev = &clk->base.subdev;
  189. u32 out = 0, clock = 0;
  190. int N, M, P1, P2 = 0;
  191. int divs = 0;
  192. /* cclk: find suitable source, disable PLL if we can */
  193. if (core < nvkm_clk_read(&clk->base, nv_clk_src_hclkm4))
  194. out = calc_P(nvkm_clk_read(&clk->base, nv_clk_src_hclkm4), core, &divs);
  195. /* Calculate clock * 2, so shader clock can use it too */
  196. clock = calc_pll(clk, 0x4028, (core << 1), &N, &M, &P1);
  197. if (abs(core - out) <= abs(core - (clock >> 1))) {
  198. clk->csrc = nv_clk_src_hclkm4;
  199. clk->cctrl = divs << 16;
  200. } else {
  201. /* NVCTRL is actually used _after_ NVPOST, and after what we
  202. * call NVPLL. To make matters worse, NVPOST is an integer
  203. * divider instead of a right-shift number. */
  204. if(P1 > 2) {
  205. P2 = P1 - 2;
  206. P1 = 2;
  207. }
  208. clk->csrc = nv_clk_src_core;
  209. clk->ccoef = (N << 8) | M;
  210. clk->cctrl = (P2 + 1) << 16;
  211. clk->cpost = (1 << P1) << 16;
  212. }
  213. /* sclk: nvpll + divisor, href or spll */
  214. out = 0;
  215. if (shader == nvkm_clk_read(&clk->base, nv_clk_src_href)) {
  216. clk->ssrc = nv_clk_src_href;
  217. } else {
  218. clock = calc_pll(clk, 0x4020, shader, &N, &M, &P1);
  219. if (clk->csrc == nv_clk_src_core)
  220. out = calc_P((core << 1), shader, &divs);
  221. if (abs(shader - out) <=
  222. abs(shader - clock) &&
  223. (divs + P2) <= 7) {
  224. clk->ssrc = nv_clk_src_core;
  225. clk->sctrl = (divs + P2) << 16;
  226. } else {
  227. clk->ssrc = nv_clk_src_shader;
  228. clk->scoef = (N << 8) | M;
  229. clk->sctrl = P1 << 16;
  230. }
  231. }
  232. /* vclk */
  233. out = calc_P(core, vdec, &divs);
  234. clock = calc_P(500000, vdec, &P1);
  235. if(abs(vdec - out) <= abs(vdec - clock)) {
  236. clk->vsrc = nv_clk_src_cclk;
  237. clk->vdiv = divs << 16;
  238. } else {
  239. clk->vsrc = nv_clk_src_vdec;
  240. clk->vdiv = P1 << 16;
  241. }
  242. /* Print strategy! */
  243. nvkm_debug(subdev, "nvpll: %08x %08x %08x\n",
  244. clk->ccoef, clk->cpost, clk->cctrl);
  245. nvkm_debug(subdev, " spll: %08x %08x %08x\n",
  246. clk->scoef, clk->spost, clk->sctrl);
  247. nvkm_debug(subdev, " vdiv: %08x\n", clk->vdiv);
  248. if (clk->csrc == nv_clk_src_hclkm4)
  249. nvkm_debug(subdev, "core: hrefm4\n");
  250. else
  251. nvkm_debug(subdev, "core: nvpll\n");
  252. if (clk->ssrc == nv_clk_src_hclkm4)
  253. nvkm_debug(subdev, "shader: hrefm4\n");
  254. else if (clk->ssrc == nv_clk_src_core)
  255. nvkm_debug(subdev, "shader: nvpll\n");
  256. else
  257. nvkm_debug(subdev, "shader: spll\n");
  258. if (clk->vsrc == nv_clk_src_hclkm4)
  259. nvkm_debug(subdev, "vdec: 500MHz\n");
  260. else
  261. nvkm_debug(subdev, "vdec: core\n");
  262. return 0;
  263. }
/* Program the clock configuration computed by mcp77_clk_calc() into
 * hardware.  Switches to safe (href-derived) clocks first, sets up the
 * selected PLLs/dividers, waits for PLL lock, then flips the master mux.
 * Returns 0 on success or a negative error from gt215_clk_pre().
 */
static int
mcp77_clk_prog(struct nvkm_clk *base)
{
	struct mcp77_clk *clk = mcp77_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 pllmask = 0, mast;
	unsigned long flags;
	unsigned long *f = &flags;	/* NULLed on -EBUSY so post() skips unlock */
	int ret = 0;

	ret = gt215_clk_pre(&clk->base, f);
	if (ret)
		goto out;

	/* First switch to safe clocks: href */
	mast = nvkm_mask(device, 0xc054, 0x03400e70, 0x03400640);
	mast &= ~0x00400e73;
	mast |= 0x03000000;

	switch (clk->csrc) {
	case nv_clk_src_hclkm4:
		/* Divider only; PLL not needed. */
		nvkm_mask(device, 0x4028, 0x00070000, clk->cctrl);
		mast |= 0x00000002;
		break;
	case nv_clk_src_core:
		/* Coefficients, then enable (bit 31) with control word. */
		nvkm_wr32(device, 0x402c, clk->ccoef);
		nvkm_wr32(device, 0x4028, 0x80000000 | clk->cctrl);
		nvkm_wr32(device, 0x4040, clk->cpost);
		pllmask |= (0x3 << 8);	/* core PLL lock bits in 0x4080 */
		mast |= 0x00000003;
		break;
	default:
		nvkm_warn(subdev, "Reclocking failed: unknown core clock\n");
		goto resume;
	}

	switch (clk->ssrc) {
	case nv_clk_src_href:
		nvkm_mask(device, 0x4020, 0x00070000, 0x00000000);
		/* mast |= 0x00000000; */
		break;
	case nv_clk_src_core:
		nvkm_mask(device, 0x4020, 0x00070000, clk->sctrl);
		mast |= 0x00000020;
		break;
	case nv_clk_src_shader:
		nvkm_wr32(device, 0x4024, clk->scoef);
		nvkm_wr32(device, 0x4020, 0x80000000 | clk->sctrl);
		nvkm_wr32(device, 0x4070, clk->spost);
		pllmask |= (0x3 << 12);	/* shader PLL lock bits in 0x4080 */
		mast |= 0x00000030;
		break;
	default:
		nvkm_warn(subdev, "Reclocking failed: unknown sclk clock\n");
		goto resume;
	}

	/* Wait up to 2ms for all enabled PLLs to report lock. */
	if (nvkm_msec(device, 2000,
		u32 tmp = nvkm_rd32(device, 0x004080) & pllmask;
		if (tmp == pllmask)
			break;
	) < 0)
		goto resume;

	switch (clk->vsrc) {
	case nv_clk_src_cclk:
		mast |= 0x00400000;
		/* fallthrough — vdiv is written for either source */
	default:
		nvkm_wr32(device, 0x4600, clk->vdiv);
	}

	/* Finally switch the master mux to the new sources. */
	nvkm_wr32(device, 0xc054, mast);

resume:
	/* Disable some PLLs and dividers when unused */
	if (clk->csrc != nv_clk_src_core) {
		nvkm_wr32(device, 0x4040, 0x00000000);
		nvkm_mask(device, 0x4028, 0x80000000, 0x00000000);
	}

	if (clk->ssrc != nv_clk_src_shader) {
		nvkm_wr32(device, 0x4070, 0x00000000);
		nvkm_mask(device, 0x4020, 0x80000000, 0x00000000);
	}

out:
	if (ret == -EBUSY)
		f = NULL;

	gt215_clk_post(&clk->base, f);
	return ret;
}
/* Nothing to tidy up after reclocking on this chipset; the hook is
 * required by the nvkm_clk_func interface. */
static void
mcp77_clk_tidy(struct nvkm_clk *base)
{
}
/* Implementation table and reclockable clock domains for the MCP77
 * clock subdev. */
static const struct nvkm_clk_func
mcp77_clk = {
	.read = mcp77_clk_read,
	.calc = mcp77_clk_calc,
	.prog = mcp77_clk_prog,
	.tidy = mcp77_clk_tidy,
	.domains = {
		{ nv_clk_src_crystal, 0xff },
		{ nv_clk_src_href   , 0xff },
		{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
		{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
		{ nv_clk_src_vdec   , 0xff, 0, "vdec", 1000 },
		{ nv_clk_src_max }
	}
};
  365. int
  366. mcp77_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
  367. {
  368. struct mcp77_clk *clk;
  369. if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
  370. return -ENOMEM;
  371. *pclk = &clk->base;
  372. return nvkm_clk_ctor(&mcp77_clk, device, index, true, &clk->base);
  373. }