base.c

/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"

#include <subdev/bios.h>
#include <subdev/bios/boost.h>
#include <subdev/bios/cstep.h>
#include <subdev/bios/perf.h>
#include <subdev/fb.h>
#include <subdev/therm.h>
#include <subdev/volt.h>

#include <core/option.h>

/******************************************************************************
 * misc
 *****************************************************************************/
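/* Clamp (and, when @adjust is set, scale) a single clock-domain frequency
 * against the VBIOS boost table entry for the given performance state, if
 * such an entry exists.
 */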
static u32
nvkm_clk_adjust(struct nvkm_clk *clk, bool adjust,
                u8 pstate, u8 domain, u32 input)
{
        struct nvkm_bios *bios = clk->subdev.device->bios;
        struct nvbios_boostE boostE;
        u8 ver, hdr, cnt, len;
        u16 data;

        data = nvbios_boostEm(bios, pstate, &ver, &hdr, &cnt, &len, &boostE);
        if (data) {
                struct nvbios_boostS boostS;
                u8 idx = 0, sver, shdr;
                u16 subd;

                input = max(boostE.min, input);
                input = min(boostE.max, input);
                do {
                        sver = ver;
                        shdr = hdr;
                        subd = nvbios_boostSp(bios, idx++, data, &sver, &shdr,
                                              cnt, len, &boostS);
                        if (subd && boostS.domain == domain) {
                                if (adjust)
                                        input = input * boostS.percent / 100;
                                input = max(boostS.min, input);
                                input = min(boostS.max, input);
                                break;
                        }
                } while (subd);
        }

        return input;
}
/******************************************************************************
 * C-States
 *****************************************************************************/
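/* Program the clocks for a performance state: raise the fan speed and core
 * voltage first, ask the chip-specific code to calculate and program the new
 * clocks, then let the voltage and fan relax again.  The cstatei argument is
 * not used here; the C-state at the tail of the P-state's list (or the base
 * state, when the list is empty) is the one programmed.
 */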
static int
nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
{
        struct nvkm_subdev *subdev = &clk->subdev;
        struct nvkm_device *device = subdev->device;
        struct nvkm_therm *therm = device->therm;
        struct nvkm_volt *volt = device->volt;
        struct nvkm_cstate *cstate;
        int ret;

        if (!list_empty(&pstate->list)) {
                cstate = list_entry(pstate->list.prev, typeof(*cstate), head);
        } else {
                cstate = &pstate->base;
        }

        if (therm) {
                ret = nvkm_therm_cstate(therm, pstate->fanspeed, +1);
                if (ret && ret != -ENODEV) {
                        nvkm_error(subdev, "failed to raise fan speed: %d\n", ret);
                        return ret;
                }
        }

        if (volt) {
                ret = nvkm_volt_set_id(volt, cstate->voltage, +1);
                if (ret && ret != -ENODEV) {
                        nvkm_error(subdev, "failed to raise voltage: %d\n", ret);
                        return ret;
                }
        }

        ret = clk->func->calc(clk, cstate);
        if (ret == 0) {
                ret = clk->func->prog(clk);
                clk->func->tidy(clk);
        }

        if (volt) {
                ret = nvkm_volt_set_id(volt, cstate->voltage, -1);
                if (ret && ret != -ENODEV)
                        nvkm_error(subdev, "failed to lower voltage: %d\n", ret);
        }

        if (therm) {
                ret = nvkm_therm_cstate(therm, pstate->fanspeed, -1);
                if (ret && ret != -ENODEV)
                        nvkm_error(subdev, "failed to lower fan speed: %d\n", ret);
        }

        return ret;
}
static void
nvkm_cstate_del(struct nvkm_cstate *cstate)
{
        list_del(&cstate->head);
        kfree(cstate);
}
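/* Build one C-state from entry @idx of the VBIOS cstep table: start from a
 * copy of the P-state's base clocks, take the voltage from the table, set
 * the core-flagged clock domains to the cstep frequency (adjusted via the
 * boost table), and link the result onto the P-state's C-state list.
 */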
static int
nvkm_cstate_new(struct nvkm_clk *clk, int idx, struct nvkm_pstate *pstate)
{
        struct nvkm_bios *bios = clk->subdev.device->bios;
        const struct nvkm_domain *domain = clk->domains;
        struct nvkm_cstate *cstate = NULL;
        struct nvbios_cstepX cstepX;
        u8 ver, hdr;
        u16 data;

        data = nvbios_cstepXp(bios, idx, &ver, &hdr, &cstepX);
        if (!data)
                return -ENOENT;

        cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
        if (!cstate)
                return -ENOMEM;

        *cstate = pstate->base;
        cstate->voltage = cstepX.voltage;

        while (domain && domain->name != nv_clk_src_max) {
                if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) {
                        u32 freq = nvkm_clk_adjust(clk, true, pstate->pstate,
                                                   domain->bios, cstepX.freq);
                        cstate->domain[domain->name] = freq;
                }
                domain++;
        }

        list_add(&cstate->head, &pstate->list);
        return 0;
}
/******************************************************************************
 * P-States
 *****************************************************************************/
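/* Switch to the pstatei'th P-state on the list: reclock memory first (the
 * RAM calc/prog pair is retried for as long as prog asks for another pass),
 * then program the remaining clock domains through nvkm_cstate_prog().
 */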
static int
nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
{
        struct nvkm_subdev *subdev = &clk->subdev;
        struct nvkm_ram *ram = subdev->device->fb->ram;
        struct nvkm_pstate *pstate;
        int ret, idx = 0;

        list_for_each_entry(pstate, &clk->states, head) {
                if (idx++ == pstatei)
                        break;
        }

        nvkm_debug(subdev, "setting performance state %d\n", pstatei);
        clk->pstate = pstatei;

        if (ram && ram->func->calc) {
                int khz = pstate->base.domain[nv_clk_src_mem];
                do {
                        ret = ram->func->calc(ram, khz);
                        if (ret == 0)
                                ret = ram->func->prog(ram);
                } while (ret > 0);
                ram->func->tidy(ram);
        }

        return nvkm_cstate_prog(clk, pstate, 0);
}
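/* Deferred reclocking worker.  Picks the target P-state from the user
 * request for the current power source (AC or DC), falling back to the
 * automatic state, then clamps it between the thermal limit (tstate, a
 * non-positive offset from the top state) and the display-imposed floor
 * (dstate).  If the result differs from the state currently set, the
 * hardware is reprogrammed; finally, waiters are woken and the
 * power-source notifier is re-armed.
 */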
static void
nvkm_pstate_work(struct work_struct *work)
{
        struct nvkm_clk *clk = container_of(work, typeof(*clk), work);
        struct nvkm_subdev *subdev = &clk->subdev;
        int pstate;

        if (!atomic_xchg(&clk->waiting, 0))
                return;
        clk->pwrsrc = power_supply_is_system_supplied();

        nvkm_trace(subdev, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d D %d\n",
                   clk->pstate, clk->pwrsrc, clk->ustate_ac, clk->ustate_dc,
                   clk->astate, clk->tstate, clk->dstate);

        pstate = clk->pwrsrc ? clk->ustate_ac : clk->ustate_dc;
        if (clk->state_nr && pstate != -1) {
                pstate = (pstate < 0) ? clk->astate : pstate;
                pstate = min(pstate, clk->state_nr - 1 + clk->tstate);
                pstate = max(pstate, clk->dstate);
        } else {
                pstate = clk->pstate = -1;
        }

        nvkm_trace(subdev, "-> %d\n", pstate);
        if (pstate != clk->pstate) {
                int ret = nvkm_pstate_prog(clk, pstate);
                if (ret) {
                        nvkm_error(subdev, "error setting pstate %d: %d\n",
                                   pstate, ret);
                }
        }

        wake_up_all(&clk->wait);
        nvkm_notify_get(&clk->pwrsrc_ntfy);
}
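/* Request a P-state re-evaluation: flag a change as pending, kick the
 * worker, and optionally block until it has run.
 */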
static int
nvkm_pstate_calc(struct nvkm_clk *clk, bool wait)
{
        atomic_set(&clk->waiting, 1);
        schedule_work(&clk->work);
        if (wait)
                wait_event(clk->wait, !atomic_read(&clk->waiting));
        return 0;
}
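/* Log a one-line summary of a P-state: for up to three named clock domains,
 * print the frequency range spanned by the base state and its C-states.
 */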
static void
nvkm_pstate_info(struct nvkm_clk *clk, struct nvkm_pstate *pstate)
{
        const struct nvkm_domain *clock = clk->domains - 1;
        struct nvkm_cstate *cstate;
        struct nvkm_subdev *subdev = &clk->subdev;
        char info[3][32] = { "", "", "" };
        char name[4] = "--";
        int i = -1;

        if (pstate->pstate != 0xff)
                snprintf(name, sizeof(name), "%02x", pstate->pstate);

        while ((++clock)->name != nv_clk_src_max) {
                u32 lo = pstate->base.domain[clock->name];
                u32 hi = lo;
                if (hi == 0)
                        continue;

                nvkm_debug(subdev, "%02x: %10d KHz\n", clock->name, lo);
                list_for_each_entry(cstate, &pstate->list, head) {
                        u32 freq = cstate->domain[clock->name];
                        lo = min(lo, freq);
                        hi = max(hi, freq);
                        nvkm_debug(subdev, "%10d KHz\n", freq);
                }

                if (clock->mname && ++i < ARRAY_SIZE(info)) {
                        lo /= clock->mdiv;
                        hi /= clock->mdiv;
                        if (lo == hi) {
                                snprintf(info[i], sizeof(info[i]), "%s %d MHz",
                                         clock->mname, lo);
                        } else {
                                snprintf(info[i], sizeof(info[i]),
                                         "%s %d-%d MHz", clock->mname, lo, hi);
                        }
                }
        }

        nvkm_debug(subdev, "%s: %s %s %s\n", name, info[0], info[1], info[2]);
}
static void
nvkm_pstate_del(struct nvkm_pstate *pstate)
{
        struct nvkm_cstate *cstate, *temp;

        list_for_each_entry_safe(cstate, temp, &pstate->list, head) {
                nvkm_cstate_del(cstate);
        }

        list_del(&pstate->head);
        kfree(pstate);
}
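/* Construct a P-state from entry @idx of the VBIOS performance table.  The
 * base clocks come from the main entry; for version 0x40 tables each clock
 * domain is refined from its sub-entry (core-flagged domains are run through
 * the boost table), and any cstep entries referenced for this P-state are
 * turned into C-states.  Entries with a pstate id of 0xff are skipped.
 */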
static int
nvkm_pstate_new(struct nvkm_clk *clk, int idx)
{
        struct nvkm_bios *bios = clk->subdev.device->bios;
        const struct nvkm_domain *domain = clk->domains - 1;
        struct nvkm_pstate *pstate;
        struct nvkm_cstate *cstate;
        struct nvbios_cstepE cstepE;
        struct nvbios_perfE perfE;
        u8 ver, hdr, cnt, len;
        u16 data;

        data = nvbios_perfEp(bios, idx, &ver, &hdr, &cnt, &len, &perfE);
        if (!data)
                return -EINVAL;
        if (perfE.pstate == 0xff)
                return 0;

        pstate = kzalloc(sizeof(*pstate), GFP_KERNEL);
        cstate = &pstate->base;
        if (!pstate)
                return -ENOMEM;

        INIT_LIST_HEAD(&pstate->list);

        pstate->pstate = perfE.pstate;
        pstate->fanspeed = perfE.fanspeed;
        cstate->voltage = perfE.voltage;
        cstate->domain[nv_clk_src_core] = perfE.core;
        cstate->domain[nv_clk_src_shader] = perfE.shader;
        cstate->domain[nv_clk_src_mem] = perfE.memory;
        cstate->domain[nv_clk_src_vdec] = perfE.vdec;
        cstate->domain[nv_clk_src_dom6] = perfE.disp;

        while (ver >= 0x40 && (++domain)->name != nv_clk_src_max) {
                struct nvbios_perfS perfS;
                u8 sver = ver, shdr = hdr;
                u32 perfSe = nvbios_perfSp(bios, data, domain->bios,
                                           &sver, &shdr, cnt, len, &perfS);
                if (perfSe == 0 || sver != 0x40)
                        continue;

                if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) {
                        perfS.v40.freq = nvkm_clk_adjust(clk, false,
                                                         pstate->pstate,
                                                         domain->bios,
                                                         perfS.v40.freq);
                }

                cstate->domain[domain->name] = perfS.v40.freq;
        }

        data = nvbios_cstepEm(bios, pstate->pstate, &ver, &hdr, &cstepE);
        if (data) {
                int idx = cstepE.index;
                do {
                        nvkm_cstate_new(clk, idx, pstate);
                } while (idx--);
        }

        nvkm_pstate_info(clk, pstate);
        list_add_tail(&pstate->head, &clk->states);
        clk->state_nr++;
        return 0;
}
/******************************************************************************
 * Adjustment triggers
 *****************************************************************************/
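/* Translate a user P-state request into an internal value: -1 (disabled) and
 * -2 (auto) pass straight through, anything else must match the id of a
 * P-state on the list and is converted to that P-state's list index.  The
 * result is returned with +2 added so negative values remain reserved for
 * error codes.
 */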
static int
nvkm_clk_ustate_update(struct nvkm_clk *clk, int req)
{
        struct nvkm_pstate *pstate;
        int i = 0;

        if (!clk->allow_reclock)
                return -ENOSYS;

        if (req != -1 && req != -2) {
                list_for_each_entry(pstate, &clk->states, head) {
                        if (pstate->pstate == req)
                                break;
                        i++;
                }

                if (pstate->pstate != req)
                        return -EINVAL;
                req = i;
        }

        return req + 2;
}
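/* Parse an NvClkMode-style option string: "auto" selects automatic
 * reclocking (-2) when reclocking is allowed, "disabled" selects -1, and a
 * number selects the P-state with that id when it exists (invalid values
 * fall back to -1).
 */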
static int
nvkm_clk_nstate(struct nvkm_clk *clk, const char *mode, int arglen)
{
        int ret = 1;

        if (clk->allow_reclock && !strncasecmpz(mode, "auto", arglen))
                return -2;

        if (strncasecmpz(mode, "disabled", arglen)) {
                char save = mode[arglen];
                long v;

                ((char *)mode)[arglen] = '\0';
                if (!kstrtol(mode, 0, &v)) {
                        ret = nvkm_clk_ustate_update(clk, v);
                        if (ret < 0)
                                ret = 1;
                }
                ((char *)mode)[arglen] = save;
        }

        return ret - 2;
}
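/* Set the user-requested P-state for either the AC (pwr != 0) or DC power
 * source and trigger a synchronous re-evaluation.
 */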
int
nvkm_clk_ustate(struct nvkm_clk *clk, int req, int pwr)
{
        int ret = nvkm_clk_ustate_update(clk, req);
        if (ret >= 0) {
                if (ret -= 2, pwr) clk->ustate_ac = ret;
                else clk->ustate_dc = ret;
                return nvkm_pstate_calc(clk, true);
        }
        return ret;
}
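/* The three helpers below adjust the automatic state (astate), the thermal
 * throttle offset (tstate, clamped to <= 0) and the display-required minimum
 * (dstate), either absolutely (rel == 0) or relatively, and then trigger a
 * re-evaluation of the effective P-state.
 */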
int
nvkm_clk_astate(struct nvkm_clk *clk, int req, int rel, bool wait)
{
        if (!rel) clk->astate = req;
        if ( rel) clk->astate += rel;
        clk->astate = min(clk->astate, clk->state_nr - 1);
        clk->astate = max(clk->astate, 0);
        return nvkm_pstate_calc(clk, wait);
}

int
nvkm_clk_tstate(struct nvkm_clk *clk, int req, int rel)
{
        if (!rel) clk->tstate = req;
        if ( rel) clk->tstate += rel;
        clk->tstate = min(clk->tstate, 0);
        clk->tstate = max(clk->tstate, -(clk->state_nr - 1));
        return nvkm_pstate_calc(clk, true);
}

int
nvkm_clk_dstate(struct nvkm_clk *clk, int req, int rel)
{
        if (!rel) clk->dstate = req;
        if ( rel) clk->dstate += rel;
        clk->dstate = min(clk->dstate, clk->state_nr - 1);
        clk->dstate = max(clk->dstate, 0);
        return nvkm_pstate_calc(clk, true);
}
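/* Power-source change notification: schedule an asynchronous re-evaluation.
 * The notifier is re-armed by the worker once it has run.
 */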
static int
nvkm_clk_pwrsrc(struct nvkm_notify *notify)
{
        struct nvkm_clk *clk =
                container_of(notify, typeof(*clk), pwrsrc_ntfy);
        nvkm_pstate_calc(clk, false);
        return NVKM_NOTIFY_DROP;
}
/******************************************************************************
 * subdev base class implementation
 *****************************************************************************/
int
nvkm_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
{
        return clk->func->read(clk, src);
}

static int
nvkm_clk_fini(struct nvkm_subdev *subdev, bool suspend)
{
        struct nvkm_clk *clk = nvkm_clk(subdev);
        nvkm_notify_put(&clk->pwrsrc_ntfy);
        flush_work(&clk->work);
        if (clk->func->fini)
                clk->func->fini(clk);
        return 0;
}
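/* On init, capture the clocks the device booted with into the boot state
 * (bstate), then either hand over to the chip-specific init hook or reset to
 * the automatic maximum P-state and kick off an initial reclock.
 */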
static int
nvkm_clk_init(struct nvkm_subdev *subdev)
{
        struct nvkm_clk *clk = nvkm_clk(subdev);
        const struct nvkm_domain *clock = clk->domains;
        int ret;

        memset(&clk->bstate, 0x00, sizeof(clk->bstate));
        INIT_LIST_HEAD(&clk->bstate.list);
        clk->bstate.pstate = 0xff;

        while (clock->name != nv_clk_src_max) {
                ret = nvkm_clk_read(clk, clock->name);
                if (ret < 0) {
                        nvkm_error(subdev, "%02x freq unknown\n", clock->name);
                        return ret;
                }
                clk->bstate.base.domain[clock->name] = ret;
                clock++;
        }

        nvkm_pstate_info(clk, &clk->bstate);

        if (clk->func->init)
                return clk->func->init(clk);

        clk->astate = clk->state_nr - 1;
        clk->tstate = 0;
        clk->dstate = 0;
        clk->pstate = -1;
        nvkm_pstate_calc(clk, true);
        return 0;
}
static void *
nvkm_clk_dtor(struct nvkm_subdev *subdev)
{
        struct nvkm_clk *clk = nvkm_clk(subdev);
        struct nvkm_pstate *pstate, *temp;

        nvkm_notify_fini(&clk->pwrsrc_ntfy);

        /* Early return if the pstates have been provided statically */
        if (clk->func->pstates)
                return clk;

        list_for_each_entry_safe(pstate, temp, &clk->states, head) {
                nvkm_pstate_del(pstate);
        }

        return clk;
}

static const struct nvkm_subdev_func
nvkm_clk = {
        .dtor = nvkm_clk_dtor,
        .init = nvkm_clk_init,
        .fini = nvkm_clk_fini,
};
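/* Base constructor shared by the chip-specific clock implementations: wires
 * up the subdev, builds the P-state list (from the VBIOS perf table unless
 * the implementation supplies a static table), registers the power-source
 * notifier, and seeds the AC/DC user states from the NvClkMode, NvClkModeAC
 * and NvClkModeDC config options.
 */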
int
nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device,
              int index, bool allow_reclock, struct nvkm_clk *clk)
{
        int ret, idx, arglen;
        const char *mode;

        nvkm_subdev_ctor(&nvkm_clk, device, index, 0, &clk->subdev);
        clk->func = func;
        INIT_LIST_HEAD(&clk->states);
        clk->domains = func->domains;
        clk->ustate_ac = -1;
        clk->ustate_dc = -1;
        clk->allow_reclock = allow_reclock;

        INIT_WORK(&clk->work, nvkm_pstate_work);
        init_waitqueue_head(&clk->wait);
        atomic_set(&clk->waiting, 0);

        /* If no pstates are provided, try and fetch them from the BIOS */
        if (!func->pstates) {
                idx = 0;
                do {
                        ret = nvkm_pstate_new(clk, idx++);
                } while (ret == 0);
        } else {
                for (idx = 0; idx < func->nr_pstates; idx++)
                        list_add_tail(&func->pstates[idx].head, &clk->states);
                clk->state_nr = func->nr_pstates;
        }

        ret = nvkm_notify_init(NULL, &device->event, nvkm_clk_pwrsrc, true,
                               NULL, 0, 0, &clk->pwrsrc_ntfy);
        if (ret)
                return ret;

        mode = nvkm_stropt(device->cfgopt, "NvClkMode", &arglen);
        if (mode) {
                clk->ustate_ac = nvkm_clk_nstate(clk, mode, arglen);
                clk->ustate_dc = nvkm_clk_nstate(clk, mode, arglen);
        }

        mode = nvkm_stropt(device->cfgopt, "NvClkModeAC", &arglen);
        if (mode)
                clk->ustate_ac = nvkm_clk_nstate(clk, mode, arglen);

        mode = nvkm_stropt(device->cfgopt, "NvClkModeDC", &arglen);
        if (mode)
                clk->ustate_dc = nvkm_clk_nstate(clk, mode, arglen);

        return 0;
}
int
nvkm_clk_new_(const struct nvkm_clk_func *func, struct nvkm_device *device,
              int index, bool allow_reclock, struct nvkm_clk **pclk)
{
        if (!(*pclk = kzalloc(sizeof(**pclk), GFP_KERNEL)))
                return -ENOMEM;
        return nvkm_clk_ctor(func, device, index, allow_reclock, *pclk);
}