base.c 18 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725
  1. /*
  2. * Copyright 2013 Red Hat Inc.
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice shall be included in
  12. * all copies or substantial portions of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  17. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20. * OTHER DEALINGS IN THE SOFTWARE.
  21. *
  22. * Authors: Ben Skeggs
  23. */
  24. #include "priv.h"
  25. #include <subdev/bios.h>
  26. #include <subdev/bios/boost.h>
  27. #include <subdev/bios/cstep.h>
  28. #include <subdev/bios/perf.h>
  29. #include <subdev/bios/vpstate.h>
  30. #include <subdev/fb.h>
  31. #include <subdev/therm.h>
  32. #include <subdev/volt.h>
  33. #include <core/option.h>
  34. /******************************************************************************
  35. * misc
  36. *****************************************************************************/
/* Clamp (and optionally scale) a clock frequency against the VBIOS boost
 * tables.
 *
 * @clk:    clock subdev
 * @adjust: when true, additionally scale the input by the sub-entry's
 *          percentage before clamping
 * @pstate: performance-state id used to look up the boost table entry
 * @domain: BIOS clock-domain id to match against sub-entries
 * @input:  frequency to adjust
 *
 * Returns the input unchanged if no boost entry exists for @pstate;
 * otherwise the input clamped to the entry's [min, max] and, if a
 * sub-entry matches @domain, further scaled/clamped by that sub-entry.
 */
static u32
nvkm_clk_adjust(struct nvkm_clk *clk, bool adjust,
		u8 pstate, u8 domain, u32 input)
{
	struct nvkm_bios *bios = clk->subdev.device->bios;
	struct nvbios_boostE boostE;
	u8 ver, hdr, cnt, len;
	u32 data;

	data = nvbios_boostEm(bios, pstate, &ver, &hdr, &cnt, &len, &boostE);
	if (data) {
		struct nvbios_boostS boostS;
		u8 idx = 0, sver, shdr;
		u32 subd;

		/* Clamp to the table entry's overall frequency window. */
		input = max(boostE.min, input);
		input = min(boostE.max, input);
		do {
			/* nvbios_boostSp() may rewrite ver/hdr; reload them
			 * from the entry header on every iteration. */
			sver = ver;
			shdr = hdr;
			subd = nvbios_boostSp(bios, idx++, data, &sver, &shdr,
					      cnt, len, &boostS);
			if (subd && boostS.domain == domain) {
				if (adjust)
					input = input * boostS.percent / 100;
				input = max(boostS.min, input);
				input = min(boostS.max, input);
				break;
			}
		} while (subd);
	}

	return input;
}
  68. /******************************************************************************
  69. * C-States
  70. *****************************************************************************/
/* Decide whether a cstate may be programmed given the current voltage
 * ceiling and temperature.
 *
 * @max_volt: maximum allowed voltage in uV
 * @temp:     current temperature used for voltage mapping
 *
 * Returns false if any VPSTATE-flagged domain exceeds the base/boost clock
 * limit for the configured boost mode, or if the cstate's mapped voltage
 * is invalid or above the ceiling.
 */
static bool
nvkm_cstate_valid(struct nvkm_clk *clk, struct nvkm_cstate *cstate,
		  u32 max_volt, int temp)
{
	const struct nvkm_domain *domain = clk->domains;
	struct nvkm_volt *volt = clk->subdev.device->volt;
	int voltage;

	while (domain && domain->name != nv_clk_src_max) {
		if (domain->flags & NVKM_CLK_DOM_FLAG_VPSTATE) {
			u32 freq = cstate->domain[domain->name];
			switch (clk->boost_mode) {
			case NVKM_CLK_BOOST_NONE:
				if (clk->base_khz && freq > clk->base_khz)
					return false;
				/* fallthrough: with boosting disabled the
				 * boost-clock limit must hold as well */
			case NVKM_CLK_BOOST_BIOS:
				if (clk->boost_khz && freq > clk->boost_khz)
					return false;
			}
		}
		domain++;
	}

	/* Without a voltage subdev there is no electrical constraint. */
	if (!volt)
		return true;

	voltage = nvkm_volt_map(volt, cstate->voltage, temp);
	if (voltage < 0)
		return false;
	return voltage <= min(max_volt, volt->max_uv);
}
/* Starting from @start, walk backwards (towards lower cstates) through the
 * pstate's cstate list and return the first cstate that is valid under the
 * current voltage/temperature limits.
 *
 * Returns NULL if @pstate or @start is NULL, @start itself when there is no
 * voltage subdev, and otherwise the best (highest still-valid) cstate.
 * NOTE(review): if no cstate validates, the loop terminates at the list
 * head and the returned pointer is not a real cstate — callers appear to
 * rely on the list never being fully invalid; confirm against callers.
 */
static struct nvkm_cstate *
nvkm_cstate_find_best(struct nvkm_clk *clk, struct nvkm_pstate *pstate,
		      struct nvkm_cstate *start)
{
	struct nvkm_device *device = clk->subdev.device;
	struct nvkm_volt *volt = device->volt;
	struct nvkm_cstate *cstate;
	int max_volt;

	if (!pstate || !start)
		return NULL;

	/* No voltage subdev: every cstate is electrically acceptable. */
	if (!volt)
		return start;

	/* The effective ceiling is the lowest of the hard maximum and any
	 * of the three temperature-dependent voltage-map entries. */
	max_volt = volt->max_uv;
	if (volt->max0_id != 0xff)
		max_volt = min(max_volt,
			       nvkm_volt_map(volt, volt->max0_id, clk->temp));
	if (volt->max1_id != 0xff)
		max_volt = min(max_volt,
			       nvkm_volt_map(volt, volt->max1_id, clk->temp));
	if (volt->max2_id != 0xff)
		max_volt = min(max_volt,
			       nvkm_volt_map(volt, volt->max2_id, clk->temp));

	/* Walk from @start towards the list head (previous entries). */
	for (cstate = start; &cstate->head != &pstate->list;
	     cstate = list_entry(cstate->head.prev, typeof(*cstate), head)) {
		if (nvkm_cstate_valid(clk, cstate, max_volt, clk->temp))
			break;
	}

	return cstate;
}
  128. static struct nvkm_cstate *
  129. nvkm_cstate_get(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
  130. {
  131. struct nvkm_cstate *cstate;
  132. if (cstatei == NVKM_CLK_CSTATE_HIGHEST)
  133. return list_last_entry(&pstate->list, typeof(*cstate), head);
  134. else {
  135. list_for_each_entry(cstate, &pstate->list, head) {
  136. if (cstate->id == cstatei)
  137. return cstate;
  138. }
  139. }
  140. return NULL;
  141. }
/* Program a cstate of @pstate, bracketing the clock change with fan-speed
 * and voltage raise/lower requests.
 *
 * Order matters: fan and voltage are raised (+1) before reprogramming so
 * the chip can sustain the higher clocks, and lowered (-1) afterwards.
 * Failures while lowering are logged but do not override success.
 * NOTE(review): ret is reused for the lowering calls, so a calc/prog
 * failure can be masked by a later successful lower — appears intentional
 * upstream behaviour; confirm before changing.
 */
static int
nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
{
	struct nvkm_subdev *subdev = &clk->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_therm *therm = device->therm;
	struct nvkm_volt *volt = device->volt;
	struct nvkm_cstate *cstate;
	int ret;

	/* Pick the requested cstate (or the best valid one below it);
	 * fall back to the pstate's base clocks when no cstates exist. */
	if (!list_empty(&pstate->list)) {
		cstate = nvkm_cstate_get(clk, pstate, cstatei);
		cstate = nvkm_cstate_find_best(clk, pstate, cstate);
	} else {
		cstate = &pstate->base;
	}

	if (therm) {
		ret = nvkm_therm_cstate(therm, pstate->fanspeed, +1);
		if (ret && ret != -ENODEV) {
			nvkm_error(subdev, "failed to raise fan speed: %d\n", ret);
			return ret;
		}
	}

	if (volt) {
		ret = nvkm_volt_set_id(volt, cstate->voltage,
				       pstate->base.voltage, clk->temp, +1);
		if (ret && ret != -ENODEV) {
			nvkm_error(subdev, "failed to raise voltage: %d\n", ret);
			return ret;
		}
	}

	ret = clk->func->calc(clk, cstate);
	if (ret == 0) {
		ret = clk->func->prog(clk);
		clk->func->tidy(clk);
	}

	if (volt) {
		ret = nvkm_volt_set_id(volt, cstate->voltage,
				       pstate->base.voltage, clk->temp, -1);
		if (ret && ret != -ENODEV)
			nvkm_error(subdev, "failed to lower voltage: %d\n", ret);
	}

	if (therm) {
		ret = nvkm_therm_cstate(therm, pstate->fanspeed, -1);
		if (ret && ret != -ENODEV)
			nvkm_error(subdev, "failed to lower fan speed: %d\n", ret);
	}

	return ret;
}
/* Unlink a cstate from its pstate's list and free it. */
static void
nvkm_cstate_del(struct nvkm_cstate *cstate)
{
	list_del(&cstate->head);
	kfree(cstate);
}
/* Create a cstate from cstep table entry @idx and attach it to @pstate.
 *
 * The new cstate starts as a copy of the pstate's base clocks; every
 * CORE-flagged domain is then overridden with the (boost-adjusted)
 * cstep frequency.
 *
 * Returns 0 on success, -ENOENT when the table entry does not exist,
 * -EINVAL when the entry's voltage cannot be reached, -ENOMEM on
 * allocation failure.
 */
static int
nvkm_cstate_new(struct nvkm_clk *clk, int idx, struct nvkm_pstate *pstate)
{
	struct nvkm_bios *bios = clk->subdev.device->bios;
	struct nvkm_volt *volt = clk->subdev.device->volt;
	const struct nvkm_domain *domain = clk->domains;
	struct nvkm_cstate *cstate = NULL;
	struct nvbios_cstepX cstepX;
	u8 ver, hdr;
	u32 data;

	data = nvbios_cstepXp(bios, idx, &ver, &hdr, &cstepX);
	if (!data)
		return -ENOENT;

	/* Skip cstates the board can never supply enough voltage for. */
	if (volt && nvkm_volt_map_min(volt, cstepX.voltage) > volt->max_uv)
		return -EINVAL;

	cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
	if (!cstate)
		return -ENOMEM;

	/* Inherit all base clocks, then specialise below. */
	*cstate = pstate->base;
	cstate->voltage = cstepX.voltage;
	cstate->id = idx;

	while (domain && domain->name != nv_clk_src_max) {
		if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) {
			u32 freq = nvkm_clk_adjust(clk, true, pstate->pstate,
						   domain->bios, cstepX.freq);
			cstate->domain[domain->name] = freq;
		}
		domain++;
	}

	list_add(&cstate->head, &pstate->list);
	return 0;
}
  228. /******************************************************************************
  229. * P-States
  230. *****************************************************************************/
/* Switch the GPU to performance state index @pstatei.
 *
 * Adjusts the PCIe link, reprograms VRAM (retrying while the RAM code
 * asks for another pass by returning > 0), and finally programs the
 * highest valid cstate of the new pstate.
 * NOTE(review): assumes 0 <= pstatei < state_nr; with an out-of-range
 * index the list cursor would not point at a real pstate — confirm
 * callers guarantee the range.
 */
static int
nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
{
	struct nvkm_subdev *subdev = &clk->subdev;
	struct nvkm_fb *fb = subdev->device->fb;
	struct nvkm_pci *pci = subdev->device->pci;
	struct nvkm_pstate *pstate;
	int ret, idx = 0;

	/* Locate the pstatei'th entry of the state list. */
	list_for_each_entry(pstate, &clk->states, head) {
		if (idx++ == pstatei)
			break;
	}

	nvkm_debug(subdev, "setting performance state %d\n", pstatei);
	clk->pstate = pstatei;

	nvkm_pcie_set_link(pci, pstate->pcie_speed, pstate->pcie_width);

	if (fb && fb->ram && fb->ram->func->calc) {
		struct nvkm_ram *ram = fb->ram;
		int khz = pstate->base.domain[nv_clk_src_mem];
		do {
			ret = ram->func->calc(ram, khz);
			if (ret == 0)
				ret = ram->func->prog(ram);
		} while (ret > 0); /* > 0 means "run another pass" */
		ram->func->tidy(ram);
	}

	return nvkm_cstate_prog(clk, pstate, NVKM_CLK_CSTATE_HIGHEST);
}
/* Deferred-work handler that arbitrates and applies the target pstate.
 *
 * Policy: the user state for the active power source (AC/DC) wins if set
 * (>= 0); otherwise the automatic state is used, clamped upwards by the
 * dynamic (thermal) floor and downwards by the number of states. With no
 * states, or a user request of -1 ("disabled"), no reclock happens.
 */
static void
nvkm_pstate_work(struct work_struct *work)
{
	struct nvkm_clk *clk = container_of(work, typeof(*clk), work);
	struct nvkm_subdev *subdev = &clk->subdev;
	int pstate;

	/* Consume the pending flag; bail if nothing was requested. */
	if (!atomic_xchg(&clk->waiting, 0))
		return;
	clk->pwrsrc = power_supply_is_system_supplied();

	nvkm_trace(subdev, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d°C D %d\n",
		   clk->pstate, clk->pwrsrc, clk->ustate_ac, clk->ustate_dc,
		   clk->astate, clk->temp, clk->dstate);

	pstate = clk->pwrsrc ? clk->ustate_ac : clk->ustate_dc;
	if (clk->state_nr && pstate != -1) {
		pstate = (pstate < 0) ? clk->astate : pstate;
		pstate = min(pstate, clk->state_nr - 1);
		pstate = max(pstate, clk->dstate);
	} else {
		pstate = clk->pstate = -1;
	}

	nvkm_trace(subdev, "-> %d\n", pstate);
	if (pstate != clk->pstate) {
		int ret = nvkm_pstate_prog(clk, pstate);
		if (ret) {
			nvkm_error(subdev, "error setting pstate %d: %d\n",
				   pstate, ret);
		}
	}

	/* Release anyone blocked in nvkm_pstate_calc(wait=true). */
	wake_up_all(&clk->wait);
	nvkm_notify_get(&clk->pwrsrc_ntfy);
}
/* Request a pstate re-evaluation on the workqueue.
 *
 * The waiting flag must be set before scheduling so the worker sees the
 * request; with @wait true, block until the worker has consumed it.
 * Always returns 0.
 */
static int
nvkm_pstate_calc(struct nvkm_clk *clk, bool wait)
{
	atomic_set(&clk->waiting, 1);
	schedule_work(&clk->work);
	if (wait)
		wait_event(clk->wait, !atomic_read(&clk->waiting));
	return 0;
}
/* Log a one-line summary of a pstate: its id and, for up to three named
 * clock domains, the MHz value (or min-max range across its cstates).
 */
static void
nvkm_pstate_info(struct nvkm_clk *clk, struct nvkm_pstate *pstate)
{
	const struct nvkm_domain *clock = clk->domains - 1;
	struct nvkm_cstate *cstate;
	struct nvkm_subdev *subdev = &clk->subdev;
	char info[3][32] = { "", "", "" };
	char name[4] = "--"; /* placeholder when the pstate has no id */
	int i = -1;

	if (pstate->pstate != 0xff)
		snprintf(name, sizeof(name), "%02x", pstate->pstate);

	while ((++clock)->name != nv_clk_src_max) {
		u32 lo = pstate->base.domain[clock->name];
		u32 hi = lo;
		if (hi == 0)
			continue;
		nvkm_debug(subdev, "%02x: %10d KHz\n", clock->name, lo);
		/* Expand [lo, hi] across every cstate's frequency. */
		list_for_each_entry(cstate, &pstate->list, head) {
			u32 freq = cstate->domain[clock->name];
			lo = min(lo, freq);
			hi = max(hi, freq);
			nvkm_debug(subdev, "%10d KHz\n", freq);
		}
		/* Only domains with a display name make it into the
		 * summary line, at most ARRAY_SIZE(info) of them. */
		if (clock->mname && ++i < ARRAY_SIZE(info)) {
			lo /= clock->mdiv;
			hi /= clock->mdiv;
			if (lo == hi) {
				snprintf(info[i], sizeof(info[i]), "%s %d MHz",
					 clock->mname, lo);
			} else {
				snprintf(info[i], sizeof(info[i]),
					 "%s %d-%d MHz", clock->mname, lo, hi);
			}
		}
	}

	nvkm_debug(subdev, "%s: %s %s %s\n", name, info[0], info[1], info[2]);
}
  335. static void
  336. nvkm_pstate_del(struct nvkm_pstate *pstate)
  337. {
  338. struct nvkm_cstate *cstate, *temp;
  339. list_for_each_entry_safe(cstate, temp, &pstate->list, head) {
  340. nvkm_cstate_del(cstate);
  341. }
  342. list_del(&pstate->head);
  343. kfree(pstate);
  344. }
  345. static int
  346. nvkm_pstate_new(struct nvkm_clk *clk, int idx)
  347. {
  348. struct nvkm_bios *bios = clk->subdev.device->bios;
  349. const struct nvkm_domain *domain = clk->domains - 1;
  350. struct nvkm_pstate *pstate;
  351. struct nvkm_cstate *cstate;
  352. struct nvbios_cstepE cstepE;
  353. struct nvbios_perfE perfE;
  354. u8 ver, hdr, cnt, len;
  355. u32 data;
  356. data = nvbios_perfEp(bios, idx, &ver, &hdr, &cnt, &len, &perfE);
  357. if (!data)
  358. return -EINVAL;
  359. if (perfE.pstate == 0xff)
  360. return 0;
  361. pstate = kzalloc(sizeof(*pstate), GFP_KERNEL);
  362. cstate = &pstate->base;
  363. if (!pstate)
  364. return -ENOMEM;
  365. INIT_LIST_HEAD(&pstate->list);
  366. pstate->pstate = perfE.pstate;
  367. pstate->fanspeed = perfE.fanspeed;
  368. pstate->pcie_speed = perfE.pcie_speed;
  369. pstate->pcie_width = perfE.pcie_width;
  370. cstate->voltage = perfE.voltage;
  371. cstate->domain[nv_clk_src_core] = perfE.core;
  372. cstate->domain[nv_clk_src_shader] = perfE.shader;
  373. cstate->domain[nv_clk_src_mem] = perfE.memory;
  374. cstate->domain[nv_clk_src_vdec] = perfE.vdec;
  375. cstate->domain[nv_clk_src_dom6] = perfE.disp;
  376. while (ver >= 0x40 && (++domain)->name != nv_clk_src_max) {
  377. struct nvbios_perfS perfS;
  378. u8 sver = ver, shdr = hdr;
  379. u32 perfSe = nvbios_perfSp(bios, data, domain->bios,
  380. &sver, &shdr, cnt, len, &perfS);
  381. if (perfSe == 0 || sver != 0x40)
  382. continue;
  383. if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) {
  384. perfS.v40.freq = nvkm_clk_adjust(clk, false,
  385. pstate->pstate,
  386. domain->bios,
  387. perfS.v40.freq);
  388. }
  389. cstate->domain[domain->name] = perfS.v40.freq;
  390. }
  391. data = nvbios_cstepEm(bios, pstate->pstate, &ver, &hdr, &cstepE);
  392. if (data) {
  393. int idx = cstepE.index;
  394. do {
  395. nvkm_cstate_new(clk, idx, pstate);
  396. } while(idx--);
  397. }
  398. nvkm_pstate_info(clk, pstate);
  399. list_add_tail(&pstate->head, &clk->states);
  400. clk->state_nr++;
  401. return 0;
  402. }
  403. /******************************************************************************
  404. * Adjustment triggers
  405. *****************************************************************************/
  406. static int
  407. nvkm_clk_ustate_update(struct nvkm_clk *clk, int req)
  408. {
  409. struct nvkm_pstate *pstate;
  410. int i = 0;
  411. if (!clk->allow_reclock)
  412. return -ENOSYS;
  413. if (req != -1 && req != -2) {
  414. list_for_each_entry(pstate, &clk->states, head) {
  415. if (pstate->pstate == req)
  416. break;
  417. i++;
  418. }
  419. if (pstate->pstate != req)
  420. return -EINVAL;
  421. req = i;
  422. }
  423. return req + 2;
  424. }
/* Parse an "NvClkMode*" config-option string into a ustate value.
 *
 * "auto" maps to -2 (when reclocking is allowed), "disabled" to -1;
 * anything else is parsed as a pstate id and validated via
 * nvkm_clk_ustate_update().  Returns the value already biased by -2, i.e.
 * ready to store in ustate_ac/ustate_dc.
 */
static int
nvkm_clk_nstate(struct nvkm_clk *clk, const char *mode, int arglen)
{
	int ret = 1;

	if (clk->allow_reclock && !strncasecmpz(mode, "auto", arglen))
		return -2;

	if (strncasecmpz(mode, "disabled", arglen)) {
		char save = mode[arglen];
		long v;

		/* Temporarily NUL-terminate in place so kstrtol() can parse
		 * the substring; the const is cast away deliberately and
		 * the original byte is restored afterwards. */
		((char *)mode)[arglen] = '\0';
		if (!kstrtol(mode, 0, &v)) {
			ret = nvkm_clk_ustate_update(clk, v);
			if (ret < 0)
				ret = 1; /* invalid id -> "disabled" */
		}
		((char *)mode)[arglen] = save;
	}

	return ret - 2;
}
  444. int
  445. nvkm_clk_ustate(struct nvkm_clk *clk, int req, int pwr)
  446. {
  447. int ret = nvkm_clk_ustate_update(clk, req);
  448. if (ret >= 0) {
  449. if (ret -= 2, pwr) clk->ustate_ac = ret;
  450. else clk->ustate_dc = ret;
  451. return nvkm_pstate_calc(clk, true);
  452. }
  453. return ret;
  454. }
  455. int
  456. nvkm_clk_astate(struct nvkm_clk *clk, int req, int rel, bool wait)
  457. {
  458. if (!rel) clk->astate = req;
  459. if ( rel) clk->astate += rel;
  460. clk->astate = min(clk->astate, clk->state_nr - 1);
  461. clk->astate = max(clk->astate, 0);
  462. return nvkm_pstate_calc(clk, wait);
  463. }
  464. int
  465. nvkm_clk_tstate(struct nvkm_clk *clk, u8 temp)
  466. {
  467. if (clk->temp == temp)
  468. return 0;
  469. clk->temp = temp;
  470. return nvkm_pstate_calc(clk, false);
  471. }
  472. int
  473. nvkm_clk_dstate(struct nvkm_clk *clk, int req, int rel)
  474. {
  475. if (!rel) clk->dstate = req;
  476. if ( rel) clk->dstate += rel;
  477. clk->dstate = min(clk->dstate, clk->state_nr - 1);
  478. clk->dstate = max(clk->dstate, 0);
  479. return nvkm_pstate_calc(clk, true);
  480. }
/* Power-source change notification handler: kick off a non-blocking
 * pstate re-evaluation.  Returns NVKM_NOTIFY_DROP; the worker re-arms
 * the notifier via nvkm_notify_get() when it finishes.
 */
static int
nvkm_clk_pwrsrc(struct nvkm_notify *notify)
{
	struct nvkm_clk *clk =
		container_of(notify, typeof(*clk), pwrsrc_ntfy);
	nvkm_pstate_calc(clk, false);
	return NVKM_NOTIFY_DROP;
}
  489. /******************************************************************************
  490. * subdev base class implementation
  491. *****************************************************************************/
/* Read the current frequency of clock source @src via the
 * implementation-specific hook.
 */
int
nvkm_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
{
	return clk->func->read(clk, src);
}
/* Subdev fini: quiesce the power-source notifier, drain any pending
 * pstate work, then call the implementation's fini hook if present.
 */
static int
nvkm_clk_fini(struct nvkm_subdev *subdev, bool suspend)
{
	struct nvkm_clk *clk = nvkm_clk(subdev);
	nvkm_notify_put(&clk->pwrsrc_ntfy);
	flush_work(&clk->work);
	if (clk->func->fini)
		clk->func->fini(clk);
	return 0;
}
/* Subdev init: snapshot the boot clocks into bstate, then either defer
 * entirely to the implementation's init hook or set up default automatic
 * reclocking state.
 */
static int
nvkm_clk_init(struct nvkm_subdev *subdev)
{
	struct nvkm_clk *clk = nvkm_clk(subdev);
	const struct nvkm_domain *clock = clk->domains;
	int ret;

	memset(&clk->bstate, 0x00, sizeof(clk->bstate));
	INIT_LIST_HEAD(&clk->bstate.list);
	clk->bstate.pstate = 0xff; /* "no id" sentinel */

	/* Record the frequency every domain is currently running at. */
	while (clock->name != nv_clk_src_max) {
		ret = nvkm_clk_read(clk, clock->name);
		if (ret < 0) {
			nvkm_error(subdev, "%02x freq unknown\n", clock->name);
			return ret;
		}
		clk->bstate.base.domain[clock->name] = ret;
		clock++;
	}

	nvkm_pstate_info(clk, &clk->bstate);

	/* An implementation init hook takes over completely; the default
	 * astate/dstate/pstate setup below is then skipped. */
	if (clk->func->init)
		return clk->func->init(clk);

	clk->astate = clk->state_nr - 1;
	clk->dstate = 0;
	clk->pstate = -1;
	clk->temp = 90; /* reasonable default value */
	nvkm_pstate_calc(clk, true);
	return 0;
}
  535. static void *
  536. nvkm_clk_dtor(struct nvkm_subdev *subdev)
  537. {
  538. struct nvkm_clk *clk = nvkm_clk(subdev);
  539. struct nvkm_pstate *pstate, *temp;
  540. nvkm_notify_fini(&clk->pwrsrc_ntfy);
  541. /* Early return if the pstates have been provided statically */
  542. if (clk->func->pstates)
  543. return clk;
  544. list_for_each_entry_safe(pstate, temp, &clk->states, head) {
  545. nvkm_pstate_del(pstate);
  546. }
  547. return clk;
  548. }
/* Subdev base-class vtable for the clock subdev. */
static const struct nvkm_subdev_func
nvkm_clk = {
	.dtor = nvkm_clk_dtor,
	.init = nvkm_clk_init,
	.fini = nvkm_clk_fini,
};
/* Construct a clock subdev in caller-provided storage.
 *
 * Reads base/boost clock limits from the VBIOS vpstate table, builds the
 * pstate list (from the BIOS perf table unless the implementation supplies
 * a static one), registers the power-source notifier, and applies the
 * NvClkMode/NvClkModeAC/NvClkModeDC/NvBoost config options.
 *
 * Returns 0 on success or a negative error from notifier setup.
 */
int
nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device,
	      int index, bool allow_reclock, struct nvkm_clk *clk)
{
	struct nvkm_subdev *subdev = &clk->subdev;
	struct nvkm_bios *bios = device->bios;
	int ret, idx, arglen;
	const char *mode;
	struct nvbios_vpstate_header h;

	nvkm_subdev_ctor(&nvkm_clk, device, index, subdev);

	/* Pick up base/boost clock ceilings from the vpstate table, if any;
	 * nvkm_cstate_valid() enforces them later. */
	if (bios && !nvbios_vpstate_parse(bios, &h)) {
		struct nvbios_vpstate_entry base, boost;
		if (!nvbios_vpstate_entry(bios, &h, h.boost_id, &boost))
			clk->boost_khz = boost.clock_mhz * 1000;
		if (!nvbios_vpstate_entry(bios, &h, h.base_id, &base))
			clk->base_khz = base.clock_mhz * 1000;
	}

	clk->func = func;
	INIT_LIST_HEAD(&clk->states);
	clk->domains = func->domains;
	clk->ustate_ac = -1; /* -1 == user state disabled */
	clk->ustate_dc = -1;
	clk->allow_reclock = allow_reclock;

	INIT_WORK(&clk->work, nvkm_pstate_work);
	init_waitqueue_head(&clk->wait);
	atomic_set(&clk->waiting, 0);

	/* If no pstates are provided, try and fetch them from the BIOS */
	if (!func->pstates) {
		idx = 0;
		/* nvkm_pstate_new() returns non-zero past the last entry. */
		do {
			ret = nvkm_pstate_new(clk, idx++);
		} while (ret == 0);
	} else {
		for (idx = 0; idx < func->nr_pstates; idx++)
			list_add_tail(&func->pstates[idx].head, &clk->states);
		clk->state_nr = func->nr_pstates;
	}

	ret = nvkm_notify_init(NULL, &device->event, nvkm_clk_pwrsrc, true,
			       NULL, 0, 0, &clk->pwrsrc_ntfy);
	if (ret)
		return ret;

	/* NvClkMode sets both power sources; the AC/DC variants override. */
	mode = nvkm_stropt(device->cfgopt, "NvClkMode", &arglen);
	if (mode) {
		clk->ustate_ac = nvkm_clk_nstate(clk, mode, arglen);
		clk->ustate_dc = nvkm_clk_nstate(clk, mode, arglen);
	}

	mode = nvkm_stropt(device->cfgopt, "NvClkModeAC", &arglen);
	if (mode)
		clk->ustate_ac = nvkm_clk_nstate(clk, mode, arglen);

	mode = nvkm_stropt(device->cfgopt, "NvClkModeDC", &arglen);
	if (mode)
		clk->ustate_dc = nvkm_clk_nstate(clk, mode, arglen);

	clk->boost_mode = nvkm_longopt(device->cfgopt, "NvBoost",
				       NVKM_CLK_BOOST_NONE);
	return 0;
}
  611. int
  612. nvkm_clk_new_(const struct nvkm_clk_func *func, struct nvkm_device *device,
  613. int index, bool allow_reclock, struct nvkm_clk **pclk)
  614. {
  615. if (!(*pclk = kzalloc(sizeof(**pclk), GFP_KERNEL)))
  616. return -ENOMEM;
  617. return nvkm_clk_ctor(func, device, index, allow_reclock, *pclk);
  618. }