base.c

/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"

#include <core/client.h>
#include <core/option.h>

#include <nvif/class.h>
#include <nvif/if0002.h>
#include <nvif/if0003.h>
#include <nvif/ioctl.h>
#include <nvif/unpack.h>
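
/* Object hierarchy exposed by this file (summarised from the code below,
 * not taken from any external documentation): a client creates a single
 * nvkm_perfmon object per PM engine, which exposes query methods for
 * domains, signals and sources, and acts as parent for nvkm_perfdom
 * objects.  Each perfdom groups up to four nvkm_perfctr slots, each of
 * which selects up to four signals and up to eight sources per signal.
 */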

static u8
nvkm_pm_count_perfdom(struct nvkm_pm *pm)
{
	struct nvkm_perfdom *dom;
	u8 domain_nr = 0;

	list_for_each_entry(dom, &pm->domains, head)
		domain_nr++;

	return domain_nr;
}

static u16
nvkm_perfdom_count_perfsig(struct nvkm_perfdom *dom)
{
	u16 signal_nr = 0;
	int i;

	if (dom) {
		for (i = 0; i < dom->signal_nr; i++) {
			if (dom->signal[i].name)
				signal_nr++;
		}
	}
	return signal_nr;
}

static struct nvkm_perfdom *
nvkm_perfdom_find(struct nvkm_pm *pm, int di)
{
	struct nvkm_perfdom *dom;
	int tmp = 0;

	list_for_each_entry(dom, &pm->domains, head) {
		if (tmp++ == di)
			return dom;
	}
	return NULL;
}

static struct nvkm_perfsig *
nvkm_perfsig_find(struct nvkm_pm *pm, u8 di, u8 si, struct nvkm_perfdom **pdom)
{
	struct nvkm_perfdom *dom = *pdom;

	if (dom == NULL) {
		dom = nvkm_perfdom_find(pm, di);
		if (dom == NULL)
			return NULL;
		*pdom = dom;
	}

	if (!dom->signal[si].name)
		return NULL;
	return &dom->signal[si];
}

static u8
nvkm_perfsig_count_perfsrc(struct nvkm_perfsig *sig)
{
	u8 source_nr = 0, i;

	for (i = 0; i < ARRAY_SIZE(sig->source); i++) {
		if (sig->source[i])
			source_nr++;
	}
	return source_nr;
}

static struct nvkm_perfsrc *
nvkm_perfsrc_find(struct nvkm_pm *pm, struct nvkm_perfsig *sig, int si)
{
	struct nvkm_perfsrc *src;
	bool found = false;
	int tmp = 1; /* Sources ID start from 1 */
	u8 i;

	for (i = 0; i < ARRAY_SIZE(sig->source) && sig->source[i]; i++) {
		if (sig->source[i] == si) {
			found = true;
			break;
		}
	}

	if (found) {
		list_for_each_entry(src, &pm->sources, head) {
			if (tmp++ == si)
				return src;
		}
	}

	return NULL;
}
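
/* Source selection, as inferred from how enable/disable below consume
 * ctr->source[][]: the low bits of each entry are the 1-based source ID
 * looked up above, while the upper 32 bits carry the value to program
 * into that source's multiplexer field (mask/shift into the register at
 * src->addr).
 */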

static int
nvkm_perfsrc_enable(struct nvkm_pm *pm, struct nvkm_perfctr *ctr)
{
	struct nvkm_subdev *subdev = &pm->engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_perfdom *dom = NULL;
	struct nvkm_perfsig *sig;
	struct nvkm_perfsrc *src;
	u32 mask, value;
	int i, j;

	for (i = 0; i < 4; i++) {
		for (j = 0; j < 8 && ctr->source[i][j]; j++) {
			sig = nvkm_perfsig_find(pm, ctr->domain,
						ctr->signal[i], &dom);
			if (!sig)
				return -EINVAL;

			src = nvkm_perfsrc_find(pm, sig, ctr->source[i][j]);
			if (!src)
				return -EINVAL;

			/* set enable bit if needed */
			mask = value = 0x00000000;
			if (src->enable)
				mask = value = 0x80000000;
			mask |= (src->mask << src->shift);
			value |= ((ctr->source[i][j] >> 32) << src->shift);

			/* enable the source */
			nvkm_mask(device, src->addr, mask, value);
			nvkm_debug(subdev,
				   "enabled source %08x %08x %08x\n",
				   src->addr, mask, value);
		}
	}
	return 0;
}

static int
nvkm_perfsrc_disable(struct nvkm_pm *pm, struct nvkm_perfctr *ctr)
{
	struct nvkm_subdev *subdev = &pm->engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_perfdom *dom = NULL;
	struct nvkm_perfsig *sig;
	struct nvkm_perfsrc *src;
	u32 mask;
	int i, j;

	for (i = 0; i < 4; i++) {
		for (j = 0; j < 8 && ctr->source[i][j]; j++) {
			sig = nvkm_perfsig_find(pm, ctr->domain,
						ctr->signal[i], &dom);
			if (!sig)
				return -EINVAL;

			src = nvkm_perfsrc_find(pm, sig, ctr->source[i][j]);
			if (!src)
				return -EINVAL;

			/* unset enable bit if needed */
			mask = 0x00000000;
			if (src->enable)
				mask = 0x80000000;
			mask |= (src->mask << src->shift);

			/* disable the source */
			nvkm_mask(device, src->addr, mask, 0);
			nvkm_debug(subdev, "disabled source %08x %08x\n",
				   src->addr, mask);
		}
	}
	return 0;
}

/*******************************************************************************
 * Perfdom object classes
 ******************************************************************************/
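
/* Sampling protocol, as implemented below: INIT programs the allocated
 * counter slots and starts the first batch, SAMPLE latches the previous
 * batch on every domain and starts the next one, and READ returns the
 * values latched for this domain (or -EAGAIN until a clock value has
 * been captured).
 */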

static int
nvkm_perfdom_init(struct nvkm_perfdom *dom, void *data, u32 size)
{
	union {
		struct nvif_perfdom_init none;
	} *args = data;
	struct nvkm_object *object = &dom->object;
	struct nvkm_pm *pm = dom->perfmon->pm;
	int ret = -ENOSYS, i;

	nvif_ioctl(object, "perfdom init size %d\n", size);
	if (!(ret = nvif_unvers(ret, &data, &size, args->none))) {
		nvif_ioctl(object, "perfdom init\n");
	} else
		return ret;

	for (i = 0; i < 4; i++) {
		if (dom->ctr[i]) {
			dom->func->init(pm, dom, dom->ctr[i]);

			/* enable sources */
			nvkm_perfsrc_enable(pm, dom->ctr[i]);
		}
	}

	/* start next batch of counters for sampling */
	dom->func->next(pm, dom);
	return 0;
}

static int
nvkm_perfdom_sample(struct nvkm_perfdom *dom, void *data, u32 size)
{
	union {
		struct nvif_perfdom_sample none;
	} *args = data;
	struct nvkm_object *object = &dom->object;
	struct nvkm_pm *pm = dom->perfmon->pm;
	int ret = -ENOSYS;

	nvif_ioctl(object, "perfdom sample size %d\n", size);
	if (!(ret = nvif_unvers(ret, &data, &size, args->none))) {
		nvif_ioctl(object, "perfdom sample\n");
	} else
		return ret;
	pm->sequence++;

	/* sample previous batch of counters */
	list_for_each_entry(dom, &pm->domains, head)
		dom->func->next(pm, dom);

	return 0;
}

static int
nvkm_perfdom_read(struct nvkm_perfdom *dom, void *data, u32 size)
{
	union {
		struct nvif_perfdom_read_v0 v0;
	} *args = data;
	struct nvkm_object *object = &dom->object;
	struct nvkm_pm *pm = dom->perfmon->pm;
	int ret = -ENOSYS, i;

	nvif_ioctl(object, "perfdom read size %d\n", size);
	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
		nvif_ioctl(object, "perfdom read vers %d\n", args->v0.version);
	} else
		return ret;

	for (i = 0; i < 4; i++) {
		if (dom->ctr[i])
			dom->func->read(pm, dom, dom->ctr[i]);
	}

	if (!dom->clk)
		return -EAGAIN;

	for (i = 0; i < 4; i++)
		if (dom->ctr[i])
			args->v0.ctr[i] = dom->ctr[i]->ctr;
	args->v0.clk = dom->clk;
	return 0;
}

static int
nvkm_perfdom_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
	struct nvkm_perfdom *dom = nvkm_perfdom(object);

	switch (mthd) {
	case NVIF_PERFDOM_V0_INIT:
		return nvkm_perfdom_init(dom, data, size);
	case NVIF_PERFDOM_V0_SAMPLE:
		return nvkm_perfdom_sample(dom, data, size);
	case NVIF_PERFDOM_V0_READ:
		return nvkm_perfdom_read(dom, data, size);
	default:
		break;
	}
	return -EINVAL;
}

static void *
nvkm_perfdom_dtor(struct nvkm_object *object)
{
	struct nvkm_perfdom *dom = nvkm_perfdom(object);
	struct nvkm_pm *pm = dom->perfmon->pm;
	int i;

	for (i = 0; i < 4; i++) {
		struct nvkm_perfctr *ctr = dom->ctr[i];

		if (ctr) {
			nvkm_perfsrc_disable(pm, ctr);
			if (ctr->head.next)
				list_del(&ctr->head);
		}
		kfree(ctr);
	}

	return dom;
}
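
/* Allocate one counter for the given slot.  Signal pointers are converted
 * to indices relative to the domain's signal array, and the raw source
 * selections are copied as-is for later programming by
 * nvkm_perfsrc_enable().
 */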

static int
nvkm_perfctr_new(struct nvkm_perfdom *dom, int slot, u8 domain,
		 struct nvkm_perfsig *signal[4], u64 source[4][8],
		 u16 logic_op, struct nvkm_perfctr **pctr)
{
	struct nvkm_perfctr *ctr;
	int i, j;

	if (!dom)
		return -EINVAL;

	ctr = *pctr = kzalloc(sizeof(*ctr), GFP_KERNEL);
	if (!ctr)
		return -ENOMEM;
	ctr->domain = domain;
	ctr->logic_op = logic_op;
	ctr->slot = slot;
	for (i = 0; i < 4; i++) {
		if (signal[i]) {
			ctr->signal[i] = signal[i] - dom->signal;
			for (j = 0; j < 8; j++)
				ctr->source[i][j] = source[i][j];
		}
	}
	list_add_tail(&ctr->head, &dom->list);

	return 0;
}

static const struct nvkm_object_func
nvkm_perfdom = {
	.dtor = nvkm_perfdom_dtor,
	.mthd = nvkm_perfdom_mthd,
};
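
/* Construct a perfdom object from the nvif_perfdom_v0 arguments: for each
 * of the (up to four) requested counters, resolve its four signals and
 * eight sources per signal against the chosen domain, then allocate the
 * counter and attach it to the new object.
 */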

static int
nvkm_perfdom_new_(struct nvkm_perfmon *perfmon,
		  const struct nvkm_oclass *oclass, void *data, u32 size,
		  struct nvkm_object **pobject)
{
	union {
		struct nvif_perfdom_v0 v0;
	} *args = data;
	struct nvkm_pm *pm = perfmon->pm;
	struct nvkm_object *parent = oclass->parent;
	struct nvkm_perfdom *sdom = NULL;
	struct nvkm_perfctr *ctr[4] = {};
	struct nvkm_perfdom *dom;
	int c, s, m;
	int ret = -ENOSYS;

	nvif_ioctl(parent, "create perfdom size %d\n", size);
	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
		nvif_ioctl(parent, "create perfdom vers %d dom %d mode %02x\n",
			   args->v0.version, args->v0.domain, args->v0.mode);
	} else
		return ret;

	for (c = 0; c < ARRAY_SIZE(args->v0.ctr); c++) {
		struct nvkm_perfsig *sig[4] = {};
		u64 src[4][8] = {};

		for (s = 0; s < ARRAY_SIZE(args->v0.ctr[c].signal); s++) {
			sig[s] = nvkm_perfsig_find(pm, args->v0.domain,
						   args->v0.ctr[c].signal[s],
						   &sdom);
			if (args->v0.ctr[c].signal[s] && !sig[s])
				return -EINVAL;

			for (m = 0; m < 8; m++) {
				src[s][m] = args->v0.ctr[c].source[s][m];
				if (src[s][m] && !nvkm_perfsrc_find(pm, sig[s],
								    src[s][m]))
					return -EINVAL;
			}
		}

		ret = nvkm_perfctr_new(sdom, c, args->v0.domain, sig, src,
				       args->v0.ctr[c].logic_op, &ctr[c]);
		if (ret)
			return ret;
	}

	if (!sdom)
		return -EINVAL;

	if (!(dom = kzalloc(sizeof(*dom), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_object_ctor(&nvkm_perfdom, oclass, &dom->object);
	dom->perfmon = perfmon;
	*pobject = &dom->object;

	dom->func = sdom->func;
	dom->addr = sdom->addr;
	dom->mode = args->v0.mode;
	for (c = 0; c < ARRAY_SIZE(ctr); c++)
		dom->ctr[c] = ctr[c];
	return 0;
}

/*******************************************************************************
 * Perfmon object classes
 ******************************************************************************/
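
/* The three query methods below share the same iterator convention: the
 * caller passes iter == 0 to start the enumeration, each successful call
 * returns the next 1-based iterator value, and 0xff (0xffff for signals)
 * marks the end.
 */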

static int
nvkm_perfmon_mthd_query_domain(struct nvkm_perfmon *perfmon,
			       void *data, u32 size)
{
	union {
		struct nvif_perfmon_query_domain_v0 v0;
	} *args = data;
	struct nvkm_object *object = &perfmon->object;
	struct nvkm_pm *pm = perfmon->pm;
	struct nvkm_perfdom *dom;
	u8 domain_nr;
	int di, ret = -ENOSYS;

	nvif_ioctl(object, "perfmon query domain size %d\n", size);
	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
		nvif_ioctl(object, "perfmon domain vers %d iter %02x\n",
			   args->v0.version, args->v0.iter);
		di = (args->v0.iter & 0xff) - 1;
	} else
		return ret;

	domain_nr = nvkm_pm_count_perfdom(pm);
	if (di >= (int)domain_nr)
		return -EINVAL;

	if (di >= 0) {
		dom = nvkm_perfdom_find(pm, di);
		if (dom == NULL)
			return -EINVAL;

		args->v0.id = di;
		args->v0.signal_nr = nvkm_perfdom_count_perfsig(dom);
		strncpy(args->v0.name, dom->name, sizeof(args->v0.name) - 1);

		/* Currently only global counters (PCOUNTER) are implemented
		 * but this will be different for local counters (MP). */
		args->v0.counter_nr = 4;
	}

	if (++di < domain_nr) {
		args->v0.iter = ++di;
		return 0;
	}

	args->v0.iter = 0xff;
	return 0;
}

static int
nvkm_perfmon_mthd_query_signal(struct nvkm_perfmon *perfmon,
			       void *data, u32 size)
{
	union {
		struct nvif_perfmon_query_signal_v0 v0;
	} *args = data;
	struct nvkm_object *object = &perfmon->object;
	struct nvkm_pm *pm = perfmon->pm;
	struct nvkm_device *device = pm->engine.subdev.device;
	struct nvkm_perfdom *dom;
	struct nvkm_perfsig *sig;
	const bool all = nvkm_boolopt(device->cfgopt, "NvPmShowAll", false);
	const bool raw = nvkm_boolopt(device->cfgopt, "NvPmUnnamed", all);
	int ret = -ENOSYS, si;

	nvif_ioctl(object, "perfmon query signal size %d\n", size);
	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
		nvif_ioctl(object,
			   "perfmon query signal vers %d dom %d iter %04x\n",
			   args->v0.version, args->v0.domain, args->v0.iter);
		si = (args->v0.iter & 0xffff) - 1;
	} else
		return ret;

	dom = nvkm_perfdom_find(pm, args->v0.domain);
	if (dom == NULL || si >= (int)dom->signal_nr)
		return -EINVAL;

	if (si >= 0) {
		sig = &dom->signal[si];
		if (raw || !sig->name) {
			snprintf(args->v0.name, sizeof(args->v0.name),
				 "/%s/%02x", dom->name, si);
		} else {
			strncpy(args->v0.name, sig->name,
				sizeof(args->v0.name) - 1);
		}

		args->v0.signal = si;
		args->v0.source_nr = nvkm_perfsig_count_perfsrc(sig);
	}

	while (++si < dom->signal_nr) {
		if (all || dom->signal[si].name) {
			args->v0.iter = ++si;
			return 0;
		}
	}

	args->v0.iter = 0xffff;
	return 0;
}

static int
nvkm_perfmon_mthd_query_source(struct nvkm_perfmon *perfmon,
			       void *data, u32 size)
{
	union {
		struct nvif_perfmon_query_source_v0 v0;
	} *args = data;
	struct nvkm_object *object = &perfmon->object;
	struct nvkm_pm *pm = perfmon->pm;
	struct nvkm_perfdom *dom = NULL;
	struct nvkm_perfsig *sig;
	struct nvkm_perfsrc *src;
	u8 source_nr = 0;
	int si, ret = -ENOSYS;

	nvif_ioctl(object, "perfmon query source size %d\n", size);
	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
		nvif_ioctl(object,
			   "perfmon source vers %d dom %d sig %02x iter %02x\n",
			   args->v0.version, args->v0.domain, args->v0.signal,
			   args->v0.iter);
		si = (args->v0.iter & 0xff) - 1;
	} else
		return ret;

	sig = nvkm_perfsig_find(pm, args->v0.domain, args->v0.signal, &dom);
	if (!sig)
		return -EINVAL;

	source_nr = nvkm_perfsig_count_perfsrc(sig);
	if (si >= (int)source_nr)
		return -EINVAL;

	if (si >= 0) {
		src = nvkm_perfsrc_find(pm, sig, sig->source[si]);
		if (!src)
			return -EINVAL;

		args->v0.source = sig->source[si];
		args->v0.mask = src->mask;
		strncpy(args->v0.name, src->name, sizeof(args->v0.name) - 1);
	}

	if (++si < source_nr) {
		args->v0.iter = ++si;
		return 0;
	}

	args->v0.iter = 0xff;
	return 0;
}

static int
nvkm_perfmon_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
	struct nvkm_perfmon *perfmon = nvkm_perfmon(object);

	switch (mthd) {
	case NVIF_PERFMON_V0_QUERY_DOMAIN:
		return nvkm_perfmon_mthd_query_domain(perfmon, data, size);
	case NVIF_PERFMON_V0_QUERY_SIGNAL:
		return nvkm_perfmon_mthd_query_signal(perfmon, data, size);
	case NVIF_PERFMON_V0_QUERY_SOURCE:
		return nvkm_perfmon_mthd_query_source(perfmon, data, size);
	default:
		break;
	}
	return -EINVAL;
}

static int
nvkm_perfmon_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
		       struct nvkm_object **pobject)
{
	struct nvkm_perfmon *perfmon = nvkm_perfmon(oclass->parent);
	return nvkm_perfdom_new_(perfmon, oclass, data, size, pobject);
}

static int
nvkm_perfmon_child_get(struct nvkm_object *object, int index,
		       struct nvkm_oclass *oclass)
{
	if (index == 0) {
		oclass->base.oclass = NVIF_CLASS_PERFDOM;
		oclass->base.minver = 0;
		oclass->base.maxver = 0;
		oclass->ctor = nvkm_perfmon_child_new;
		return 0;
	}
	return -EINVAL;
}

static void *
nvkm_perfmon_dtor(struct nvkm_object *object)
{
	struct nvkm_perfmon *perfmon = nvkm_perfmon(object);
	struct nvkm_pm *pm = perfmon->pm;

	mutex_lock(&pm->engine.subdev.mutex);
	if (pm->perfmon == &perfmon->object)
		pm->perfmon = NULL;
	mutex_unlock(&pm->engine.subdev.mutex);
	return perfmon;
}

static const struct nvkm_object_func
nvkm_perfmon = {
	.dtor = nvkm_perfmon_dtor,
	.mthd = nvkm_perfmon_mthd,
	.sclass = nvkm_perfmon_child_get,
};

static int
nvkm_perfmon_new(struct nvkm_pm *pm, const struct nvkm_oclass *oclass,
		 void *data, u32 size, struct nvkm_object **pobject)
{
	struct nvkm_perfmon *perfmon;

	if (!(perfmon = kzalloc(sizeof(*perfmon), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_object_ctor(&nvkm_perfmon, oclass, &perfmon->object);
	perfmon->pm = pm;
	*pobject = &perfmon->object;
	return 0;
}

/*******************************************************************************
 * PPM engine/subdev functions
 ******************************************************************************/
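
/* Only one perfmon object may own the PM engine at a time: the first one
 * successfully created is recorded in pm->perfmon under the subdev mutex,
 * further attempts fail with -EBUSY, and the pointer is cleared again in
 * nvkm_perfmon_dtor().
 */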

static int
nvkm_pm_oclass_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
		   void *data, u32 size, struct nvkm_object **pobject)
{
	struct nvkm_pm *pm = nvkm_pm(oclass->engine);
	int ret;

	ret = nvkm_perfmon_new(pm, oclass, data, size, pobject);
	if (ret)
		return ret;

	mutex_lock(&pm->engine.subdev.mutex);
	if (pm->perfmon == NULL)
		pm->perfmon = *pobject;
	ret = (pm->perfmon == *pobject) ? 0 : -EBUSY;
	mutex_unlock(&pm->engine.subdev.mutex);
	return ret;
}

static const struct nvkm_device_oclass
nvkm_pm_oclass = {
	.base.oclass = NVIF_CLASS_PERFMON,
	.base.minver = -1,
	.base.maxver = -1,
	.ctor = nvkm_pm_oclass_new,
};

static int
nvkm_pm_oclass_get(struct nvkm_oclass *oclass, int index,
		   const struct nvkm_device_oclass **class)
{
	if (index == 0) {
		oclass->base = nvkm_pm_oclass.base;
		*class = &nvkm_pm_oclass;
		return index;
	}
	return 1;
}
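
/* Build the source list for one signal from its nvkm_specsrc table.
 * Sources are shared across signals: an existing entry is reused when one
 * with the same register address and shift is already on pm->sources,
 * otherwise a new entry named "<source>_<mux>" (from the spec names) is
 * allocated.  The signal stores 1-based source IDs, matching the lookup
 * in nvkm_perfsrc_find().
 */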

static int
nvkm_perfsrc_new(struct nvkm_pm *pm, struct nvkm_perfsig *sig,
		 const struct nvkm_specsrc *spec)
{
	const struct nvkm_specsrc *ssrc;
	const struct nvkm_specmux *smux;
	struct nvkm_perfsrc *src;
	u8 source_nr = 0;

	if (!spec) {
		/* No sources are defined for this signal. */
		return 0;
	}

	ssrc = spec;
	while (ssrc->name) {
		smux = ssrc->mux;
		while (smux->name) {
			bool found = false;
			u8 source_id = 0;
			u32 len;

			list_for_each_entry(src, &pm->sources, head) {
				if (src->addr == ssrc->addr &&
				    src->shift == smux->shift) {
					found = true;
					break;
				}
				source_id++;
			}

			if (!found) {
				src = kzalloc(sizeof(*src), GFP_KERNEL);
				if (!src)
					return -ENOMEM;

				src->addr = ssrc->addr;
				src->mask = smux->mask;
				src->shift = smux->shift;
				src->enable = smux->enable;

				len = strlen(ssrc->name) +
				      strlen(smux->name) + 2;
				src->name = kzalloc(len, GFP_KERNEL);
				if (!src->name) {
					kfree(src);
					return -ENOMEM;
				}
				snprintf(src->name, len, "%s_%s", ssrc->name,
					 smux->name);

				list_add_tail(&src->head, &pm->sources);
			}

			sig->source[source_nr++] = source_id + 1;
			smux++;
		}
		ssrc++;
	}

	return 0;
}
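
/* Register the performance domains described by a specdom table.  The
 * outer loop runs once per unit: unit 0 unconditionally, then one unit
 * per remaining set bit in mask.  The register window for unit i starts
 * at base + i * size_unit, and successive specdom entries within a unit
 * are spaced size_domain apart.
 */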

int
nvkm_perfdom_new(struct nvkm_pm *pm, const char *name, u32 mask,
		 u32 base, u32 size_unit, u32 size_domain,
		 const struct nvkm_specdom *spec)
{
	const struct nvkm_specdom *sdom;
	const struct nvkm_specsig *ssig;
	struct nvkm_perfdom *dom;
	int ret, i;

	for (i = 0; i == 0 || mask; i++) {
		u32 addr = base + (i * size_unit);
		if (i && !(mask & (1 << i)))
			continue;

		sdom = spec;
		while (sdom->signal_nr) {
			dom = kzalloc(struct_size(dom, signal, sdom->signal_nr),
				      GFP_KERNEL);
			if (!dom)
				return -ENOMEM;

			if (mask) {
				snprintf(dom->name, sizeof(dom->name),
					 "%s/%02x/%02x", name, i,
					 (int)(sdom - spec));
			} else {
				snprintf(dom->name, sizeof(dom->name),
					 "%s/%02x", name, (int)(sdom - spec));
			}

			list_add_tail(&dom->head, &pm->domains);
			INIT_LIST_HEAD(&dom->list);
			dom->func = sdom->func;
			dom->addr = addr;
			dom->signal_nr = sdom->signal_nr;

			ssig = (sdom++)->signal;
			while (ssig->name) {
				struct nvkm_perfsig *sig =
					&dom->signal[ssig->signal];
				sig->name = ssig->name;
				ret = nvkm_perfsrc_new(pm, sig, ssig->source);
				if (ret)
					return ret;
				ssig++;
			}

			addr += size_domain;
		}

		mask &= ~(1 << i);
	}

	return 0;
}

static int
nvkm_pm_fini(struct nvkm_engine *engine, bool suspend)
{
	struct nvkm_pm *pm = nvkm_pm(engine);
	if (pm->func->fini)
		pm->func->fini(pm);
	return 0;
}

static void *
nvkm_pm_dtor(struct nvkm_engine *engine)
{
	struct nvkm_pm *pm = nvkm_pm(engine);
	struct nvkm_perfdom *dom, *next_dom;
	struct nvkm_perfsrc *src, *next_src;

	list_for_each_entry_safe(dom, next_dom, &pm->domains, head) {
		list_del(&dom->head);
		kfree(dom);
	}

	list_for_each_entry_safe(src, next_src, &pm->sources, head) {
		list_del(&src->head);
		kfree(src->name);
		kfree(src);
	}

	return pm;
}
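
/* Shared constructor: stores the implementation's function table,
 * initialises the domain and source lists, and constructs the underlying
 * nvkm_engine.
 */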

static const struct nvkm_engine_func
nvkm_pm = {
	.dtor = nvkm_pm_dtor,
	.fini = nvkm_pm_fini,
	.base.sclass = nvkm_pm_oclass_get,
};

int
nvkm_pm_ctor(const struct nvkm_pm_func *func, struct nvkm_device *device,
	     int index, struct nvkm_pm *pm)
{
	pm->func = func;
	INIT_LIST_HEAD(&pm->domains);
	INIT_LIST_HEAD(&pm->sources);
	return nvkm_engine_ctor(&nvkm_pm, device, index, true, &pm->engine);
}