base.c 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865
  1. /*
  2. * Copyright 2013 Red Hat Inc.
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice shall be included in
  12. * all copies or substantial portions of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  17. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20. * OTHER DEALINGS IN THE SOFTWARE.
  21. *
  22. * Authors: Ben Skeggs
  23. */
  24. #include "priv.h"
  25. #include <core/client.h>
  26. #include <core/option.h>
  27. #include <nvif/class.h>
  28. #include <nvif/ioctl.h>
  29. #include <nvif/unpack.h>
  30. static u8
  31. nvkm_pm_count_perfdom(struct nvkm_pm *pm)
  32. {
  33. struct nvkm_perfdom *dom;
  34. u8 domain_nr = 0;
  35. list_for_each_entry(dom, &pm->domains, head)
  36. domain_nr++;
  37. return domain_nr;
  38. }
  39. static u16
  40. nvkm_perfdom_count_perfsig(struct nvkm_perfdom *dom)
  41. {
  42. u16 signal_nr = 0;
  43. int i;
  44. if (dom) {
  45. for (i = 0; i < dom->signal_nr; i++) {
  46. if (dom->signal[i].name)
  47. signal_nr++;
  48. }
  49. }
  50. return signal_nr;
  51. }
  52. static struct nvkm_perfdom *
  53. nvkm_perfdom_find(struct nvkm_pm *pm, int di)
  54. {
  55. struct nvkm_perfdom *dom;
  56. int tmp = 0;
  57. list_for_each_entry(dom, &pm->domains, head) {
  58. if (tmp++ == di)
  59. return dom;
  60. }
  61. return NULL;
  62. }
  63. struct nvkm_perfsig *
  64. nvkm_perfsig_find(struct nvkm_pm *pm, u8 di, u8 si, struct nvkm_perfdom **pdom)
  65. {
  66. struct nvkm_perfdom *dom = *pdom;
  67. if (dom == NULL) {
  68. dom = nvkm_perfdom_find(pm, di);
  69. if (dom == NULL)
  70. return NULL;
  71. *pdom = dom;
  72. }
  73. if (!dom->signal[si].name)
  74. return NULL;
  75. return &dom->signal[si];
  76. }
  77. static u8
  78. nvkm_perfsig_count_perfsrc(struct nvkm_perfsig *sig)
  79. {
  80. u8 source_nr = 0, i;
  81. for (i = 0; i < ARRAY_SIZE(sig->source); i++) {
  82. if (sig->source[i])
  83. source_nr++;
  84. }
  85. return source_nr;
  86. }
  87. static struct nvkm_perfsrc *
  88. nvkm_perfsrc_find(struct nvkm_pm *pm, struct nvkm_perfsig *sig, int si)
  89. {
  90. struct nvkm_perfsrc *src;
  91. bool found = false;
  92. int tmp = 1; /* Sources ID start from 1 */
  93. u8 i;
  94. for (i = 0; i < ARRAY_SIZE(sig->source) && sig->source[i]; i++) {
  95. if (sig->source[i] == si) {
  96. found = true;
  97. break;
  98. }
  99. }
  100. if (found) {
  101. list_for_each_entry(src, &pm->sources, head) {
  102. if (tmp++ == si)
  103. return src;
  104. }
  105. }
  106. return NULL;
  107. }
/* Program the hardware multiplexers for every source used by counter ctr.
 *
 * For each of the 4 signal slots and up to 8 sources per slot (the source
 * array is zero-terminated), the source's selection value — stored in the
 * upper 32 bits of ctr->source[i][j] — is shifted into its mux field and
 * written via nvkm_mask(); bit 31 is additionally set when the source
 * needs an explicit enable.  Returns -EINVAL if a referenced signal or
 * source cannot be resolved.
 */
static int
nvkm_perfsrc_enable(struct nvkm_pm *pm, struct nvkm_perfctr *ctr)
{
	struct nvkm_subdev *subdev = &pm->engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_perfdom *dom = NULL;
	struct nvkm_perfsig *sig;
	struct nvkm_perfsrc *src;
	u32 mask, value;
	int i, j;

	for (i = 0; i < 4; i++) {
		for (j = 0; j < 8 && ctr->source[i][j]; j++) {
			sig = nvkm_perfsig_find(pm, ctr->domain,
						ctr->signal[i], &dom);
			if (!sig)
				return -EINVAL;

			/* low 32 bits of source[i][j] hold the source ID */
			src = nvkm_perfsrc_find(pm, sig, ctr->source[i][j]);
			if (!src)
				return -EINVAL;

			/* set enable bit if needed */
			mask = value = 0x00000000;
			if (src->enable)
				mask = value = 0x80000000;
			mask |= (src->mask << src->shift);
			value |= ((ctr->source[i][j] >> 32) << src->shift);

			/* enable the source */
			nvkm_mask(device, src->addr, mask, value);
			nvkm_debug(subdev,
				   "enabled source %08x %08x %08x\n",
				   src->addr, mask, value);
		}
	}
	return 0;
}
/* Undo nvkm_perfsrc_enable(): clear the mux field (and the bit-31 enable,
 * where the source has one) for every source used by counter ctr.
 * Returns -EINVAL if a referenced signal or source cannot be resolved.
 */
static int
nvkm_perfsrc_disable(struct nvkm_pm *pm, struct nvkm_perfctr *ctr)
{
	struct nvkm_subdev *subdev = &pm->engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_perfdom *dom = NULL;
	struct nvkm_perfsig *sig;
	struct nvkm_perfsrc *src;
	u32 mask;
	int i, j;

	for (i = 0; i < 4; i++) {
		for (j = 0; j < 8 && ctr->source[i][j]; j++) {
			sig = nvkm_perfsig_find(pm, ctr->domain,
						ctr->signal[i], &dom);
			if (!sig)
				return -EINVAL;

			src = nvkm_perfsrc_find(pm, sig, ctr->source[i][j]);
			if (!src)
				return -EINVAL;

			/* unset enable bit if needed */
			mask = 0x00000000;
			if (src->enable)
				mask = 0x80000000;
			mask |= (src->mask << src->shift);

			/* disable the source */
			nvkm_mask(device, src->addr, mask, 0);
			nvkm_debug(subdev, "disabled source %08x %08x\n",
				   src->addr, mask);
		}
	}
	return 0;
}
  174. /*******************************************************************************
  175. * Perfdom object classes
  176. ******************************************************************************/
/* NVIF_PERFDOM_V0_INIT handler: program each allocated counter into the
 * hardware, enable its sources, and kick off the first sampling batch.
 * The method takes no versioned arguments (nvif_unvers).
 */
static int
nvkm_perfdom_init(struct nvkm_perfdom *dom, void *data, u32 size)
{
	union {
		struct nvif_perfdom_init none;
	} *args = data;
	struct nvkm_object *object = &dom->object;
	struct nvkm_pm *pm = dom->perfmon->pm;
	int ret, i;

	nvif_ioctl(object, "perfdom init size %d\n", size);
	/* NOTE(review): 'ret' is assigned inside the nvif_unvers() macro
	 * (nvif/unpack.h); it is not used uninitialized here. */
	if (nvif_unvers(args->none)) {
		nvif_ioctl(object, "perfdom init\n");
	} else
		return ret;

	for (i = 0; i < 4; i++) {
		if (dom->ctr[i]) {
			dom->func->init(pm, dom, dom->ctr[i]);

			/* enable sources */
			nvkm_perfsrc_enable(pm, dom->ctr[i]);
		}
	}

	/* start next batch of counters for sampling */
	dom->func->next(pm, dom);
	return 0;
}
/* NVIF_PERFDOM_V0_SAMPLE handler: bump the PM sample sequence number and
 * latch the previous batch of counters on every registered domain.
 * The method takes no versioned arguments (nvif_unvers).
 */
static int
nvkm_perfdom_sample(struct nvkm_perfdom *dom, void *data, u32 size)
{
	union {
		struct nvif_perfdom_sample none;
	} *args = data;
	struct nvkm_object *object = &dom->object;
	struct nvkm_pm *pm = dom->perfmon->pm;
	int ret;

	nvif_ioctl(object, "perfdom sample size %d\n", size);
	/* NOTE(review): 'ret' is assigned inside the nvif_unvers() macro. */
	if (nvif_unvers(args->none)) {
		nvif_ioctl(object, "perfdom sample\n");
	} else
		return ret;
	pm->sequence++;

	/* sample previous batch of counters; note that 'dom' is reused as
	 * the iteration cursor over all domains here */
	list_for_each_entry(dom, &pm->domains, head)
		dom->func->next(pm, dom);

	return 0;
}
/* NVIF_PERFDOM_V0_READ handler: read back the four counter values and the
 * domain clock into the v0 reply.  Returns -EAGAIN until a sample has
 * completed (dom->clk non-zero).
 */
static int
nvkm_perfdom_read(struct nvkm_perfdom *dom, void *data, u32 size)
{
	union {
		struct nvif_perfdom_read_v0 v0;
	} *args = data;
	struct nvkm_object *object = &dom->object;
	struct nvkm_pm *pm = dom->perfmon->pm;
	int ret, i;

	nvif_ioctl(object, "perfdom read size %d\n", size);
	/* NOTE(review): 'ret' is assigned inside the nvif_unpack() macro. */
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(object, "perfdom read vers %d\n", args->v0.version);
	} else
		return ret;

	for (i = 0; i < 4; i++) {
		if (dom->ctr[i])
			dom->func->read(pm, dom, dom->ctr[i]);
	}

	/* no clock value means no sample has been captured yet */
	if (!dom->clk)
		return -EAGAIN;

	for (i = 0; i < 4; i++)
		if (dom->ctr[i])
			args->v0.ctr[i] = dom->ctr[i]->ctr;
	args->v0.clk = dom->clk;
	return 0;
}
  248. static int
  249. nvkm_perfdom_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
  250. {
  251. struct nvkm_perfdom *dom = nvkm_perfdom(object);
  252. switch (mthd) {
  253. case NVIF_PERFDOM_V0_INIT:
  254. return nvkm_perfdom_init(dom, data, size);
  255. case NVIF_PERFDOM_V0_SAMPLE:
  256. return nvkm_perfdom_sample(dom, data, size);
  257. case NVIF_PERFDOM_V0_READ:
  258. return nvkm_perfdom_read(dom, data, size);
  259. default:
  260. break;
  261. }
  262. return -EINVAL;
  263. }
/* Destructor for a perfdom object: disable the sources of, unlink, and
 * free every counter it owns.  Returns the object pointer for the core
 * to kfree.
 */
static void *
nvkm_perfdom_dtor(struct nvkm_object *object)
{
	struct nvkm_perfdom *dom = nvkm_perfdom(object);
	struct nvkm_pm *pm = dom->perfmon->pm;
	int i;

	for (i = 0; i < 4; i++) {
		struct nvkm_perfctr *ctr = dom->ctr[i];
		if (ctr) {
			nvkm_perfsrc_disable(pm, ctr);
			/* only unlink counters that were ever list_add()ed;
			 * head.next is NULL on a kzalloc'd, unlinked entry */
			if (ctr->head.next)
				list_del(&ctr->head);
		}
		kfree(ctr);
	}
	return dom;
}
  281. static int
  282. nvkm_perfctr_new(struct nvkm_perfdom *dom, int slot, u8 domain,
  283. struct nvkm_perfsig *signal[4], u64 source[4][8],
  284. u16 logic_op, struct nvkm_perfctr **pctr)
  285. {
  286. struct nvkm_perfctr *ctr;
  287. int i, j;
  288. if (!dom)
  289. return -EINVAL;
  290. ctr = *pctr = kzalloc(sizeof(*ctr), GFP_KERNEL);
  291. if (!ctr)
  292. return -ENOMEM;
  293. ctr->domain = domain;
  294. ctr->logic_op = logic_op;
  295. ctr->slot = slot;
  296. for (i = 0; i < 4; i++) {
  297. if (signal[i]) {
  298. ctr->signal[i] = signal[i] - dom->signal;
  299. for (j = 0; j < 8; j++)
  300. ctr->source[i][j] = source[i][j];
  301. }
  302. }
  303. list_add_tail(&ctr->head, &dom->list);
  304. return 0;
  305. }
/* Object operations for perfdom handles exposed to clients. */
static const struct nvkm_object_func
nvkm_perfdom = {
	.dtor = nvkm_perfdom_dtor,
	.mthd = nvkm_perfdom_mthd,
};
  311. static int
  312. nvkm_perfdom_new_(struct nvkm_perfmon *perfmon,
  313. const struct nvkm_oclass *oclass, void *data, u32 size,
  314. struct nvkm_object **pobject)
  315. {
  316. union {
  317. struct nvif_perfdom_v0 v0;
  318. } *args = data;
  319. struct nvkm_pm *pm = perfmon->pm;
  320. struct nvkm_object *parent = oclass->parent;
  321. struct nvkm_perfdom *sdom = NULL;
  322. struct nvkm_perfctr *ctr[4] = {};
  323. struct nvkm_perfdom *dom;
  324. int c, s, m;
  325. int ret;
  326. nvif_ioctl(parent, "create perfdom size %d\n", size);
  327. if (nvif_unpack(args->v0, 0, 0, false)) {
  328. nvif_ioctl(parent, "create perfdom vers %d dom %d mode %02x\n",
  329. args->v0.version, args->v0.domain, args->v0.mode);
  330. } else
  331. return ret;
  332. for (c = 0; c < ARRAY_SIZE(args->v0.ctr); c++) {
  333. struct nvkm_perfsig *sig[4] = {};
  334. u64 src[4][8] = {};
  335. for (s = 0; s < ARRAY_SIZE(args->v0.ctr[c].signal); s++) {
  336. sig[s] = nvkm_perfsig_find(pm, args->v0.domain,
  337. args->v0.ctr[c].signal[s],
  338. &sdom);
  339. if (args->v0.ctr[c].signal[s] && !sig[s])
  340. return -EINVAL;
  341. for (m = 0; m < 8; m++) {
  342. src[s][m] = args->v0.ctr[c].source[s][m];
  343. if (src[s][m] && !nvkm_perfsrc_find(pm, sig[s],
  344. src[s][m]))
  345. return -EINVAL;
  346. }
  347. }
  348. ret = nvkm_perfctr_new(sdom, c, args->v0.domain, sig, src,
  349. args->v0.ctr[c].logic_op, &ctr[c]);
  350. if (ret)
  351. return ret;
  352. }
  353. if (!sdom)
  354. return -EINVAL;
  355. if (!(dom = kzalloc(sizeof(*dom), GFP_KERNEL)))
  356. return -ENOMEM;
  357. nvkm_object_ctor(&nvkm_perfdom, oclass, &dom->object);
  358. dom->perfmon = perfmon;
  359. *pobject = &dom->object;
  360. dom->func = sdom->func;
  361. dom->addr = sdom->addr;
  362. dom->mode = args->v0.mode;
  363. for (c = 0; c < ARRAY_SIZE(ctr); c++)
  364. dom->ctr[c] = ctr[c];
  365. return 0;
  366. }
  367. /*******************************************************************************
  368. * Perfmon object classes
  369. ******************************************************************************/
/* NVIF_PERFMON_V0_QUERY_DOMAIN handler: iterator-style enumeration of
 * performance domains.  args->v0.iter is the cursor, encoding index + 1:
 * 0 starts the walk (nothing is filled on that call), 0xff marks the end.
 */
static int
nvkm_perfmon_mthd_query_domain(struct nvkm_perfmon *perfmon,
			       void *data, u32 size)
{
	union {
		struct nvif_perfmon_query_domain_v0 v0;
	} *args = data;
	struct nvkm_object *object = &perfmon->object;
	struct nvkm_pm *pm = perfmon->pm;
	struct nvkm_perfdom *dom;
	u8 domain_nr;
	int di, ret;

	nvif_ioctl(object, "perfmon query domain size %d\n", size);
	/* NOTE(review): 'ret' is assigned inside the nvif_unpack() macro. */
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(object, "perfmon domain vers %d iter %02x\n",
			   args->v0.version, args->v0.iter);
		/* decode cursor: iter == 0 yields di == -1 (walk start) */
		di = (args->v0.iter & 0xff) - 1;
	} else
		return ret;

	domain_nr = nvkm_pm_count_perfdom(pm);
	if (di >= (int)domain_nr)
		return -EINVAL;

	if (di >= 0) {
		dom = nvkm_perfdom_find(pm, di);
		if (dom == NULL)
			return -EINVAL;

		args->v0.id = di;
		args->v0.signal_nr = nvkm_perfdom_count_perfsig(dom);
		strncpy(args->v0.name, dom->name, sizeof(args->v0.name));

		/* Currently only global counters (PCOUNTER) are implemented
		 * but this will be different for local counters (MP). */
		args->v0.counter_nr = 4;
	}

	/* advance: first ++ moves to the next index, second re-encodes it
	 * as index + 1 for the cursor */
	if (++di < domain_nr) {
		args->v0.iter = ++di;
		return 0;
	}

	args->v0.iter = 0xff;
	return 0;
}
/* NVIF_PERFMON_V0_QUERY_SIGNAL handler: enumerate the signals of one
 * domain.  iter is a 16-bit cursor encoding index + 1 (0 starts the walk,
 * 0xffff marks the end).  The "NvPmShowAll" config option also reports
 * unnamed slots; "NvPmUnnamed" substitutes raw "/domain/index" names for
 * the symbolic ones.
 */
static int
nvkm_perfmon_mthd_query_signal(struct nvkm_perfmon *perfmon,
			       void *data, u32 size)
{
	union {
		struct nvif_perfmon_query_signal_v0 v0;
	} *args = data;
	struct nvkm_object *object = &perfmon->object;
	struct nvkm_pm *pm = perfmon->pm;
	struct nvkm_device *device = pm->engine.subdev.device;
	struct nvkm_perfdom *dom;
	struct nvkm_perfsig *sig;
	const bool all = nvkm_boolopt(device->cfgopt, "NvPmShowAll", false);
	const bool raw = nvkm_boolopt(device->cfgopt, "NvPmUnnamed", all);
	int ret, si;

	nvif_ioctl(object, "perfmon query signal size %d\n", size);
	/* NOTE(review): 'ret' is assigned inside the nvif_unpack() macro. */
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(object,
			   "perfmon query signal vers %d dom %d iter %04x\n",
			   args->v0.version, args->v0.domain, args->v0.iter);
		/* decode cursor: iter == 0 yields si == -1 (walk start) */
		si = (args->v0.iter & 0xffff) - 1;
	} else
		return ret;

	dom = nvkm_perfdom_find(pm, args->v0.domain);
	if (dom == NULL || si >= (int)dom->signal_nr)
		return -EINVAL;

	if (si >= 0) {
		sig = &dom->signal[si];
		if (raw || !sig->name) {
			snprintf(args->v0.name, sizeof(args->v0.name),
				 "/%s/%02x", dom->name, si);
		} else {
			strncpy(args->v0.name, sig->name,
				sizeof(args->v0.name));
		}

		args->v0.signal = si;
		args->v0.source_nr = nvkm_perfsig_count_perfsrc(sig);
	}

	/* advance to the next reportable signal (named, unless showing
	 * all), re-encoding it as index + 1; otherwise terminate */
	while (++si < dom->signal_nr) {
		if (all || dom->signal[si].name) {
			args->v0.iter = ++si;
			return 0;
		}
	}

	args->v0.iter = 0xffff;
	return 0;
}
/* NVIF_PERFMON_V0_QUERY_SOURCE handler: enumerate the sources of one
 * signal.  iter is an 8-bit cursor encoding index + 1 (0 starts the walk,
 * 0xff marks the end).
 */
static int
nvkm_perfmon_mthd_query_source(struct nvkm_perfmon *perfmon,
			       void *data, u32 size)
{
	union {
		struct nvif_perfmon_query_source_v0 v0;
	} *args = data;
	struct nvkm_object *object = &perfmon->object;
	struct nvkm_pm *pm = perfmon->pm;
	struct nvkm_perfdom *dom = NULL;
	struct nvkm_perfsig *sig;
	struct nvkm_perfsrc *src;
	u8 source_nr = 0;
	int si, ret;

	nvif_ioctl(object, "perfmon query source size %d\n", size);
	/* NOTE(review): 'ret' is assigned inside the nvif_unpack() macro. */
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(object,
			   "perfmon source vers %d dom %d sig %02x iter %02x\n",
			   args->v0.version, args->v0.domain, args->v0.signal,
			   args->v0.iter);
		/* decode cursor: iter == 0 yields si == -1 (walk start) */
		si = (args->v0.iter & 0xff) - 1;
	} else
		return ret;

	sig = nvkm_perfsig_find(pm, args->v0.domain, args->v0.signal, &dom);
	if (!sig)
		return -EINVAL;

	source_nr = nvkm_perfsig_count_perfsrc(sig);
	if (si >= (int)source_nr)
		return -EINVAL;

	if (si >= 0) {
		src = nvkm_perfsrc_find(pm, sig, sig->source[si]);
		if (!src)
			return -EINVAL;

		args->v0.source = sig->source[si];
		args->v0.mask = src->mask;
		strncpy(args->v0.name, src->name, sizeof(args->v0.name));
	}

	/* advance the cursor (re-encoded as index + 1), or terminate */
	if (++si < source_nr) {
		args->v0.iter = ++si;
		return 0;
	}

	args->v0.iter = 0xff;
	return 0;
}
  501. static int
  502. nvkm_perfmon_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
  503. {
  504. struct nvkm_perfmon *perfmon = nvkm_perfmon(object);
  505. switch (mthd) {
  506. case NVIF_PERFMON_V0_QUERY_DOMAIN:
  507. return nvkm_perfmon_mthd_query_domain(perfmon, data, size);
  508. case NVIF_PERFMON_V0_QUERY_SIGNAL:
  509. return nvkm_perfmon_mthd_query_signal(perfmon, data, size);
  510. case NVIF_PERFMON_V0_QUERY_SOURCE:
  511. return nvkm_perfmon_mthd_query_source(perfmon, data, size);
  512. default:
  513. break;
  514. }
  515. return -EINVAL;
  516. }
  517. static int
  518. nvkm_perfmon_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
  519. struct nvkm_object **pobject)
  520. {
  521. struct nvkm_perfmon *perfmon = nvkm_perfmon(oclass->parent);
  522. return nvkm_perfdom_new_(perfmon, oclass, data, size, pobject);
  523. }
  524. static int
  525. nvkm_perfmon_child_get(struct nvkm_object *object, int index,
  526. struct nvkm_oclass *oclass)
  527. {
  528. if (index == 0) {
  529. oclass->base.oclass = NVIF_IOCTL_NEW_V0_PERFDOM;
  530. oclass->base.minver = 0;
  531. oclass->base.maxver = 0;
  532. oclass->ctor = nvkm_perfmon_child_new;
  533. return 0;
  534. }
  535. return -EINVAL;
  536. }
/* Destructor for a perfmon object: drop the PM engine's exclusive-owner
 * reference if this object holds it (under the subdev mutex).  Returns
 * the object for the core to kfree.
 */
static void *
nvkm_perfmon_dtor(struct nvkm_object *object)
{
	struct nvkm_perfmon *perfmon = nvkm_perfmon(object);
	struct nvkm_pm *pm = perfmon->pm;

	mutex_lock(&pm->engine.subdev.mutex);
	if (pm->perfmon == &perfmon->object)
		pm->perfmon = NULL;
	mutex_unlock(&pm->engine.subdev.mutex);
	return perfmon;
}
  548. static struct nvkm_object_func
  549. nvkm_perfmon = {
  550. .dtor = nvkm_perfmon_dtor,
  551. .mthd = nvkm_perfmon_mthd,
  552. .sclass = nvkm_perfmon_child_get,
  553. };
  554. static int
  555. nvkm_perfmon_new(struct nvkm_pm *pm, const struct nvkm_oclass *oclass,
  556. void *data, u32 size, struct nvkm_object **pobject)
  557. {
  558. struct nvkm_perfmon *perfmon;
  559. if (!(perfmon = kzalloc(sizeof(*perfmon), GFP_KERNEL)))
  560. return -ENOMEM;
  561. nvkm_object_ctor(&nvkm_perfmon, oclass, &perfmon->object);
  562. perfmon->pm = pm;
  563. *pobject = &perfmon->object;
  564. return 0;
  565. }
  566. /*******************************************************************************
  567. * PPM engine/subdev functions
  568. ******************************************************************************/
/* Create the perfmon object for a client.  Only one client may hold the
 * perfmon at a time: the first creator is recorded in pm->perfmon under
 * the subdev mutex, and later creators get -EBUSY (their object is then
 * torn down by the caller via the normal object path).
 */
static int
nvkm_pm_oclass_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
		   void *data, u32 size, struct nvkm_object **pobject)
{
	struct nvkm_pm *pm = nvkm_pm(oclass->engine);
	int ret;

	ret = nvkm_perfmon_new(pm, oclass, data, size, pobject);
	if (ret)
		return ret;

	mutex_lock(&pm->engine.subdev.mutex);
	if (pm->perfmon == NULL)
		pm->perfmon = *pobject;
	ret = (pm->perfmon == *pobject) ? 0 : -EBUSY;
	mutex_unlock(&pm->engine.subdev.mutex);
	return ret;
}
/* Device-level class description for the perfmon object. */
static const struct nvkm_device_oclass
nvkm_pm_oclass = {
	.base.oclass = NVIF_IOCTL_NEW_V0_PERFMON,
	.base.minver = -1,
	.base.maxver = -1,
	.ctor = nvkm_pm_oclass_new,
};
  592. static int
  593. nvkm_pm_oclass_get(struct nvkm_oclass *oclass, int index,
  594. const struct nvkm_device_oclass **class)
  595. {
  596. if (index == 0) {
  597. oclass->base = nvkm_pm_oclass.base;
  598. *class = &nvkm_pm_oclass;
  599. return index;
  600. }
  601. return 1;
  602. }
/* Register the sources described by 'spec' for signal 'sig'.
 *
 * Sources live on a single engine-wide list (pm->sources); a source is
 * identified by its (register address, shift) pair and shared between
 * signals.  Each signal records the 1-based list position of its sources
 * in sig->source[] (0 terminates that table).
 *
 * NOTE(review): source_nr is not checked against ARRAY_SIZE(sig->source);
 * a spec with more mux entries than the array holds would overflow it —
 * confirm all specs stay within bounds.
 */
int
nvkm_perfsrc_new(struct nvkm_pm *pm, struct nvkm_perfsig *sig,
		 const struct nvkm_specsrc *spec)
{
	const struct nvkm_specsrc *ssrc;
	const struct nvkm_specmux *smux;
	struct nvkm_perfsrc *src;
	u8 source_nr = 0;

	if (!spec) {
		/* No sources are defined for this signal. */
		return 0;
	}

	ssrc = spec;
	while (ssrc->name) {
		smux = ssrc->mux;
		while (smux->name) {
			bool found = false;
			u8 source_id = 0;
			u32 len;

			/* reuse an existing source with the same register
			 * address and shift */
			list_for_each_entry(src, &pm->sources, head) {
				if (src->addr == ssrc->addr &&
				    src->shift == smux->shift) {
					found = true;
					break;
				}
				source_id++;
			}

			if (!found) {
				src = kzalloc(sizeof(*src), GFP_KERNEL);
				if (!src)
					return -ENOMEM;

				src->addr = ssrc->addr;
				src->mask = smux->mask;
				src->shift = smux->shift;
				src->enable = smux->enable;

				/* "<ssrc>_<smux>": +2 for '_' and '\0' */
				len = strlen(ssrc->name) +
				      strlen(smux->name) + 2;
				src->name = kzalloc(len, GFP_KERNEL);
				if (!src->name) {
					kfree(src);
					return -ENOMEM;
				}
				snprintf(src->name, len, "%s_%s", ssrc->name,
					 smux->name);

				list_add_tail(&src->head, &pm->sources);
			}

			/* IDs are 1-based; source_id is either the matched
			 * position or the tail where the new entry went */
			sig->source[source_nr++] = source_id + 1;
			smux++;
		}
		ssrc++;
	}

	return 0;
}
/* Instantiate performance domains from a chip-specific 'spec' table.
 *
 * 'mask' selects hardware unit instances: when non-zero, a set of domains
 * is created for each set bit i, based at base + i * size_unit; when
 * zero, a single set is created at 'base'.  Within a set, consecutive
 * spec entries are spaced size_domain apart.  Domains are linked onto
 * pm->domains (and freed by nvkm_pm_dtor()).
 */
int
nvkm_perfdom_new(struct nvkm_pm *pm, const char *name, u32 mask,
		 u32 base, u32 size_unit, u32 size_domain,
		 const struct nvkm_specdom *spec)
{
	const struct nvkm_specdom *sdom;
	const struct nvkm_specsig *ssig;
	struct nvkm_perfdom *dom;
	int ret, i;

	/* i == 0 always runs once; afterwards iterate while bits remain */
	for (i = 0; i == 0 || mask; i++) {
		u32 addr = base + (i * size_unit);
		if (i && !(mask & (1 << i)))
			continue;

		sdom = spec;
		while (sdom->signal_nr) {
			/* allocated with a trailing array of signal_nr
			 * signal slots */
			dom = kzalloc(sizeof(*dom) + sdom->signal_nr *
				      sizeof(*dom->signal), GFP_KERNEL);
			if (!dom)
				return -ENOMEM;

			/* "name/unit/domain" when per-unit, else
			 * "name/domain" */
			if (mask) {
				snprintf(dom->name, sizeof(dom->name),
					 "%s/%02x/%02x", name, i,
					 (int)(sdom - spec));
			} else {
				snprintf(dom->name, sizeof(dom->name),
					 "%s/%02x", name, (int)(sdom - spec));
			}

			list_add_tail(&dom->head, &pm->domains);
			INIT_LIST_HEAD(&dom->list);
			dom->func = sdom->func;
			dom->addr = addr;
			dom->signal_nr = sdom->signal_nr;

			/* populate named signal slots and their sources */
			ssig = (sdom++)->signal;
			while (ssig->name) {
				struct nvkm_perfsig *sig =
					&dom->signal[ssig->signal];
				sig->name = ssig->name;
				ret = nvkm_perfsrc_new(pm, sig, ssig->source);
				if (ret)
					return ret;
				ssig++;
			}

			addr += size_domain;
		}

		mask &= ~(1 << i);
	}

	return 0;
}
/* Engine fini hook: forward to the chip-specific fini when one is
 * implemented.  'suspend' is currently unused. */
static int
nvkm_pm_fini(struct nvkm_engine *engine, bool suspend)
{
	struct nvkm_pm *pm = nvkm_pm(engine);
	if (pm->func->fini)
		pm->func->fini(pm);
	return 0;
}
/* Engine destructor: release every domain and source registered at
 * construction time.  Returns 'pm' for the core to free.
 */
static void *
nvkm_pm_dtor(struct nvkm_engine *engine)
{
	struct nvkm_pm *pm = nvkm_pm(engine);
	struct nvkm_perfdom *dom, *next_dom;
	struct nvkm_perfsrc *src, *next_src;

	list_for_each_entry_safe(dom, next_dom, &pm->domains, head) {
		list_del(&dom->head);
		kfree(dom);
	}

	list_for_each_entry_safe(src, next_src, &pm->sources, head) {
		list_del(&src->head);
		/* name was allocated separately in nvkm_perfsrc_new() */
		kfree(src->name);
		kfree(src);
	}
	return pm;
}
/* Engine operations for the PM (performance monitoring) engine. */
static const struct nvkm_engine_func
nvkm_pm = {
	.dtor = nvkm_pm_dtor,
	.fini = nvkm_pm_fini,
	.base.sclass = nvkm_pm_oclass_get,
};
/* Common constructor for chip-specific PM implementations: record the
 * function table, initialize the domain/source lists, and construct the
 * base engine. */
int
nvkm_pm_ctor(const struct nvkm_pm_func *func, struct nvkm_device *device,
	     int index, struct nvkm_pm *pm)
{
	pm->func = func;
	INIT_LIST_HEAD(&pm->domains);
	INIT_LIST_HEAD(&pm->sources);
	return nvkm_engine_ctor(&nvkm_pm, device, index, 0, true, &pm->engine);
}