/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "gf100.h"
#include "changf100.h"

#include <core/client.h>
#include <core/enum.h>
#include <core/gpuobj.h>
#include <subdev/bar.h>
#include <engine/sw.h>

#include <nvif/class.h>
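
/* PFIFO (command submission) implementation for GF100 (Fermi) GPUs.
 * As with most of nouveau, the register-level detail below comes from
 * reverse engineering; unnamed registers and "unk" bits are exactly
 * that, and comments on their meaning are best-effort guesses.
 */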

static void
gf100_fifo_uevent_init(struct nvkm_fifo *fifo)
{
        struct nvkm_device *device = fifo->engine.subdev.device;
        nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
}

static void
gf100_fifo_uevent_fini(struct nvkm_fifo *fifo)
{
        struct nvkm_device *device = fifo->engine.subdev.device;
        nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
}
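
/* The runlist is double-buffered: the set of active channels is
 * rewritten into the inactive buffer, which the hardware is then
 * pointed at via 0x002270 and told to commit via 0x002274.
 * Completion is presumably signalled by bit 20 of 0x00227c clearing;
 * the runlist interrupt handler wakes runlist.wait when the runlist
 * status changes.
 */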
void
gf100_fifo_runlist_update(struct gf100_fifo *fifo)
{
        struct gf100_fifo_chan *chan;
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        struct nvkm_memory *cur;
        int nr = 0;

        mutex_lock(&subdev->mutex);
        cur = fifo->runlist.mem[fifo->runlist.active];
        fifo->runlist.active = !fifo->runlist.active;

        nvkm_kmap(cur);
        list_for_each_entry(chan, &fifo->chan, head) {
                nvkm_wo32(cur, (nr * 8) + 0, chan->base.chid);
                nvkm_wo32(cur, (nr * 8) + 4, 0x00000004);
                nr++;
        }
        nvkm_done(cur);

        nvkm_wr32(device, 0x002270, nvkm_memory_addr(cur) >> 12);
        nvkm_wr32(device, 0x002274, 0x01f00000 | nr);

        if (wait_event_timeout(fifo->runlist.wait,
                               !(nvkm_rd32(device, 0x00227c) & 0x00100000),
                               msecs_to_jiffies(2000)) == 0)
                nvkm_error(subdev, "runlist update timeout\n");
        mutex_unlock(&subdev->mutex);
}
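
/* PFIFO addresses the engines it schedules with its own 0..5 indices;
 * the two helpers below translate between those and NVKM's engine
 * enumeration.
 */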
static inline int
gf100_fifo_engidx(struct gf100_fifo *fifo, u32 engn)
{
        switch (engn) {
        case NVKM_ENGINE_GR    : engn = 0; break;
        case NVKM_ENGINE_MSVLD : engn = 1; break;
        case NVKM_ENGINE_MSPPP : engn = 2; break;
        case NVKM_ENGINE_MSPDEC: engn = 3; break;
        case NVKM_ENGINE_CE0   : engn = 4; break;
        case NVKM_ENGINE_CE1   : engn = 5; break;
        default:
                return -1;
        }
        return engn;
}

static inline struct nvkm_engine *
gf100_fifo_engine(struct gf100_fifo *fifo, u32 engn)
{
        struct nvkm_device *device = fifo->base.engine.subdev.device;

        switch (engn) {
        case 0: engn = NVKM_ENGINE_GR; break;
        case 1: engn = NVKM_ENGINE_MSVLD; break;
        case 2: engn = NVKM_ENGINE_MSPPP; break;
        case 3: engn = NVKM_ENGINE_MSPDEC; break;
        case 4: engn = NVKM_ENGINE_CE0; break;
        case 5: engn = NVKM_ENGINE_CE1; break;
        default:
                return NULL;
        }

        return nvkm_device_engine(device, engn);
}
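
/* Deferred engine recovery (run from a workqueue so it may sleep):
 * scheduling to the faulting engines appears to be disabled via
 * 0x002630, the engines are reset with a fini/init cycle, and the
 * runlist is rebuilt without the killed channel(s) before 0x00262c
 * and 0x002630 are written to resume scheduling.
 */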
static void
gf100_fifo_recover_work(struct work_struct *work)
{
        struct gf100_fifo *fifo = container_of(work, typeof(*fifo), fault);
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        struct nvkm_engine *engine;
        unsigned long flags;
        u32 engn, engm = 0;
        u64 mask, todo;

        spin_lock_irqsave(&fifo->base.lock, flags);
        mask = fifo->mask;
        fifo->mask = 0ULL;
        spin_unlock_irqrestore(&fifo->base.lock, flags);

        for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1ULL << engn))
                engm |= 1 << gf100_fifo_engidx(fifo, engn);
        nvkm_mask(device, 0x002630, engm, engm);

        for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1ULL << engn)) {
                if ((engine = nvkm_device_engine(device, engn))) {
                        nvkm_subdev_fini(&engine->subdev, false);
                        WARN_ON(nvkm_subdev_init(&engine->subdev));
                }
        }

        gf100_fifo_runlist_update(fifo);
        nvkm_wr32(device, 0x00262c, engm);
        nvkm_mask(device, 0x002630, engm, 0x00000000);
}

static void
gf100_fifo_recover(struct gf100_fifo *fifo, struct nvkm_engine *engine,
                   struct gf100_fifo_chan *chan)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 chid = chan->base.chid;

        nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
                   nvkm_subdev_name[engine->subdev.index], chid);
        assert_spin_locked(&fifo->base.lock);

        nvkm_mask(device, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000);
        list_del_init(&chan->head);
        chan->killed = true;

        fifo->mask |= 1ULL << engine->subdev.index;
        schedule_work(&fifo->fault);
}

static const struct nvkm_enum
gf100_fifo_sched_reason[] = {
        { 0x0a, "CTXSW_TIMEOUT" },
        {}
};
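
/* CTXSW_TIMEOUT handling: scan the per-engine status registers at
 * 0x002640 for an engine stuck mid-context-switch, and recover the
 * channel it is stalled on.  Several of the status bits tested here
 * (unk0/unk1) have not been conclusively identified.
 */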
static void
gf100_fifo_intr_sched_ctxsw(struct gf100_fifo *fifo)
{
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        struct nvkm_engine *engine;
        struct gf100_fifo_chan *chan;
        unsigned long flags;
        u32 engn;

        spin_lock_irqsave(&fifo->base.lock, flags);
        for (engn = 0; engn < 6; engn++) {
                u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
                u32 busy = (stat & 0x80000000);
                u32 save = (stat & 0x00100000); /* maybe? */
                u32 unk0 = (stat & 0x00040000);
                u32 unk1 = (stat & 0x00001000);
                u32 chid = (stat & 0x0000007f);
                (void)save;

                if (busy && unk0 && unk1) {
                        list_for_each_entry(chan, &fifo->chan, head) {
                                if (chan->base.chid == chid) {
                                        engine = gf100_fifo_engine(fifo, engn);
                                        if (!engine)
                                                break;
                                        gf100_fifo_recover(fifo, engine, chan);
                                        break;
                                }
                        }
                }
        }
        spin_unlock_irqrestore(&fifo->base.lock, flags);
}

static void
gf100_fifo_intr_sched(struct gf100_fifo *fifo)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 intr = nvkm_rd32(device, 0x00254c);
        u32 code = intr & 0x000000ff;
        const struct nvkm_enum *en;

        en = nvkm_enum_find(gf100_fifo_sched_reason, code);

        nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");

        switch (code) {
        case 0x0a:
                gf100_fifo_intr_sched_ctxsw(fifo);
                break;
        default:
                break;
        }
}

static const struct nvkm_enum
gf100_fifo_fault_engine[] = {
        { 0x00, "PGRAPH", NULL, NVKM_ENGINE_GR },
        { 0x03, "PEEPHOLE", NULL, NVKM_ENGINE_IFB },
        { 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
        { 0x05, "BAR3", NULL, NVKM_SUBDEV_INSTMEM },
        { 0x07, "PFIFO", NULL, NVKM_ENGINE_FIFO },
        { 0x10, "PMSVLD", NULL, NVKM_ENGINE_MSVLD },
        { 0x11, "PMSPPP", NULL, NVKM_ENGINE_MSPPP },
        { 0x13, "PCOUNTER" },
        { 0x14, "PMSPDEC", NULL, NVKM_ENGINE_MSPDEC },
        { 0x15, "PCE0", NULL, NVKM_ENGINE_CE0 },
        { 0x16, "PCE1", NULL, NVKM_ENGINE_CE1 },
        { 0x17, "PDAEMON" },
        {}
};

static const struct nvkm_enum
gf100_fifo_fault_reason[] = {
        { 0x00, "PT_NOT_PRESENT" },
        { 0x01, "PT_TOO_SHORT" },
        { 0x02, "PAGE_NOT_PRESENT" },
        { 0x03, "VM_LIMIT_EXCEEDED" },
        { 0x04, "NO_CHANNEL" },
        { 0x05, "PAGE_SYSTEM_ONLY" },
        { 0x06, "PAGE_READ_ONLY" },
        { 0x0a, "COMPRESSED_SYSRAM" },
        { 0x0c, "INVALID_STORAGE_TYPE" },
        {}
};

static const struct nvkm_enum
gf100_fifo_fault_hubclient[] = {
        { 0x01, "PCOPY0" },
        { 0x02, "PCOPY1" },
        { 0x04, "DISPATCH" },
        { 0x05, "CTXCTL" },
        { 0x06, "PFIFO" },
        { 0x07, "BAR_READ" },
        { 0x08, "BAR_WRITE" },
        { 0x0b, "PVP" },
        { 0x0c, "PMSPPP" },
        { 0x0d, "PMSVLD" },
        { 0x11, "PCOUNTER" },
        { 0x12, "PDAEMON" },
        { 0x14, "CCACHE" },
        { 0x15, "CCACHE_POST" },
        {}
};

static const struct nvkm_enum
gf100_fifo_fault_gpcclient[] = {
        { 0x01, "TEX" },
        { 0x0c, "ESETUP" },
        { 0x0e, "CTXCTL" },
        { 0x0f, "PROP" },
        {}
};
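
/* MMU fault decoding.  Each fault unit has a block of four registers
 * at 0x002800 + (unit * 0x10): faulting instance, low/high virtual
 * address, and a status word from which the GPC, client, access
 * direction and fault reason are extracted below.
 */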
static void
gf100_fifo_intr_fault(struct gf100_fifo *fifo, int unit)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
        u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
        u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
        u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
        u32 gpc    = (stat & 0x1f000000) >> 24;
        u32 client = (stat & 0x00001f00) >> 8;
        u32 write  = (stat & 0x00000080);
        u32 hub    = (stat & 0x00000040);
        u32 reason = (stat & 0x0000000f);
        const struct nvkm_enum *er, *eu, *ec;
        struct nvkm_engine *engine = NULL;
        struct nvkm_fifo_chan *chan;
        unsigned long flags;
        char gpcid[8] = "";

        er = nvkm_enum_find(gf100_fifo_fault_reason, reason);
        eu = nvkm_enum_find(gf100_fifo_fault_engine, unit);
        if (hub) {
                ec = nvkm_enum_find(gf100_fifo_fault_hubclient, client);
        } else {
                ec = nvkm_enum_find(gf100_fifo_fault_gpcclient, client);
                snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
        }

        if (eu) {
                switch (eu->data2) {
                case NVKM_SUBDEV_BAR:
                        nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
                        break;
                case NVKM_SUBDEV_INSTMEM:
                        nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
                        break;
                case NVKM_ENGINE_IFB:
                        nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
                        break;
                default:
                        engine = nvkm_device_engine(device, eu->data2);
                        break;
                }
        }

        chan = nvkm_fifo_chan_inst(&fifo->base, (u64)inst << 12, &flags);

        nvkm_error(subdev,
                   "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
                   "reason %02x [%s] on channel %d [%010llx %s]\n",
                   write ? "write" : "read", (u64)vahi << 32 | valo,
                   unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
                   reason, er ? er->name : "", chan ? chan->chid : -1,
                   (u64)inst << 12,
                   chan ? chan->object.client->name : "unknown");

        if (engine && chan)
                gf100_fifo_recover(fifo, engine, (void *)chan);
        nvkm_fifo_chan_put(&fifo->base, flags, &chan);
}

static const struct nvkm_bitfield
gf100_fifo_pbdma_intr[] = {
/*      { 0x00008000, "" }      seen with null ib push */
        { 0x00200000, "ILLEGAL_MTHD" },
        { 0x00800000, "EMPTY_SUBC" },
        {}
};
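
/* A PBDMA EMPTY_SUBC interrupt may just be a software method (one
 * intended for the host rather than the hardware), so give the SW
 * engine a chance to consume it before reporting whatever remains.
 */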
static void
gf100_fifo_intr_pbdma(struct gf100_fifo *fifo, int unit)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000));
        u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
        u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
        u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0x7f;
        u32 subc = (addr & 0x00070000) >> 16;
        u32 mthd = (addr & 0x00003ffc);
        struct nvkm_fifo_chan *chan;
        unsigned long flags;
        u32 show = stat;
        char msg[128];

        if (stat & 0x00800000) {
                if (device->sw) {
                        if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
                                show &= ~0x00800000;
                }
        }

        if (show) {
                nvkm_snprintbf(msg, sizeof(msg), gf100_fifo_pbdma_intr, show);
                chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
                nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
                                   "subc %d mthd %04x data %08x\n",
                           unit, show, msg, chid, chan ? chan->inst->addr : 0,
                           chan ? chan->object.client->name : "unknown",
                           subc, mthd, data);
                nvkm_fifo_chan_put(&fifo->base, flags, &chan);
        }

        nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
        nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}

static void
gf100_fifo_intr_runlist(struct gf100_fifo *fifo)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 intr = nvkm_rd32(device, 0x002a00);

        if (intr & 0x10000000) {
                wake_up(&fifo->runlist.wait);
                nvkm_wr32(device, 0x002a00, 0x10000000);
                intr &= ~0x10000000;
        }

        if (intr) {
                nvkm_error(subdev, "RUNLIST %08x\n", intr);
                nvkm_wr32(device, 0x002a00, intr);
        }
}

static void
gf100_fifo_intr_engine_unit(struct gf100_fifo *fifo, int engn)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 intr = nvkm_rd32(device, 0x0025a8 + (engn * 0x04));
        u32 inte = nvkm_rd32(device, 0x002628);
        u32 unkn;

        nvkm_wr32(device, 0x0025a8 + (engn * 0x04), intr);

        for (unkn = 0; unkn < 8; unkn++) {
                u32 ints = (intr >> (unkn * 0x04)) & inte;
                if (ints & 0x1) {
                        nvkm_fifo_uevent(&fifo->base);
                        ints &= ~1;
                }
                if (ints) {
                        nvkm_error(subdev, "ENGINE %d %d %01x\n",
                                   engn, unkn, ints);
                        nvkm_mask(device, 0x002628, ints, 0);
                }
        }
}

void
gf100_fifo_intr_engine(struct gf100_fifo *fifo)
{
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        u32 mask = nvkm_rd32(device, 0x0025a4);

        while (mask) {
                u32 unit = __ffs(mask);
                gf100_fifo_intr_engine_unit(fifo, unit);
                mask &= ~(1 << unit);
        }
}
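
/* Top-level interrupt dispatch.  Status bits in 0x002100 (gated by
 * the mask in 0x002140) select the source: 0x00000100 scheduler,
 * 0x10000000 MMU fault units, 0x20000000 PBDMA units, 0x40000000
 * runlist, 0x80000000 engine (non-stall) events.  The other bits
 * handled below are not understood and are only logged.
 */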
static void
gf100_fifo_intr(struct nvkm_fifo *base)
{
        struct gf100_fifo *fifo = gf100_fifo(base);
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 mask = nvkm_rd32(device, 0x002140);
        u32 stat = nvkm_rd32(device, 0x002100) & mask;

        if (stat & 0x00000001) {
                u32 intr = nvkm_rd32(device, 0x00252c);
                nvkm_warn(subdev, "INTR 00000001: %08x\n", intr);
                nvkm_wr32(device, 0x002100, 0x00000001);
                stat &= ~0x00000001;
        }

        if (stat & 0x00000100) {
                gf100_fifo_intr_sched(fifo);
                nvkm_wr32(device, 0x002100, 0x00000100);
                stat &= ~0x00000100;
        }

        if (stat & 0x00010000) {
                u32 intr = nvkm_rd32(device, 0x00256c);
                nvkm_warn(subdev, "INTR 00010000: %08x\n", intr);
                nvkm_wr32(device, 0x002100, 0x00010000);
                stat &= ~0x00010000;
        }

        if (stat & 0x01000000) {
                u32 intr = nvkm_rd32(device, 0x00258c);
                nvkm_warn(subdev, "INTR 01000000: %08x\n", intr);
                nvkm_wr32(device, 0x002100, 0x01000000);
                stat &= ~0x01000000;
        }

        if (stat & 0x10000000) {
                u32 mask = nvkm_rd32(device, 0x00259c);
                while (mask) {
                        u32 unit = __ffs(mask);
                        gf100_fifo_intr_fault(fifo, unit);
                        nvkm_wr32(device, 0x00259c, (1 << unit));
                        mask &= ~(1 << unit);
                }
                stat &= ~0x10000000;
        }

        if (stat & 0x20000000) {
                u32 mask = nvkm_rd32(device, 0x0025a0);
                while (mask) {
                        u32 unit = __ffs(mask);
                        gf100_fifo_intr_pbdma(fifo, unit);
                        nvkm_wr32(device, 0x0025a0, (1 << unit));
                        mask &= ~(1 << unit);
                }
                stat &= ~0x20000000;
        }

        if (stat & 0x40000000) {
                gf100_fifo_intr_runlist(fifo);
                stat &= ~0x40000000;
        }

        if (stat & 0x80000000) {
                gf100_fifo_intr_engine(fifo);
                stat &= ~0x80000000;
        }

        if (stat) {
                nvkm_error(subdev, "INTR %08x\n", stat);
                nvkm_mask(device, 0x002140, stat, 0x00000000);
                nvkm_wr32(device, 0x002100, stat);
        }
}
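
/* One-time setup: allocate the two runlist buffers and what appears
 * to be a per-channel user area (128 channels, one 0x1000-byte page
 * each), the latter mapped through BAR so it is reachable from
 * userspace.
 */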
static int
gf100_fifo_oneinit(struct nvkm_fifo *base)
{
        struct gf100_fifo *fifo = gf100_fifo(base);
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        int ret;

        ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
                              false, &fifo->runlist.mem[0]);
        if (ret)
                return ret;

        ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
                              false, &fifo->runlist.mem[1]);
        if (ret)
                return ret;

        init_waitqueue_head(&fifo->runlist.wait);

        ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 0x1000,
                              0x1000, false, &fifo->user.mem);
        if (ret)
                return ret;

        ret = nvkm_bar_umap(device->bar, 128 * 0x1000, 12, &fifo->user.bar);
        if (ret)
                return ret;

        nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);
        return 0;
}

static void
gf100_fifo_fini(struct nvkm_fifo *base)
{
        struct gf100_fifo *fifo = gf100_fifo(base);
        flush_work(&fifo->fault);
}
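
/* Hardware init: enable the PBDMA units, assign engines to them,
 * clear and unmask per-PBDMA interrupts, point the hardware at the
 * BAR-mapped user area, then enable the top-level interrupts.
 */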
static void
gf100_fifo_init(struct nvkm_fifo *base)
{
        struct gf100_fifo *fifo = gf100_fifo(base);
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        int i;

        nvkm_wr32(device, 0x000204, 0xffffffff);
        nvkm_wr32(device, 0x002204, 0xffffffff);

        fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x002204));
        nvkm_debug(subdev, "%d PBDMA unit(s)\n", fifo->spoon_nr);

        /* assign engines to PBDMAs */
        if (fifo->spoon_nr >= 3) {
                nvkm_wr32(device, 0x002208, ~(1 << 0)); /* PGRAPH */
                nvkm_wr32(device, 0x00220c, ~(1 << 1)); /* PVP */
                nvkm_wr32(device, 0x002210, ~(1 << 1)); /* PMSPPP */
                nvkm_wr32(device, 0x002214, ~(1 << 1)); /* PMSVLD */
                nvkm_wr32(device, 0x002218, ~(1 << 2)); /* PCE0 */
                nvkm_wr32(device, 0x00221c, ~(1 << 1)); /* PCE1 */
        }

        /* PBDMA[n] */
        for (i = 0; i < fifo->spoon_nr; i++) {
                nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
                nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
                nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
        }

        nvkm_mask(device, 0x002200, 0x00000001, 0x00000001);
        nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);

        nvkm_wr32(device, 0x002100, 0xffffffff);
        nvkm_wr32(device, 0x002140, 0x7fffffff);
        nvkm_wr32(device, 0x002628, 0x00000001); /* ENGINE_INTR_EN */
}

static void *
gf100_fifo_dtor(struct nvkm_fifo *base)
{
        struct gf100_fifo *fifo = gf100_fifo(base);
        nvkm_vm_put(&fifo->user.bar);
        nvkm_memory_del(&fifo->user.mem);
        nvkm_memory_del(&fifo->runlist.mem[0]);
        nvkm_memory_del(&fifo->runlist.mem[1]);
        return fifo;
}

static const struct nvkm_fifo_func
gf100_fifo = {
        .dtor = gf100_fifo_dtor,
        .oneinit = gf100_fifo_oneinit,
        .init = gf100_fifo_init,
        .fini = gf100_fifo_fini,
        .intr = gf100_fifo_intr,
        .uevent_init = gf100_fifo_uevent_init,
        .uevent_fini = gf100_fifo_uevent_fini,
        .chan = {
                &gf100_fifo_gpfifo_oclass,
                NULL
        },
};

int
gf100_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
{
        struct gf100_fifo *fifo;

        if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
                return -ENOMEM;
        INIT_LIST_HEAD(&fifo->chan);
        INIT_WORK(&fifo->fault, gf100_fifo_recover_work);
        *pfifo = &fifo->base;

        return nvkm_fifo_ctor(&gf100_fifo, device, index, 128, &fifo->base);
}