/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "gf100.h"
#include "changf100.h"

#include <core/client.h>
#include <core/enum.h>
#include <core/gpuobj.h>
#include <subdev/bar.h>
#include <engine/sw.h>

#include <nvif/class.h>
static void
gf100_fifo_uevent_init(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
}

static void
gf100_fifo_uevent_fini(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
}

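/*
 * Commit the software channel list to the hardware runlist.  The two
 * runlist buffers are used in alternation: the inactive one is filled
 * with one 8-byte entry per channel, then 0x002270/0x002274 point the
 * scheduler at it.  The entry layout and the 0x00227c "pending" poll
 * reflect nouveau's reverse-engineered understanding of the hardware.
 */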
void
gf100_fifo_runlist_commit(struct gf100_fifo *fifo)
{
	struct gf100_fifo_chan *chan;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_memory *cur;
	int nr = 0;
	int target;

	mutex_lock(&subdev->mutex);
	cur = fifo->runlist.mem[fifo->runlist.active];
	fifo->runlist.active = !fifo->runlist.active;

	nvkm_kmap(cur);
	list_for_each_entry(chan, &fifo->chan, head) {
		nvkm_wo32(cur, (nr * 8) + 0, chan->base.chid);
		nvkm_wo32(cur, (nr * 8) + 4, 0x00000004);
		nr++;
	}
	nvkm_done(cur);

	target = (nvkm_memory_target(cur) == NVKM_MEM_TARGET_HOST) ? 0x3 : 0x0;

	nvkm_wr32(device, 0x002270, (nvkm_memory_addr(cur) >> 12) |
				    (target << 28));
	nvkm_wr32(device, 0x002274, 0x01f00000 | nr);

	if (wait_event_timeout(fifo->runlist.wait,
			       !(nvkm_rd32(device, 0x00227c) & 0x00100000),
			       msecs_to_jiffies(2000)) == 0)
		nvkm_error(subdev, "runlist update timeout\n");
	mutex_unlock(&subdev->mutex);
}

void
gf100_fifo_runlist_remove(struct gf100_fifo *fifo, struct gf100_fifo_chan *chan)
{
	mutex_lock(&fifo->base.engine.subdev.mutex);
	list_del_init(&chan->head);
	mutex_unlock(&fifo->base.engine.subdev.mutex);
}

void
gf100_fifo_runlist_insert(struct gf100_fifo *fifo, struct gf100_fifo_chan *chan)
{
	mutex_lock(&fifo->base.engine.subdev.mutex);
	list_add_tail(&chan->head, &fifo->chan);
	mutex_unlock(&fifo->base.engine.subdev.mutex);
}

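/*
 * Map between NVKM engine indices and the six fixed scheduler slots
 * the hardware exposes; the slot assignments below are as understood
 * from reverse engineering.
 */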
static inline int
gf100_fifo_engidx(struct gf100_fifo *fifo, u32 engn)
{
	switch (engn) {
	case NVKM_ENGINE_GR    : engn = 0; break;
	case NVKM_ENGINE_MSVLD : engn = 1; break;
	case NVKM_ENGINE_MSPPP : engn = 2; break;
	case NVKM_ENGINE_MSPDEC: engn = 3; break;
	case NVKM_ENGINE_CE0   : engn = 4; break;
	case NVKM_ENGINE_CE1   : engn = 5; break;
	default:
		return -1;
	}

	return engn;
}

static inline struct nvkm_engine *
gf100_fifo_engine(struct gf100_fifo *fifo, u32 engn)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;

	switch (engn) {
	case 0: engn = NVKM_ENGINE_GR; break;
	case 1: engn = NVKM_ENGINE_MSVLD; break;
	case 2: engn = NVKM_ENGINE_MSPPP; break;
	case 3: engn = NVKM_ENGINE_MSPDEC; break;
	case 4: engn = NVKM_ENGINE_CE0; break;
	case 5: engn = NVKM_ENGINE_CE1; break;
	default:
		return NULL;
	}

	return nvkm_device_engine(device, engn);
}

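/*
 * Deferred recovery work: latch the mask of faulting engines, disable
 * their scheduling (0x002630 appears to be a scheduler disable mask),
 * cycle each engine through fini/init to reset it, then rebuild the
 * runlist and re-enable scheduling.
 */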
static void
gf100_fifo_recover_work(struct work_struct *w)
{
	struct gf100_fifo *fifo = container_of(w, typeof(*fifo), recover.work);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_engine *engine;
	unsigned long flags;
	u32 engn, engm = 0;
	u64 mask, todo;

	spin_lock_irqsave(&fifo->base.lock, flags);
	mask = fifo->recover.mask;
	fifo->recover.mask = 0ULL;
	spin_unlock_irqrestore(&fifo->base.lock, flags);

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~BIT_ULL(engn))
		engm |= 1 << gf100_fifo_engidx(fifo, engn);
	nvkm_mask(device, 0x002630, engm, engm);

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~BIT_ULL(engn)) {
		if ((engine = nvkm_device_engine(device, engn))) {
			nvkm_subdev_fini(&engine->subdev, false);
			WARN_ON(nvkm_subdev_init(&engine->subdev));
		}
	}

	gf100_fifo_runlist_commit(fifo);
	nvkm_wr32(device, 0x00262c, engm);
	nvkm_mask(device, 0x002630, engm, 0x00000000);
}

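/*
 * Begin recovery of a single channel: disable and unlist it, mark it
 * killed, and queue the work item above to reset the engine that
 * faulted.  Expects fifo->base.lock to be held by the caller.
 */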
static void
gf100_fifo_recover(struct gf100_fifo *fifo, struct nvkm_engine *engine,
		   struct gf100_fifo_chan *chan)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 chid = chan->base.chid;

	nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
		   nvkm_subdev_name[engine->subdev.index], chid);
	assert_spin_locked(&fifo->base.lock);

	nvkm_mask(device, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000);
	list_del_init(&chan->head);
	chan->killed = true;

	if (engine != &fifo->base.engine)
		fifo->recover.mask |= 1ULL << engine->subdev.index;
	schedule_work(&fifo->recover.work);
}

static const struct nvkm_enum
gf100_fifo_sched_reason[] = {
	{ 0x0a, "CTXSW_TIMEOUT" },
	{}
};

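/*
 * CTXSW_TIMEOUT handling: scan the six per-engine status registers and
 * recover any channel that appears stuck in a context switch.  Several
 * of the status bits ("save", "unk0", "unk1") are only partially
 * understood.
 */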
static void
gf100_fifo_intr_sched_ctxsw(struct gf100_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_engine *engine;
	struct gf100_fifo_chan *chan;
	unsigned long flags;
	u32 engn;

	spin_lock_irqsave(&fifo->base.lock, flags);
	for (engn = 0; engn < 6; engn++) {
		u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
		u32 busy = (stat & 0x80000000);
		u32 save = (stat & 0x00100000); /* maybe? */
		u32 unk0 = (stat & 0x00040000);
		u32 unk1 = (stat & 0x00001000);
		u32 chid = (stat & 0x0000007f);
		(void)save;

		if (busy && unk0 && unk1) {
			list_for_each_entry(chan, &fifo->chan, head) {
				if (chan->base.chid == chid) {
					engine = gf100_fifo_engine(fifo, engn);
					if (!engine)
						break;
					gf100_fifo_recover(fifo, engine, chan);
					break;
				}
			}
		}
	}
	spin_unlock_irqrestore(&fifo->base.lock, flags);
}

static void
gf100_fifo_intr_sched(struct gf100_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00254c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en;

	en = nvkm_enum_find(gf100_fifo_sched_reason, code);

	nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");

	switch (code) {
	case 0x0a:
		gf100_fifo_intr_sched_ctxsw(fifo);
		break;
	default:
		break;
	}
}

static const struct nvkm_enum
gf100_fifo_fault_engine[] = {
	{ 0x00, "PGRAPH", NULL, NVKM_ENGINE_GR },
	{ 0x03, "PEEPHOLE", NULL, NVKM_ENGINE_IFB },
	{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
	{ 0x05, "BAR3", NULL, NVKM_SUBDEV_INSTMEM },
	{ 0x07, "PFIFO", NULL, NVKM_ENGINE_FIFO },
	{ 0x10, "PMSVLD", NULL, NVKM_ENGINE_MSVLD },
	{ 0x11, "PMSPPP", NULL, NVKM_ENGINE_MSPPP },
	{ 0x13, "PCOUNTER" },
	{ 0x14, "PMSPDEC", NULL, NVKM_ENGINE_MSPDEC },
	{ 0x15, "PCE0", NULL, NVKM_ENGINE_CE0 },
	{ 0x16, "PCE1", NULL, NVKM_ENGINE_CE1 },
	{ 0x17, "PMU" },
	{}
};

static const struct nvkm_enum
gf100_fifo_fault_reason[] = {
	{ 0x00, "PT_NOT_PRESENT" },
	{ 0x01, "PT_TOO_SHORT" },
	{ 0x02, "PAGE_NOT_PRESENT" },
	{ 0x03, "VM_LIMIT_EXCEEDED" },
	{ 0x04, "NO_CHANNEL" },
	{ 0x05, "PAGE_SYSTEM_ONLY" },
	{ 0x06, "PAGE_READ_ONLY" },
	{ 0x0a, "COMPRESSED_SYSRAM" },
	{ 0x0c, "INVALID_STORAGE_TYPE" },
	{}
};

static const struct nvkm_enum
gf100_fifo_fault_hubclient[] = {
	{ 0x01, "PCOPY0" },
	{ 0x02, "PCOPY1" },
	{ 0x04, "DISPATCH" },
	{ 0x05, "CTXCTL" },
	{ 0x06, "PFIFO" },
	{ 0x07, "BAR_READ" },
	{ 0x08, "BAR_WRITE" },
	{ 0x0b, "PVP" },
	{ 0x0c, "PMSPPP" },
	{ 0x0d, "PMSVLD" },
	{ 0x11, "PCOUNTER" },
	{ 0x12, "PMU" },
	{ 0x14, "CCACHE" },
	{ 0x15, "CCACHE_POST" },
	{}
};

static const struct nvkm_enum
gf100_fifo_fault_gpcclient[] = {
	{ 0x01, "TEX" },
	{ 0x0c, "ESETUP" },
	{ 0x0e, "CTXCTL" },
	{ 0x0f, "PROP" },
	{}
};

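/*
 * Decode and report an MMU fault.  For BAR1/BAR3/IFB faults the
 * corresponding unit register is rewritten (presumably to unstick the
 * mapping, per nouveau's reverse engineering); faults attributed to an
 * engine trigger recovery of the offending channel.
 */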
static void
gf100_fifo_intr_fault(struct gf100_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
	u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
	u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
	u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
	u32 gpc    = (stat & 0x1f000000) >> 24;
	u32 client = (stat & 0x00001f00) >> 8;
	u32 write  = (stat & 0x00000080);
	u32 hub    = (stat & 0x00000040);
	u32 reason = (stat & 0x0000000f);
	const struct nvkm_enum *er, *eu, *ec;
	struct nvkm_engine *engine = NULL;
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	char gpcid[8] = "";

	er = nvkm_enum_find(gf100_fifo_fault_reason, reason);
	eu = nvkm_enum_find(gf100_fifo_fault_engine, unit);
	if (hub) {
		ec = nvkm_enum_find(gf100_fifo_fault_hubclient, client);
	} else {
		ec = nvkm_enum_find(gf100_fifo_fault_gpcclient, client);
		snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
	}

	if (eu && eu->data2) {
		switch (eu->data2) {
		case NVKM_SUBDEV_BAR:
			nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
			break;
		case NVKM_SUBDEV_INSTMEM:
			nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
			break;
		case NVKM_ENGINE_IFB:
			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
			break;
		default:
			engine = nvkm_device_engine(device, eu->data2);
			break;
		}
	}

	chan = nvkm_fifo_chan_inst(&fifo->base, (u64)inst << 12, &flags);

	nvkm_error(subdev,
		   "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
		   "reason %02x [%s] on channel %d [%010llx %s]\n",
		   write ? "write" : "read", (u64)vahi << 32 | valo,
		   unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
		   reason, er ? er->name : "", chan ? chan->chid : -1,
		   (u64)inst << 12,
		   chan ? chan->object.client->name : "unknown");

	if (engine && chan)
		gf100_fifo_recover(fifo, engine, (void *)chan);
	nvkm_fifo_chan_put(&fifo->base, flags, &chan);
}

static const struct nvkm_bitfield
gf100_fifo_pbdma_intr[] = {
	/* { 0x00008000, "" } seen with null ib push */
	{ 0x00200000, "ILLEGAL_MTHD" },
	{ 0x00800000, "EMPTY_SUBC" },
	{}
};

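/*
 * PBDMA interrupt: an EMPTY_SUBC trap may actually be a software
 * method, so it is offered to the SW engine first; whatever remains is
 * logged against the owning channel and the interrupt acknowledged.
 */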
static void
gf100_fifo_intr_pbdma(struct gf100_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000));
	u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
	u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0x7f;
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00003ffc);
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	u32 show = stat;
	char msg[128];

	if (stat & 0x00800000) {
		if (device->sw) {
			if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
				show &= ~0x00800000;
		}
	}

	if (show) {
		nvkm_snprintbf(msg, sizeof(msg), gf100_fifo_pbdma_intr, show);
		chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
				   "subc %d mthd %04x data %08x\n",
			   unit, show, msg, chid, chan ? chan->inst->addr : 0,
			   chan ? chan->object.client->name : "unknown",
			   subc, mthd, data);
		nvkm_fifo_chan_put(&fifo->base, flags, &chan);
	}

	nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
	nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}

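/*
 * Runlist interrupt: bit 0x10000000 appears to signal completion of a
 * runlist update, so wake anyone waiting in runlist_commit().
 */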
static void
gf100_fifo_intr_runlist(struct gf100_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x002a00);

	if (intr & 0x10000000) {
		wake_up(&fifo->runlist.wait);
		nvkm_wr32(device, 0x002a00, 0x10000000);
		intr &= ~0x10000000;
	}

	if (intr) {
		nvkm_error(subdev, "RUNLIST %08x\n", intr);
		nvkm_wr32(device, 0x002a00, intr);
	}
}

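/*
 * Per-unit engine interrupt: bit 0 of each nibble is understood to be
 * the non-stalling interrupt used for uevents; any other set bits are
 * reported and then masked off in 0x002628.
 */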
static void
gf100_fifo_intr_engine_unit(struct gf100_fifo *fifo, int engn)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x0025a8 + (engn * 0x04));
	u32 inte = nvkm_rd32(device, 0x002628);
	u32 unkn;

	nvkm_wr32(device, 0x0025a8 + (engn * 0x04), intr);

	for (unkn = 0; unkn < 8; unkn++) {
		u32 ints = (intr >> (unkn * 0x04)) & inte;
		if (ints & 0x1) {
			nvkm_fifo_uevent(&fifo->base);
			ints &= ~1;
		}
		if (ints) {
			nvkm_error(subdev, "ENGINE %d %d %01x\n",
				   engn, unkn, ints);
			nvkm_mask(device, 0x002628, ints, 0);
		}
	}
}

void
gf100_fifo_intr_engine(struct gf100_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x0025a4);

	while (mask) {
		u32 unit = __ffs(mask);
		gf100_fifo_intr_engine_unit(fifo, unit);
		mask &= ~(1 << unit);
	}
}

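/*
 * Top-level PFIFO interrupt handler: dispatch each status bit in
 * 0x002100 to its handler, then report and mask off anything
 * unexpected.  The 0x00000001, 0x00010000 and 0x01000000 sources are
 * only logged, as their meaning is unknown.
 */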
static void
gf100_fifo_intr(struct nvkm_fifo *base)
{
	struct gf100_fifo *fifo = gf100_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x002140);
	u32 stat = nvkm_rd32(device, 0x002100) & mask;

	if (stat & 0x00000001) {
		u32 intr = nvkm_rd32(device, 0x00252c);
		nvkm_warn(subdev, "INTR 00000001: %08x\n", intr);
		nvkm_wr32(device, 0x002100, 0x00000001);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000100) {
		gf100_fifo_intr_sched(fifo);
		nvkm_wr32(device, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	if (stat & 0x00010000) {
		u32 intr = nvkm_rd32(device, 0x00256c);
		nvkm_warn(subdev, "INTR 00010000: %08x\n", intr);
		nvkm_wr32(device, 0x002100, 0x00010000);
		stat &= ~0x00010000;
	}

	if (stat & 0x01000000) {
		u32 intr = nvkm_rd32(device, 0x00258c);
		nvkm_warn(subdev, "INTR 01000000: %08x\n", intr);
		nvkm_wr32(device, 0x002100, 0x01000000);
		stat &= ~0x01000000;
	}

	if (stat & 0x10000000) {
		u32 mask = nvkm_rd32(device, 0x00259c);
		while (mask) {
			u32 unit = __ffs(mask);
			gf100_fifo_intr_fault(fifo, unit);
			nvkm_wr32(device, 0x00259c, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x10000000;
	}

	if (stat & 0x20000000) {
		u32 mask = nvkm_rd32(device, 0x0025a0);
		while (mask) {
			u32 unit = __ffs(mask);
			gf100_fifo_intr_pbdma(fifo, unit);
			nvkm_wr32(device, 0x0025a0, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
		gf100_fifo_intr_runlist(fifo);
		stat &= ~0x40000000;
	}

	if (stat & 0x80000000) {
		gf100_fifo_intr_engine(fifo);
		stat &= ~0x80000000;
	}

	if (stat) {
		nvkm_error(subdev, "INTR %08x\n", stat);
		nvkm_mask(device, 0x002140, stat, 0x00000000);
		nvkm_wr32(device, 0x002100, stat);
	}
}

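/*
 * One-time setup: count the PBDMA units, allocate the two runlist
 * buffers, and allocate and BAR-map the user area (128 channels of
 * 0x1000 bytes each).
 */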
static int
gf100_fifo_oneinit(struct nvkm_fifo *base)
{
	struct gf100_fifo *fifo = gf100_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int ret;

	/* Determine number of PBDMAs by checking valid enable bits. */
	nvkm_wr32(device, 0x002204, 0xffffffff);
	fifo->pbdma_nr = hweight32(nvkm_rd32(device, 0x002204));
	nvkm_debug(subdev, "%d PBDMA(s)\n", fifo->pbdma_nr);

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
			      false, &fifo->runlist.mem[0]);
	if (ret)
		return ret;

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
			      false, &fifo->runlist.mem[1]);
	if (ret)
		return ret;

	init_waitqueue_head(&fifo->runlist.wait);

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 0x1000,
			      0x1000, false, &fifo->user.mem);
	if (ret)
		return ret;

	ret = nvkm_bar_umap(device->bar, 128 * 0x1000, 12, &fifo->user.bar);
	if (ret)
		return ret;

	nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);
	return 0;
}

static void
gf100_fifo_fini(struct nvkm_fifo *base)
{
	struct gf100_fifo *fifo = gf100_fifo(base);
	flush_work(&fifo->recover.work);
}

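/*
 * Hardware init: enable the PBDMA units, program the (reverse-
 * engineered) engine-to-PBDMA routing when three or more units are
 * present, clear and unmask interrupts, and point 0x002254 at the
 * BAR mapping of the user area.
 */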
static void
gf100_fifo_init(struct nvkm_fifo *base)
{
	struct gf100_fifo *fifo = gf100_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int i;

	/* Enable PBDMAs. */
	nvkm_wr32(device, 0x000204, (1 << fifo->pbdma_nr) - 1);
	nvkm_wr32(device, 0x002204, (1 << fifo->pbdma_nr) - 1);

	/* Assign engines to PBDMAs. */
	if (fifo->pbdma_nr >= 3) {
		nvkm_wr32(device, 0x002208, ~(1 << 0)); /* PGRAPH */
		nvkm_wr32(device, 0x00220c, ~(1 << 1)); /* PVP */
		nvkm_wr32(device, 0x002210, ~(1 << 1)); /* PMSPPP */
		nvkm_wr32(device, 0x002214, ~(1 << 1)); /* PMSVLD */
		nvkm_wr32(device, 0x002218, ~(1 << 2)); /* PCE0 */
		nvkm_wr32(device, 0x00221c, ~(1 << 1)); /* PCE1 */
	}

	/* PBDMA[n] */
	for (i = 0; i < fifo->pbdma_nr; i++) {
		nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
	}

	nvkm_mask(device, 0x002200, 0x00000001, 0x00000001);
	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);

	nvkm_wr32(device, 0x002100, 0xffffffff);
	nvkm_wr32(device, 0x002140, 0x7fffffff);
	nvkm_wr32(device, 0x002628, 0x00000001); /* ENGINE_INTR_EN */
}

static void *
gf100_fifo_dtor(struct nvkm_fifo *base)
{
	struct gf100_fifo *fifo = gf100_fifo(base);
	nvkm_vm_put(&fifo->user.bar);
	nvkm_memory_del(&fifo->user.mem);
	nvkm_memory_del(&fifo->runlist.mem[0]);
	nvkm_memory_del(&fifo->runlist.mem[1]);
	return fifo;
}

static const struct nvkm_fifo_func
gf100_fifo = {
	.dtor = gf100_fifo_dtor,
	.oneinit = gf100_fifo_oneinit,
	.init = gf100_fifo_init,
	.fini = gf100_fifo_fini,
	.intr = gf100_fifo_intr,
	.uevent_init = gf100_fifo_uevent_init,
	.uevent_fini = gf100_fifo_uevent_fini,
	.chan = {
		&gf100_fifo_gpfifo_oclass,
		NULL
	},
};

int
gf100_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
{
	struct gf100_fifo *fifo;

	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
		return -ENOMEM;
	INIT_LIST_HEAD(&fifo->chan);
	INIT_WORK(&fifo->recover.work, gf100_fifo_recover_work);
	*pfifo = &fifo->base;

	return nvkm_fifo_ctor(&gf100_fifo, device, index, 128, &fifo->base);
}