gf100.c

/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "gf100.h"
#include "changf100.h"

#include <core/client.h>
#include <core/enum.h>
#include <core/gpuobj.h>
#include <subdev/bar.h>
#include <engine/sw.h>

#include <nvif/class.h>

static void
gf100_fifo_uevent_init(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
}

static void
gf100_fifo_uevent_fini(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
}
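
/* Rebuild the runlist from the software channel list and hand it to PFIFO.
 * The runlist is double-buffered (runlist.mem[0/1]): each commit fills the
 * inactive copy, flips runlist.active, programs the runlist base (0x002270)
 * and entry count (0x002274), and then waits for what appears to be an
 * update-pending bit in 0x00227c to clear.
 */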
void
gf100_fifo_runlist_commit(struct gf100_fifo *fifo)
{
	struct gf100_fifo_chan *chan;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_memory *cur;
	int nr = 0;
	int target;

	mutex_lock(&subdev->mutex);
	cur = fifo->runlist.mem[fifo->runlist.active];
	fifo->runlist.active = !fifo->runlist.active;

	nvkm_kmap(cur);
	list_for_each_entry(chan, &fifo->chan, head) {
		nvkm_wo32(cur, (nr * 8) + 0, chan->base.chid);
		nvkm_wo32(cur, (nr * 8) + 4, 0x00000004);
		nr++;
	}
	nvkm_done(cur);

	switch (nvkm_memory_target(cur)) {
	case NVKM_MEM_TARGET_VRAM: target = 0; break;
	case NVKM_MEM_TARGET_NCOH: target = 3; break;
	default:
		mutex_unlock(&subdev->mutex);
		WARN_ON(1);
		return;
	}

	nvkm_wr32(device, 0x002270, (nvkm_memory_addr(cur) >> 12) |
				    (target << 28));
	nvkm_wr32(device, 0x002274, 0x01f00000 | nr);

	if (wait_event_timeout(fifo->runlist.wait,
			       !(nvkm_rd32(device, 0x00227c) & 0x00100000),
			       msecs_to_jiffies(2000)) == 0)
		nvkm_error(subdev, "runlist update timeout\n");
	mutex_unlock(&subdev->mutex);
}

void
gf100_fifo_runlist_remove(struct gf100_fifo *fifo, struct gf100_fifo_chan *chan)
{
	mutex_lock(&fifo->base.engine.subdev.mutex);
	list_del_init(&chan->head);
	mutex_unlock(&fifo->base.engine.subdev.mutex);
}

void
gf100_fifo_runlist_insert(struct gf100_fifo *fifo, struct gf100_fifo_chan *chan)
{
	mutex_lock(&fifo->base.engine.subdev.mutex);
	list_add_tail(&chan->head, &fifo->chan);
	mutex_unlock(&fifo->base.engine.subdev.mutex);
}
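
/* Translate between NVKM engine indices and the fixed engine slots used by
 * the GF100 scheduler/runlist registers (GR=0, MSVLD=1, MSPPP=2, MSPDEC=3,
 * CE0=4, CE1=5).
 */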
static inline int
gf100_fifo_engidx(struct gf100_fifo *fifo, u32 engn)
{
	switch (engn) {
	case NVKM_ENGINE_GR    : engn = 0; break;
	case NVKM_ENGINE_MSVLD : engn = 1; break;
	case NVKM_ENGINE_MSPPP : engn = 2; break;
	case NVKM_ENGINE_MSPDEC: engn = 3; break;
	case NVKM_ENGINE_CE0   : engn = 4; break;
	case NVKM_ENGINE_CE1   : engn = 5; break;
	default:
		return -1;
	}

	return engn;
}

static inline struct nvkm_engine *
gf100_fifo_engine(struct gf100_fifo *fifo, u32 engn)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;

	switch (engn) {
	case 0: engn = NVKM_ENGINE_GR; break;
	case 1: engn = NVKM_ENGINE_MSVLD; break;
	case 2: engn = NVKM_ENGINE_MSPPP; break;
	case 3: engn = NVKM_ENGINE_MSPDEC; break;
	case 4: engn = NVKM_ENGINE_CE0; break;
	case 5: engn = NVKM_ENGINE_CE1; break;
	default:
		return NULL;
	}

	return nvkm_device_engine(device, engn);
}
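
/* Deferred engine recovery, run from the workqueue scheduled by
 * gf100_fifo_recover().  Snapshots recover.mask under the FIFO lock, masks
 * the affected engines in 0x002630 (presumably holding them off the
 * scheduler), resets each engine by cycling its subdev fini/init, then
 * recommits the runlist and releases the engines again.
 */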
static void
gf100_fifo_recover_work(struct work_struct *w)
{
	struct gf100_fifo *fifo = container_of(w, typeof(*fifo), recover.work);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_engine *engine;
	unsigned long flags;
	u32 engn, engm = 0;
	u64 mask, todo;

	spin_lock_irqsave(&fifo->base.lock, flags);
	mask = fifo->recover.mask;
	fifo->recover.mask = 0ULL;
	spin_unlock_irqrestore(&fifo->base.lock, flags);

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~BIT_ULL(engn))
		engm |= 1 << gf100_fifo_engidx(fifo, engn);
	nvkm_mask(device, 0x002630, engm, engm);

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~BIT_ULL(engn)) {
		if ((engine = nvkm_device_engine(device, engn))) {
			nvkm_subdev_fini(&engine->subdev, false);
			WARN_ON(nvkm_subdev_init(&engine->subdev));
		}
	}

	gf100_fifo_runlist_commit(fifo);
	nvkm_wr32(device, 0x00262c, engm);
	nvkm_mask(device, 0x002630, engm, 0x00000000);
}
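
/* Called with fifo->base.lock held when an engine faults on a channel:
 * disable the channel (0x003004 + chid * 8), drop it from the runlist,
 * mark it killed, and queue the engine for reset in the recovery worker
 * above.
 */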
static void
gf100_fifo_recover(struct gf100_fifo *fifo, struct nvkm_engine *engine,
		   struct gf100_fifo_chan *chan)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 chid = chan->base.chid;

	nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
		   nvkm_subdev_name[engine->subdev.index], chid);
	assert_spin_locked(&fifo->base.lock);

	nvkm_mask(device, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000);
	list_del_init(&chan->head);
	chan->killed = true;

	if (engine != &fifo->base.engine)
		fifo->recover.mask |= 1ULL << engine->subdev.index;
	schedule_work(&fifo->recover.work);
	nvkm_fifo_kevent(&fifo->base, chid);
}

static const struct nvkm_enum
gf100_fifo_sched_reason[] = {
	{ 0x0a, "CTXSW_TIMEOUT" },
	{}
};
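
/* SCHED_ERROR code 0x0a (CTXSW_TIMEOUT): scan the per-engine status words
 * at 0x002640 + engn * 4 for an engine that looks stuck mid context-switch
 * and recover the channel it is bound to.  Several of the status bits are
 * only partially understood (hence the unk0/unk1 names below).
 */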
static void
gf100_fifo_intr_sched_ctxsw(struct gf100_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_engine *engine;
	struct gf100_fifo_chan *chan;
	unsigned long flags;
	u32 engn;

	spin_lock_irqsave(&fifo->base.lock, flags);
	for (engn = 0; engn < 6; engn++) {
		u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
		u32 busy = (stat & 0x80000000);
		u32 save = (stat & 0x00100000); /* maybe? */
		u32 unk0 = (stat & 0x00040000);
		u32 unk1 = (stat & 0x00001000);
		u32 chid = (stat & 0x0000007f);
		(void)save;

		if (busy && unk0 && unk1) {
			list_for_each_entry(chan, &fifo->chan, head) {
				if (chan->base.chid == chid) {
					engine = gf100_fifo_engine(fifo, engn);
					if (!engine)
						break;
					gf100_fifo_recover(fifo, engine, chan);
					break;
				}
			}
		}
	}
	spin_unlock_irqrestore(&fifo->base.lock, flags);
}

static void
gf100_fifo_intr_sched(struct gf100_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00254c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en;

	en = nvkm_enum_find(gf100_fifo_sched_reason, code);

	nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");

	switch (code) {
	case 0x0a:
		gf100_fifo_intr_sched_ctxsw(fifo);
		break;
	default:
		break;
	}
}
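
/* Lookup tables used to pretty-print MMU faults: the faulting unit, the
 * fault reason, and the HUB/GPC client that generated the access.
 */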
static const struct nvkm_enum
gf100_fifo_fault_engine[] = {
	{ 0x00, "PGRAPH", NULL, NVKM_ENGINE_GR },
	{ 0x03, "PEEPHOLE", NULL, NVKM_ENGINE_IFB },
	{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
	{ 0x05, "BAR3", NULL, NVKM_SUBDEV_INSTMEM },
	{ 0x07, "PFIFO", NULL, NVKM_ENGINE_FIFO },
	{ 0x10, "PMSVLD", NULL, NVKM_ENGINE_MSVLD },
	{ 0x11, "PMSPPP", NULL, NVKM_ENGINE_MSPPP },
	{ 0x13, "PCOUNTER" },
	{ 0x14, "PMSPDEC", NULL, NVKM_ENGINE_MSPDEC },
	{ 0x15, "PCE0", NULL, NVKM_ENGINE_CE0 },
	{ 0x16, "PCE1", NULL, NVKM_ENGINE_CE1 },
	{ 0x17, "PMU" },
	{}
};

static const struct nvkm_enum
gf100_fifo_fault_reason[] = {
	{ 0x00, "PT_NOT_PRESENT" },
	{ 0x01, "PT_TOO_SHORT" },
	{ 0x02, "PAGE_NOT_PRESENT" },
	{ 0x03, "VM_LIMIT_EXCEEDED" },
	{ 0x04, "NO_CHANNEL" },
	{ 0x05, "PAGE_SYSTEM_ONLY" },
	{ 0x06, "PAGE_READ_ONLY" },
	{ 0x0a, "COMPRESSED_SYSRAM" },
	{ 0x0c, "INVALID_STORAGE_TYPE" },
	{}
};

static const struct nvkm_enum
gf100_fifo_fault_hubclient[] = {
	{ 0x01, "PCOPY0" },
	{ 0x02, "PCOPY1" },
	{ 0x04, "DISPATCH" },
	{ 0x05, "CTXCTL" },
	{ 0x06, "PFIFO" },
	{ 0x07, "BAR_READ" },
	{ 0x08, "BAR_WRITE" },
	{ 0x0b, "PVP" },
	{ 0x0c, "PMSPPP" },
	{ 0x0d, "PMSVLD" },
	{ 0x11, "PCOUNTER" },
	{ 0x12, "PMU" },
	{ 0x14, "CCACHE" },
	{ 0x15, "CCACHE_POST" },
	{}
};

static const struct nvkm_enum
gf100_fifo_fault_gpcclient[] = {
	{ 0x01, "TEX" },
	{ 0x0c, "ESETUP" },
	{ 0x0e, "CTXCTL" },
	{ 0x0f, "PROP" },
	{}
};
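
/* Decode an MMU fault from the per-unit fault registers at
 * 0x002800 + unit * 0x10: faulting instance, virtual address, and a status
 * word giving the GPC, client, access direction and reason.  BAR/IFB
 * faults are acknowledged by touching the 0x0017xx registers below (exact
 * semantics undocumented); faults from a real engine trigger channel
 * recovery.
 */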
static void
gf100_fifo_intr_fault(struct gf100_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
	u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
	u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
	u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
	u32 gpc    = (stat & 0x1f000000) >> 24;
	u32 client = (stat & 0x00001f00) >> 8;
	u32 write  = (stat & 0x00000080);
	u32 hub    = (stat & 0x00000040);
	u32 reason = (stat & 0x0000000f);
	const struct nvkm_enum *er, *eu, *ec;
	struct nvkm_engine *engine = NULL;
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	char gpcid[8] = "";

	er = nvkm_enum_find(gf100_fifo_fault_reason, reason);
	eu = nvkm_enum_find(gf100_fifo_fault_engine, unit);
	if (hub) {
		ec = nvkm_enum_find(gf100_fifo_fault_hubclient, client);
	} else {
		ec = nvkm_enum_find(gf100_fifo_fault_gpcclient, client);
		snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
	}

	if (eu && eu->data2) {
		switch (eu->data2) {
		case NVKM_SUBDEV_BAR:
			nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
			break;
		case NVKM_SUBDEV_INSTMEM:
			nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
			break;
		case NVKM_ENGINE_IFB:
			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
			break;
		default:
			engine = nvkm_device_engine(device, eu->data2);
			break;
		}
	}

	chan = nvkm_fifo_chan_inst(&fifo->base, (u64)inst << 12, &flags);

	nvkm_error(subdev,
		   "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
		   "reason %02x [%s] on channel %d [%010llx %s]\n",
		   write ? "write" : "read", (u64)vahi << 32 | valo,
		   unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
		   reason, er ? er->name : "", chan ? chan->chid : -1,
		   (u64)inst << 12,
		   chan ? chan->object.client->name : "unknown");

	if (engine && chan)
		gf100_fifo_recover(fifo, engine, (void *)chan);
	nvkm_fifo_chan_put(&fifo->base, flags, &chan);
}

static const struct nvkm_bitfield
gf100_fifo_pbdma_intr[] = {
/*	{ 0x00008000, "" }	seen with null ib push */
	{ 0x00200000, "ILLEGAL_MTHD" },
	{ 0x00800000, "EMPTY_SUBC" },
	{}
};
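
/* Per-PBDMA interrupt: an ILLEGAL_MTHD may actually be a software method,
 * so give the SW engine a chance to handle it before logging.  The method
 * slot is then cleared with the magic write to 0x0400c0 and the status is
 * acked.
 */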
static void
gf100_fifo_intr_pbdma(struct gf100_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000));
	u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
	u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0x7f;
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00003ffc);
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	u32 show = stat;
	char msg[128];

	if (stat & 0x00800000) {
		if (device->sw) {
			if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
				show &= ~0x00800000;
		}
	}

	if (show) {
		nvkm_snprintbf(msg, sizeof(msg), gf100_fifo_pbdma_intr, show);
		chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
				   "subc %d mthd %04x data %08x\n",
			   unit, show, msg, chid, chan ? chan->inst->addr : 0,
			   chan ? chan->object.client->name : "unknown",
			   subc, mthd, data);
		nvkm_fifo_chan_put(&fifo->base, flags, &chan);
	}

	nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
	nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}
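
/* Runlist interrupt: bit 28 signals completion of a runlist update and
 * wakes anyone blocked in gf100_fifo_runlist_commit(); anything else is
 * unexpected and just logged and acked.
 */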
static void
gf100_fifo_intr_runlist(struct gf100_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x002a00);

	if (intr & 0x10000000) {
		wake_up(&fifo->runlist.wait);
		nvkm_wr32(device, 0x002a00, 0x10000000);
		intr &= ~0x10000000;
	}

	if (intr) {
		nvkm_error(subdev, "RUNLIST %08x\n", intr);
		nvkm_wr32(device, 0x002a00, intr);
	}
}
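
/* Per-engine (non-stall) interrupt: bit 0 of each nibble appears to be the
 * event used for uevent signalling; any other enabled bits are logged and
 * masked off in 0x002628 so they cannot repeat.
 */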
static void
gf100_fifo_intr_engine_unit(struct gf100_fifo *fifo, int engn)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x0025a8 + (engn * 0x04));
	u32 inte = nvkm_rd32(device, 0x002628);
	u32 unkn;

	nvkm_wr32(device, 0x0025a8 + (engn * 0x04), intr);

	for (unkn = 0; unkn < 8; unkn++) {
		u32 ints = (intr >> (unkn * 0x04)) & inte;
		if (ints & 0x1) {
			nvkm_fifo_uevent(&fifo->base);
			ints &= ~1;
		}
		if (ints) {
			nvkm_error(subdev, "ENGINE %d %d %01x",
				   engn, unkn, ints);
			nvkm_mask(device, 0x002628, ints, 0);
		}
	}
}

void
gf100_fifo_intr_engine(struct gf100_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x0025a4);

	while (mask) {
		u32 unit = __ffs(mask);
		gf100_fifo_intr_engine_unit(fifo, unit);
		mask &= ~(1 << unit);
	}
}
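
/* Top-level PFIFO interrupt handler: read 0x002100 masked by the enable
 * register 0x002140 and dispatch each status bit to the handlers above.
 * Any leftover bits are logged and then masked off so an unknown source
 * cannot storm the CPU.
 */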
static void
gf100_fifo_intr(struct nvkm_fifo *base)
{
	struct gf100_fifo *fifo = gf100_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x002140);
	u32 stat = nvkm_rd32(device, 0x002100) & mask;

	if (stat & 0x00000001) {
		u32 intr = nvkm_rd32(device, 0x00252c);
		nvkm_warn(subdev, "INTR 00000001: %08x\n", intr);
		nvkm_wr32(device, 0x002100, 0x00000001);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000100) {
		gf100_fifo_intr_sched(fifo);
		nvkm_wr32(device, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	if (stat & 0x00010000) {
		u32 intr = nvkm_rd32(device, 0x00256c);
		nvkm_warn(subdev, "INTR 00010000: %08x\n", intr);
		nvkm_wr32(device, 0x002100, 0x00010000);
		stat &= ~0x00010000;
	}

	if (stat & 0x01000000) {
		u32 intr = nvkm_rd32(device, 0x00258c);
		nvkm_warn(subdev, "INTR 01000000: %08x\n", intr);
		nvkm_wr32(device, 0x002100, 0x01000000);
		stat &= ~0x01000000;
	}

	if (stat & 0x10000000) {
		u32 mask = nvkm_rd32(device, 0x00259c);
		while (mask) {
			u32 unit = __ffs(mask);
			gf100_fifo_intr_fault(fifo, unit);
			nvkm_wr32(device, 0x00259c, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x10000000;
	}

	if (stat & 0x20000000) {
		u32 mask = nvkm_rd32(device, 0x0025a0);
		while (mask) {
			u32 unit = __ffs(mask);
			gf100_fifo_intr_pbdma(fifo, unit);
			nvkm_wr32(device, 0x0025a0, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
		gf100_fifo_intr_runlist(fifo);
		stat &= ~0x40000000;
	}

	if (stat & 0x80000000) {
		gf100_fifo_intr_engine(fifo);
		stat &= ~0x80000000;
	}

	if (stat) {
		nvkm_error(subdev, "INTR %08x\n", stat);
		nvkm_mask(device, 0x002140, stat, 0x00000000);
		nvkm_wr32(device, 0x002100, stat);
	}
}
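
/* One-time setup: count the PBDMA units, allocate the two runlist buffers,
 * allocate the per-channel user area (128 channels, one page each), and
 * map it through BAR.
 */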
static int
gf100_fifo_oneinit(struct nvkm_fifo *base)
{
	struct gf100_fifo *fifo = gf100_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int ret;

	/* Determine number of PBDMAs by checking valid enable bits. */
	nvkm_wr32(device, 0x002204, 0xffffffff);
	fifo->pbdma_nr = hweight32(nvkm_rd32(device, 0x002204));
	nvkm_debug(subdev, "%d PBDMA(s)\n", fifo->pbdma_nr);

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
			      false, &fifo->runlist.mem[0]);
	if (ret)
		return ret;

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
			      false, &fifo->runlist.mem[1]);
	if (ret)
		return ret;

	init_waitqueue_head(&fifo->runlist.wait);

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 0x1000,
			      0x1000, false, &fifo->user.mem);
	if (ret)
		return ret;

	ret = nvkm_bar_umap(device->bar, 128 * 0x1000, 12, &fifo->user.bar);
	if (ret)
		return ret;

	nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);
	return 0;
}

static void
gf100_fifo_fini(struct nvkm_fifo *base)
{
	struct gf100_fifo *fifo = gf100_fifo(base);
	flush_work(&fifo->recover.work);
}
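
/* Hardware init: enable the PBDMA units, assign engines to them on parts
 * with three or more PBDMAs, clear and enable per-PBDMA interrupts, point
 * 0x002254 at the user area mapping, and unmask the top-level interrupts.
 */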
static void
gf100_fifo_init(struct nvkm_fifo *base)
{
	struct gf100_fifo *fifo = gf100_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int i;

	/* Enable PBDMAs. */
	nvkm_wr32(device, 0x000204, (1 << fifo->pbdma_nr) - 1);
	nvkm_wr32(device, 0x002204, (1 << fifo->pbdma_nr) - 1);

	/* Assign engines to PBDMAs. */
	if (fifo->pbdma_nr >= 3) {
		nvkm_wr32(device, 0x002208, ~(1 << 0)); /* PGRAPH */
		nvkm_wr32(device, 0x00220c, ~(1 << 1)); /* PVP */
		nvkm_wr32(device, 0x002210, ~(1 << 1)); /* PMSPPP */
		nvkm_wr32(device, 0x002214, ~(1 << 1)); /* PMSVLD */
		nvkm_wr32(device, 0x002218, ~(1 << 2)); /* PCE0 */
		nvkm_wr32(device, 0x00221c, ~(1 << 1)); /* PCE1 */
	}

	/* PBDMA[n] */
	for (i = 0; i < fifo->pbdma_nr; i++) {
		nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
	}

	nvkm_mask(device, 0x002200, 0x00000001, 0x00000001);
	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);

	nvkm_wr32(device, 0x002100, 0xffffffff);
	nvkm_wr32(device, 0x002140, 0x7fffffff);
	nvkm_wr32(device, 0x002628, 0x00000001); /* ENGINE_INTR_EN */
}

static void *
gf100_fifo_dtor(struct nvkm_fifo *base)
{
	struct gf100_fifo *fifo = gf100_fifo(base);
	nvkm_vm_put(&fifo->user.bar);
	nvkm_memory_del(&fifo->user.mem);
	nvkm_memory_del(&fifo->runlist.mem[0]);
	nvkm_memory_del(&fifo->runlist.mem[1]);
	return fifo;
}

static const struct nvkm_fifo_func
gf100_fifo = {
	.dtor = gf100_fifo_dtor,
	.oneinit = gf100_fifo_oneinit,
	.init = gf100_fifo_init,
	.fini = gf100_fifo_fini,
	.intr = gf100_fifo_intr,
	.uevent_init = gf100_fifo_uevent_init,
	.uevent_fini = gf100_fifo_uevent_fini,
	.chan = {
		&gf100_fifo_gpfifo_oclass,
		NULL
	},
};
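
/* Constructor: allocate the gf100_fifo, initialise the channel list and
 * recovery work, and register 128 channels with the core FIFO layer.
 */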
int
gf100_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
{
	struct gf100_fifo *fifo;

	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
		return -ENOMEM;
	INIT_LIST_HEAD(&fifo->chan);
	INIT_WORK(&fifo->recover.work, gf100_fifo_recover_work);
	*pfifo = &fifo->base;
	return nvkm_fifo_ctor(&gf100_fifo, device, index, 128, &fifo->base);
}