gk104.c

/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "gk104.h"
#include "changk104.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <subdev/bar.h>
#include <subdev/timer.h>
#include <subdev/top.h>
#include <engine/sw.h>

#include <nvif/class.h>

struct gk104_fifo_engine_status {
	bool busy;
	bool faulted;
	bool chsw;
	bool save;
	bool load;
	struct {
		bool tsg;
		u32 id;
	} prev, next, *chan;
};

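/* Decode the per-engine status register (0x002640 + engn * 0x08) into a
 * software-friendly form, and determine which channel/TSG (if any) the
 * engine is currently bound to, or is switching to/from.
 */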
static void
gk104_fifo_engine_status(struct gk104_fifo *fifo, int engn,
			 struct gk104_fifo_engine_status *status)
{
	struct nvkm_engine *engine = fifo->engine[engn].engine;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x08));

	status->busy     = !!(stat & 0x80000000);
	status->faulted  = !!(stat & 0x40000000);
	status->next.tsg = !!(stat & 0x10000000);
	status->next.id  =   (stat & 0x0fff0000) >> 16;
	status->chsw     = !!(stat & 0x00008000);
	status->save     = !!(stat & 0x00004000);
	status->load     = !!(stat & 0x00002000);
	status->prev.tsg = !!(stat & 0x00001000);
	status->prev.id  =   (stat & 0x00000fff);
	status->chan     = NULL;

	if (status->busy && status->chsw) {
		if (status->load && status->save) {
			if (engine && nvkm_engine_chsw_load(engine))
				status->chan = &status->next;
			else
				status->chan = &status->prev;
		} else
		if (status->load) {
			status->chan = &status->next;
		} else {
			status->chan = &status->prev;
		}
	} else
	if (status->load) {
		status->chan = &status->prev;
	}

	nvkm_debug(subdev, "engine %02d: busy %d faulted %d chsw %d "
			   "save %d load %d %sid %d%s-> %sid %d%s\n",
		   engn, status->busy, status->faulted,
		   status->chsw, status->save, status->load,
		   status->prev.tsg ? "tsg" : "ch", status->prev.id,
		   status->chan == &status->prev ? "*" : " ",
		   status->next.tsg ? "tsg" : "ch", status->next.id,
		   status->chan == &status->next ? "*" : " ");
}

static int
gk104_fifo_class_get(struct nvkm_fifo *base, int index,
		     const struct nvkm_fifo_chan_oclass **psclass)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	int c = 0;

	while ((*psclass = fifo->func->chan[c])) {
		if (c++ == index)
			return 0;
	}
	return c;
}

static void
gk104_fifo_uevent_fini(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
}

static void
gk104_fifo_uevent_init(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
}

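/* Rebuild the runlist for 'runl' in the inactive half of its double-buffered
 * runlist memory, point the hardware at the new list, and wait for the
 * pending bit (0x002284 + runl * 0x08, bit 20) to clear.
 */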
void
gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl)
{
	struct gk104_fifo_chan *chan;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_memory *mem;
	int nr = 0;
	int target;

	mutex_lock(&subdev->mutex);
	mem = fifo->runlist[runl].mem[fifo->runlist[runl].next];
	fifo->runlist[runl].next = !fifo->runlist[runl].next;

	nvkm_kmap(mem);
	list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
		nvkm_wo32(mem, (nr * 8) + 0, chan->base.chid);
		nvkm_wo32(mem, (nr * 8) + 4, 0x00000000);
		nr++;
	}
	nvkm_done(mem);

	switch (nvkm_memory_target(mem)) {
	case NVKM_MEM_TARGET_VRAM: target = 0; break;
	case NVKM_MEM_TARGET_NCOH: target = 3; break;
	default:
		WARN_ON(1);
		mutex_unlock(&subdev->mutex);
		return;
	}

	nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) |
				    (target << 28));
	nvkm_wr32(device, 0x002274, (runl << 20) | nr);

	if (wait_event_timeout(fifo->runlist[runl].wait,
			       !(nvkm_rd32(device, 0x002284 + (runl * 0x08))
				 & 0x00100000),
			       msecs_to_jiffies(2000)) == 0)
		nvkm_error(subdev, "runlist %d update timeout\n", runl);
	mutex_unlock(&subdev->mutex);
}

void
gk104_fifo_runlist_remove(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
{
	mutex_lock(&fifo->base.engine.subdev.mutex);
	list_del_init(&chan->head);
	mutex_unlock(&fifo->base.engine.subdev.mutex);
}

void
gk104_fifo_runlist_insert(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
{
	mutex_lock(&fifo->base.engine.subdev.mutex);
	list_add_tail(&chan->head, &fifo->runlist[chan->runl].chan);
	mutex_unlock(&fifo->base.engine.subdev.mutex);
}

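/* Deferred (workqueue) half of recovery: with the affected runlists blocked,
 * reset each engine flagged in recover.engm, re-commit each runlist flagged
 * in recover.runm, then unblock the runlists again.
 */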
static void
gk104_fifo_recover_work(struct work_struct *w)
{
	struct gk104_fifo *fifo = container_of(w, typeof(*fifo), recover.work);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_engine *engine;
	unsigned long flags;
	u32 engm, runm, todo;
	int engn, runl;

	spin_lock_irqsave(&fifo->base.lock, flags);
	runm = fifo->recover.runm;
	engm = fifo->recover.engm;
	fifo->recover.engm = 0;
	fifo->recover.runm = 0;
	spin_unlock_irqrestore(&fifo->base.lock, flags);

	nvkm_mask(device, 0x002630, runm, runm);

	for (todo = engm; engn = __ffs(todo), todo; todo &= ~BIT(engn)) {
		if ((engine = fifo->engine[engn].engine)) {
			nvkm_subdev_fini(&engine->subdev, false);
			WARN_ON(nvkm_subdev_init(&engine->subdev));
		}
	}

	for (todo = runm; runl = __ffs(todo), todo; todo &= ~BIT(runl))
		gk104_fifo_runlist_commit(fifo, runl);

	nvkm_wr32(device, 0x00262c, runm);
	nvkm_mask(device, 0x002630, runm, 0x00000000);
}

static void gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn);

static void
gk104_fifo_recover_runl(struct gk104_fifo *fifo, int runl)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32 runm = BIT(runl);

	assert_spin_locked(&fifo->base.lock);
	if (fifo->recover.runm & runm)
		return;
	fifo->recover.runm |= runm;

	/* Block runlist to prevent channel assignment(s) from changing. */
	nvkm_mask(device, 0x002630, runm, runm);

	/* Schedule recovery. */
	nvkm_warn(subdev, "runlist %d: scheduled for recovery\n", runl);
	schedule_work(&fifo->recover.work);
}

static void
gk104_fifo_recover_chan(struct nvkm_fifo *base, int chid)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32  stat = nvkm_rd32(device, 0x800004 + (chid * 0x08));
	const u32  runl = (stat & 0x000f0000) >> 16;
	const bool used = (stat & 0x00000001);
	unsigned long engn, engm = fifo->runlist[runl].engm;
	struct gk104_fifo_chan *chan;

	assert_spin_locked(&fifo->base.lock);
	if (!used)
		return;

	/* Lookup SW state for channel, and mark it as dead. */
	list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
		if (chan->base.chid == chid) {
			list_del_init(&chan->head);
			chan->killed = true;
			nvkm_fifo_kevent(&fifo->base, chid);
			break;
		}
	}

	/* Disable channel. */
	nvkm_wr32(device, 0x800004 + (chid * 0x08), stat | 0x00000800);
	nvkm_warn(subdev, "channel %d: killed\n", chid);

	/* Block channel assignments from changing during recovery. */
	gk104_fifo_recover_runl(fifo, runl);

	/* Schedule recovery for any engines the channel is on. */
	for_each_set_bit(engn, &engm, fifo->engine_nr) {
		struct gk104_fifo_engine_status status;

		gk104_fifo_engine_status(fifo, engn, &status);
		if (!status.chan || status.chan->id != chid)
			continue;
		gk104_fifo_recover_engn(fifo, engn);
	}
}

static void
gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn)
{
	struct nvkm_engine *engine = fifo->engine[engn].engine;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32 runl = fifo->engine[engn].runl;
	const u32 engm = BIT(engn);
	struct gk104_fifo_engine_status status;
	int mmui = -1;

	assert_spin_locked(&fifo->base.lock);
	if (fifo->recover.engm & engm)
		return;
	fifo->recover.engm |= engm;

	/* Block channel assignments from changing during recovery. */
	gk104_fifo_recover_runl(fifo, runl);

	/* Determine which channel (if any) is currently on the engine. */
	gk104_fifo_engine_status(fifo, engn, &status);
	if (status.chan) {
		/* The channel is no longer viable, kill it. */
		gk104_fifo_recover_chan(&fifo->base, status.chan->id);
	}

	/* Determine MMU fault ID for the engine, if we're not being
	 * called from the fault handler already.
	 */
	if (!status.faulted && engine) {
		mmui = nvkm_top_fault_id(device, engine->subdev.index);
		if (mmui < 0) {
			const struct nvkm_enum *en = fifo->func->fault.engine;
			for (; en && en->name; en++) {
				if (en->data2 == engine->subdev.index) {
					mmui = en->value;
					break;
				}
			}
		}
		WARN_ON(mmui < 0);
	}

	/* Trigger a MMU fault for the engine.
	 *
	 * It's not entirely clear why this is needed, but nvgpu does
	 * something similar, and it makes recovery from CTXSW_TIMEOUT
	 * a lot more reliable.
	 */
	if (mmui >= 0) {
		nvkm_wr32(device, 0x002a30 + (engn * 0x04), 0x00000100 | mmui);

		/* Wait for fault to trigger. */
		nvkm_msec(device, 2000,
			gk104_fifo_engine_status(fifo, engn, &status);
			if (status.faulted)
				break;
		);

		/* Release MMU fault trigger, and ACK the fault. */
		nvkm_wr32(device, 0x002a30 + (engn * 0x04), 0x00000000);
		nvkm_wr32(device, 0x00259c, BIT(mmui));
		nvkm_wr32(device, 0x002100, 0x10000000);
	}

	/* Schedule recovery. */
	nvkm_warn(subdev, "engine %d: scheduled for recovery\n", engn);
	schedule_work(&fifo->recover.work);
}

static const struct nvkm_enum
gk104_fifo_bind_reason[] = {
	{ 0x01, "BIND_NOT_UNBOUND" },
	{ 0x02, "SNOOP_WITHOUT_BAR1" },
	{ 0x03, "UNBIND_WHILE_RUNNING" },
	{ 0x05, "INVALID_RUNLIST" },
	{ 0x06, "INVALID_CTX_TGT" },
	{ 0x0b, "UNBIND_WHILE_PARKED" },
	{}
};

static void
gk104_fifo_intr_bind(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00252c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en =
		nvkm_enum_find(gk104_fifo_bind_reason, code);

	nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : "");
}

static const struct nvkm_enum
gk104_fifo_sched_reason[] = {
	{ 0x0a, "CTXSW_TIMEOUT" },
	{}
};

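/* CTXSW_TIMEOUT handling: SCHED_ERROR is ACKed and masked first, since
 * MMU_FAULT cannot be triggered while it is pending, then every engine
 * that is busy mid-channel-switch is scheduled for recovery.
 */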
static void
gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	unsigned long flags, engm = 0;
	u32 engn;

	/* We need to ACK the SCHED_ERROR here, and prevent it reasserting,
	 * as MMU_FAULT cannot be triggered while it's pending.
	 */
	spin_lock_irqsave(&fifo->base.lock, flags);
	nvkm_mask(device, 0x002140, 0x00000100, 0x00000000);
	nvkm_wr32(device, 0x002100, 0x00000100);

	for (engn = 0; engn < fifo->engine_nr; engn++) {
		struct gk104_fifo_engine_status status;

		gk104_fifo_engine_status(fifo, engn, &status);
		if (!status.busy || !status.chsw)
			continue;

		engm |= BIT(engn);
	}

	for_each_set_bit(engn, &engm, fifo->engine_nr)
		gk104_fifo_recover_engn(fifo, engn);

	nvkm_mask(device, 0x002140, 0x00000100, 0x00000100);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
}

static void
gk104_fifo_intr_sched(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00254c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en =
		nvkm_enum_find(gk104_fifo_sched_reason, code);

	nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");

	switch (code) {
	case 0x0a:
		gk104_fifo_intr_sched_ctxsw(fifo);
		break;
	default:
		break;
	}
}

static void
gk104_fifo_intr_chsw(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x00256c);

	nvkm_error(subdev, "CHSW_ERROR %08x\n", stat);
	nvkm_wr32(device, 0x00256c, stat);
}

static void
gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x00259c);

	nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat);
}

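/* Decode and report an MMU fault from fault status unit 'unit', kill the
 * channel it can be traced back to (if any), and schedule recovery for the
 * engine involved.
 */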
static void
gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
	u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
	u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
	u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
	u32 gpc    = (stat & 0x1f000000) >> 24;
	u32 client = (stat & 0x00001f00) >> 8;
	u32 write  = (stat & 0x00000080);
	u32 hub    = (stat & 0x00000040);
	u32 reason = (stat & 0x0000000f);
	const struct nvkm_enum *er, *eu, *ec;
	struct nvkm_engine *engine = NULL;
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	char gpcid[8] = "", en[16] = "";
	int engn;

	er = nvkm_enum_find(fifo->func->fault.reason, reason);
	eu = nvkm_enum_find(fifo->func->fault.engine, unit);
	if (hub) {
		ec = nvkm_enum_find(fifo->func->fault.hubclient, client);
	} else {
		ec = nvkm_enum_find(fifo->func->fault.gpcclient, client);
		snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
	}

	if (eu && eu->data2) {
		switch (eu->data2) {
		case NVKM_SUBDEV_BAR:
			nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
			break;
		case NVKM_SUBDEV_INSTMEM:
			nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
			break;
		case NVKM_ENGINE_IFB:
			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
			break;
		default:
			engine = nvkm_device_engine(device, eu->data2);
			break;
		}
	}

	if (eu == NULL) {
		enum nvkm_devidx engidx = nvkm_top_fault(device, unit);
		if (engidx < NVKM_SUBDEV_NR) {
			const char *src = nvkm_subdev_name[engidx];
			char *dst = en;
			do {
				*dst++ = toupper(*src++);
			} while (*src);
			engine = nvkm_device_engine(device, engidx);
		}
	} else {
		snprintf(en, sizeof(en), "%s", eu->name);
	}

	spin_lock_irqsave(&fifo->base.lock, flags);
	chan = nvkm_fifo_chan_inst_locked(&fifo->base, (u64)inst << 12);

	nvkm_error(subdev,
		   "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
		   "reason %02x [%s] on channel %d [%010llx %s]\n",
		   write ? "write" : "read", (u64)vahi << 32 | valo,
		   unit, en, client, gpcid, ec ? ec->name : "",
		   reason, er ? er->name : "", chan ? chan->chid : -1,
		   (u64)inst << 12,
		   chan ? chan->object.client->name : "unknown");

	/* Kill the channel that caused the fault. */
	if (chan)
		gk104_fifo_recover_chan(&fifo->base, chan->chid);

	/* Channel recovery will probably have already done this for the
	 * correct engine(s), but just in case we can't find the channel
	 * information...
	 */
	for (engn = 0; engn < fifo->engine_nr && engine; engn++) {
		if (fifo->engine[engn].engine == engine) {
			gk104_fifo_recover_engn(fifo, engn);
			break;
		}
	}

	spin_unlock_irqrestore(&fifo->base.lock, flags);
}

static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
	{ 0x00000001, "MEMREQ" },
	{ 0x00000002, "MEMACK_TIMEOUT" },
	{ 0x00000004, "MEMACK_EXTRA" },
	{ 0x00000008, "MEMDAT_TIMEOUT" },
	{ 0x00000010, "MEMDAT_EXTRA" },
	{ 0x00000020, "MEMFLUSH" },
	{ 0x00000040, "MEMOP" },
	{ 0x00000080, "LBCONNECT" },
	{ 0x00000100, "LBREQ" },
	{ 0x00000200, "LBACK_TIMEOUT" },
	{ 0x00000400, "LBACK_EXTRA" },
	{ 0x00000800, "LBDAT_TIMEOUT" },
	{ 0x00001000, "LBDAT_EXTRA" },
	{ 0x00002000, "GPFIFO" },
	{ 0x00004000, "GPPTR" },
	{ 0x00008000, "GPENTRY" },
	{ 0x00010000, "GPCRC" },
	{ 0x00020000, "PBPTR" },
	{ 0x00040000, "PBENTRY" },
	{ 0x00080000, "PBCRC" },
	{ 0x00100000, "XBARCONNECT" },
	{ 0x00200000, "METHOD" },
	{ 0x00400000, "METHODCRC" },
	{ 0x00800000, "DEVICE" },
	{ 0x02000000, "SEMAPHORE" },
	{ 0x04000000, "ACQUIRE" },
	{ 0x08000000, "PRI" },
	{ 0x20000000, "NO_CTXSW_SEG" },
	{ 0x40000000, "PBSEG" },
	{ 0x80000000, "SIGNATURE" },
	{}
};

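/* First PBDMA interrupt group.  DEVICE interrupts (bit 23) carry software
 * methods, which are handed to the SW class before reporting; whatever
 * remains is logged and then ACKed.
 */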
static void
gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04010c + (unit * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000)) & mask;
	u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
	u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00003ffc);
	u32 show = stat;
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	char msg[128];

	if (stat & 0x00800000) {
		if (device->sw) {
			if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
				show &= ~0x00800000;
		}
	}

	nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);

	if (show) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show);
		chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
				   "subc %d mthd %04x data %08x\n",
			   unit, show, msg, chid, chan ? chan->inst->addr : 0,
			   chan ? chan->object.client->name : "unknown",
			   subc, mthd, data);
		nvkm_fifo_chan_put(&fifo->base, flags, &chan);
	}

	nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}

static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
	{ 0x00000001, "HCE_RE_ILLEGAL_OP" },
	{ 0x00000002, "HCE_RE_ALIGNB" },
	{ 0x00000004, "HCE_PRIV" },
	{ 0x00000008, "HCE_ILLEGAL_MTHD" },
	{ 0x00000010, "HCE_ILLEGAL_CLASS" },
	{}
};

static void
gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04014c + (unit * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040148 + (unit * 0x2000)) & mask;
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
	char msg[128];

	if (stat) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_1, stat);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n",
			   unit, stat, msg, chid,
			   nvkm_rd32(device, 0x040150 + (unit * 0x2000)),
			   nvkm_rd32(device, 0x040154 + (unit * 0x2000)));
	}

	nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat);
}

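/* Runlist update completion: wake anyone waiting in
 * gk104_fifo_runlist_commit() for the corresponding runlist.
 */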
static void
gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x002a00);
	while (mask) {
		int runl = __ffs(mask);
		wake_up(&fifo->runlist[runl].wait);
		nvkm_wr32(device, 0x002a00, 1 << runl);
		mask &= ~(1 << runl);
	}
}

static void
gk104_fifo_intr_engine(struct gk104_fifo *fifo)
{
	nvkm_fifo_uevent(&fifo->base);
}

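/* Top-level FIFO interrupt handler: dispatch each pending, enabled interrupt
 * source to its handler and ACK it, then mask off and report anything that
 * remains unhandled.
 */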
static void
gk104_fifo_intr(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x002140);
	u32 stat = nvkm_rd32(device, 0x002100) & mask;

	if (stat & 0x00000001) {
		gk104_fifo_intr_bind(fifo);
		nvkm_wr32(device, 0x002100, 0x00000001);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000010) {
		nvkm_error(subdev, "PIO_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x00000010);
		stat &= ~0x00000010;
	}

	if (stat & 0x00000100) {
		gk104_fifo_intr_sched(fifo);
		nvkm_wr32(device, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	if (stat & 0x00010000) {
		gk104_fifo_intr_chsw(fifo);
		nvkm_wr32(device, 0x002100, 0x00010000);
		stat &= ~0x00010000;
	}

	if (stat & 0x00800000) {
		nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n");
		nvkm_wr32(device, 0x002100, 0x00800000);
		stat &= ~0x00800000;
	}

	if (stat & 0x01000000) {
		nvkm_error(subdev, "LB_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x01000000);
		stat &= ~0x01000000;
	}

	if (stat & 0x08000000) {
		gk104_fifo_intr_dropped_fault(fifo);
		nvkm_wr32(device, 0x002100, 0x08000000);
		stat &= ~0x08000000;
	}

	if (stat & 0x10000000) {
		u32 mask = nvkm_rd32(device, 0x00259c);
		while (mask) {
			u32 unit = __ffs(mask);
			gk104_fifo_intr_fault(fifo, unit);
			nvkm_wr32(device, 0x00259c, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x10000000;
	}

	if (stat & 0x20000000) {
		u32 mask = nvkm_rd32(device, 0x0025a0);
		while (mask) {
			u32 unit = __ffs(mask);
			gk104_fifo_intr_pbdma_0(fifo, unit);
			gk104_fifo_intr_pbdma_1(fifo, unit);
			nvkm_wr32(device, 0x0025a0, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
		gk104_fifo_intr_runlist(fifo);
		stat &= ~0x40000000;
	}

	if (stat & 0x80000000) {
		nvkm_wr32(device, 0x002100, 0x80000000);
		gk104_fifo_intr_engine(fifo);
		stat &= ~0x80000000;
	}

	if (stat) {
		nvkm_error(subdev, "INTR %08x\n", stat);
		nvkm_mask(device, 0x002140, stat, 0x00000000);
		nvkm_wr32(device, 0x002100, stat);
	}
}

static void
gk104_fifo_fini(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;

	flush_work(&fifo->recover.work);
	/* allow mmu fault interrupts, even when we're not using fifo */
	nvkm_mask(device, 0x002140, 0x10000000, 0x10000000);
}

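/* One-time setup: probe the PBDMA count, derive the engine/runlist topology
 * from the PBDMA->runlist mapping and TOP device info, and allocate the
 * double-buffered runlist memory plus the per-channel user area mapped
 * through BAR.
 */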
static int
gk104_fifo_oneinit(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int engn, runl, pbid, ret, i, j;
	enum nvkm_devidx engidx;
	u32 *map;

	/* Determine number of PBDMAs by checking valid enable bits. */
	nvkm_wr32(device, 0x000204, 0xffffffff);
	fifo->pbdma_nr = hweight32(nvkm_rd32(device, 0x000204));
	nvkm_debug(subdev, "%d PBDMA(s)\n", fifo->pbdma_nr);

	/* Read PBDMA->runlist(s) mapping from HW. */
	if (!(map = kzalloc(sizeof(*map) * fifo->pbdma_nr, GFP_KERNEL)))
		return -ENOMEM;

	for (i = 0; i < fifo->pbdma_nr; i++)
		map[i] = nvkm_rd32(device, 0x002390 + (i * 0x04));

	/* Determine runlist configuration from topology device info. */
	i = 0;
	while ((int)(engidx = nvkm_top_engine(device, i++, &runl, &engn)) >= 0) {
		/* Determine which PBDMA handles requests for this engine. */
		for (j = 0, pbid = -1; j < fifo->pbdma_nr; j++) {
			if (map[j] & (1 << runl)) {
				pbid = j;
				break;
			}
		}

		nvkm_debug(subdev, "engine %2d: runlist %2d pbdma %2d (%s)\n",
			   engn, runl, pbid, nvkm_subdev_name[engidx]);

		fifo->engine[engn].engine = nvkm_device_engine(device, engidx);
		fifo->engine[engn].runl = runl;
		fifo->engine[engn].pbid = pbid;
		fifo->engine_nr = max(fifo->engine_nr, engn + 1);
		fifo->runlist[runl].engm |= 1 << engn;
		fifo->runlist_nr = max(fifo->runlist_nr, runl + 1);
	}

	kfree(map);

	for (i = 0; i < fifo->runlist_nr; i++) {
		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
				      0x8000, 0x1000, false,
				      &fifo->runlist[i].mem[0]);
		if (ret)
			return ret;

		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
				      0x8000, 0x1000, false,
				      &fifo->runlist[i].mem[1]);
		if (ret)
			return ret;

		init_waitqueue_head(&fifo->runlist[i].wait);
		INIT_LIST_HEAD(&fifo->runlist[i].chan);
	}

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
			      fifo->base.nr * 0x200, 0x1000, true,
			      &fifo->user.mem);
	if (ret)
		return ret;

	ret = nvkm_bar_umap(device->bar, fifo->base.nr * 0x200, 12,
			    &fifo->user.bar);
	if (ret)
		return ret;

	nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);
	return 0;
}

static void
gk104_fifo_init(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int i;

	/* Enable PBDMAs. */
	nvkm_wr32(device, 0x000204, (1 << fifo->pbdma_nr) - 1);

	/* PBDMA[n] */
	for (i = 0; i < fifo->pbdma_nr; i++) {
		nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
	}

	/* PBDMA[n].HCE */
	for (i = 0; i < fifo->pbdma_nr; i++) {
		nvkm_wr32(device, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
	}

	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);

	nvkm_wr32(device, 0x002100, 0xffffffff);
	nvkm_wr32(device, 0x002140, 0x7fffffff);
}

static void *
gk104_fifo_dtor(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	int i;

	nvkm_vm_put(&fifo->user.bar);
	nvkm_memory_del(&fifo->user.mem);

	for (i = 0; i < fifo->runlist_nr; i++) {
		nvkm_memory_del(&fifo->runlist[i].mem[1]);
		nvkm_memory_del(&fifo->runlist[i].mem[0]);
	}

	return fifo;
}

static const struct nvkm_fifo_func
gk104_fifo_ = {
	.dtor = gk104_fifo_dtor,
	.oneinit = gk104_fifo_oneinit,
	.init = gk104_fifo_init,
	.fini = gk104_fifo_fini,
	.intr = gk104_fifo_intr,
	.uevent_init = gk104_fifo_uevent_init,
	.uevent_fini = gk104_fifo_uevent_fini,
	.recover_chan = gk104_fifo_recover_chan,
	.class_get = gk104_fifo_class_get,
};

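/* Common constructor: allocate the gk104_fifo object, attach the
 * chipset-specific function table, and register with the core FIFO layer.
 */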
int
gk104_fifo_new_(const struct gk104_fifo_func *func, struct nvkm_device *device,
		int index, int nr, struct nvkm_fifo **pfifo)
{
	struct gk104_fifo *fifo;

	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
		return -ENOMEM;
	fifo->func = func;
	INIT_WORK(&fifo->recover.work, gk104_fifo_recover_work);
	*pfifo = &fifo->base;

	return nvkm_fifo_ctor(&gk104_fifo_, device, index, nr, &fifo->base);
}

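/* Fault decode tables.  The enum value is the raw ID reported in the fault
 * status registers; for engines, data2 holds the corresponding nvkm
 * subdev/engine index used to look the engine up for recovery.
 */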
const struct nvkm_enum
gk104_fifo_fault_engine[] = {
	{ 0x00, "GR", NULL, NVKM_ENGINE_GR },
	{ 0x01, "DISPLAY" },
	{ 0x02, "CAPTURE" },
	{ 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
	{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
	{ 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
	{ 0x06, "SCHED" },
	{ 0x07, "HOST0", NULL, NVKM_ENGINE_FIFO },
	{ 0x08, "HOST1", NULL, NVKM_ENGINE_FIFO },
	{ 0x09, "HOST2", NULL, NVKM_ENGINE_FIFO },
	{ 0x0a, "HOST3", NULL, NVKM_ENGINE_FIFO },
	{ 0x0b, "HOST4", NULL, NVKM_ENGINE_FIFO },
	{ 0x0c, "HOST5", NULL, NVKM_ENGINE_FIFO },
	{ 0x0d, "HOST6", NULL, NVKM_ENGINE_FIFO },
	{ 0x0e, "HOST7", NULL, NVKM_ENGINE_FIFO },
	{ 0x0f, "HOSTSR" },
	{ 0x10, "MSVLD", NULL, NVKM_ENGINE_MSVLD },
	{ 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP },
	{ 0x13, "PERF" },
	{ 0x14, "MSPDEC", NULL, NVKM_ENGINE_MSPDEC },
	{ 0x15, "CE0", NULL, NVKM_ENGINE_CE0 },
	{ 0x16, "CE1", NULL, NVKM_ENGINE_CE1 },
	{ 0x17, "PMU" },
	{ 0x18, "PTP" },
	{ 0x19, "MSENC", NULL, NVKM_ENGINE_MSENC },
	{ 0x1b, "CE2", NULL, NVKM_ENGINE_CE2 },
	{}
};

const struct nvkm_enum
gk104_fifo_fault_reason[] = {
	{ 0x00, "PDE" },
	{ 0x01, "PDE_SIZE" },
	{ 0x02, "PTE" },
	{ 0x03, "VA_LIMIT_VIOLATION" },
	{ 0x04, "UNBOUND_INST_BLOCK" },
	{ 0x05, "PRIV_VIOLATION" },
	{ 0x06, "RO_VIOLATION" },
	{ 0x07, "WO_VIOLATION" },
	{ 0x08, "PITCH_MASK_VIOLATION" },
	{ 0x09, "WORK_CREATION" },
	{ 0x0a, "UNSUPPORTED_APERTURE" },
	{ 0x0b, "COMPRESSION_FAILURE" },
	{ 0x0c, "UNSUPPORTED_KIND" },
	{ 0x0d, "REGION_VIOLATION" },
	{ 0x0e, "BOTH_PTES_VALID" },
	{ 0x0f, "INFO_TYPE_POISONED" },
	{}
};

const struct nvkm_enum
gk104_fifo_fault_hubclient[] = {
	{ 0x00, "VIP" },
	{ 0x01, "CE0" },
	{ 0x02, "CE1" },
	{ 0x03, "DNISO" },
	{ 0x04, "FE" },
	{ 0x05, "FECS" },
	{ 0x06, "HOST" },
	{ 0x07, "HOST_CPU" },
	{ 0x08, "HOST_CPU_NB" },
	{ 0x09, "ISO" },
	{ 0x0a, "MMU" },
	{ 0x0b, "MSPDEC" },
	{ 0x0c, "MSPPP" },
	{ 0x0d, "MSVLD" },
	{ 0x0e, "NISO" },
	{ 0x0f, "P2P" },
	{ 0x10, "PD" },
	{ 0x11, "PERF" },
	{ 0x12, "PMU" },
	{ 0x13, "RASTERTWOD" },
	{ 0x14, "SCC" },
	{ 0x15, "SCC_NB" },
	{ 0x16, "SEC" },
	{ 0x17, "SSYNC" },
	{ 0x18, "GR_CE" },
	{ 0x19, "CE2" },
	{ 0x1a, "XV" },
	{ 0x1b, "MMU_NB" },
	{ 0x1c, "MSENC" },
	{ 0x1d, "DFALCON" },
	{ 0x1e, "SKED" },
	{ 0x1f, "AFALCON" },
	{}
};

const struct nvkm_enum
gk104_fifo_fault_gpcclient[] = {
	{ 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
	{ 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
	{ 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
	{ 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
	{ 0x0c, "RAST" },
	{ 0x0d, "GCC" },
	{ 0x0e, "GPCCS" },
	{ 0x0f, "PROP_0" },
	{ 0x10, "PROP_1" },
	{ 0x11, "PROP_2" },
	{ 0x12, "PROP_3" },
	{ 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
	{ 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
	{ 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
	{ 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
	{ 0x1f, "GPM" },
	{ 0x20, "LTP_UTLB_0" },
	{ 0x21, "LTP_UTLB_1" },
	{ 0x22, "LTP_UTLB_2" },
	{ 0x23, "LTP_UTLB_3" },
	{ 0x24, "GPC_RGG_UTLB" },
	{}
};

static const struct gk104_fifo_func
gk104_fifo = {
	.fault.engine = gk104_fifo_fault_engine,
	.fault.reason = gk104_fifo_fault_reason,
	.fault.hubclient = gk104_fifo_fault_hubclient,
	.fault.gpcclient = gk104_fifo_fault_gpcclient,
	.chan = {
		&gk104_fifo_gpfifo_oclass,
		NULL
	},
};

int
gk104_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
{
	return gk104_fifo_new_(&gk104_fifo, device, index, 4096, pfifo);
}