nouveau_irq.c

/*
 * Copyright (C) 2006 Ben Skeggs.
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/*
 * Authors:
 *   Ben Skeggs <darktama@iinet.net.au>
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_reg.h"
#include "nouveau_ramht.h"
#include <linux/ratelimit.h>

/* needed for hotplug irq */
#include "nouveau_connector.h"
#include "nv50_display.h"

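/* Shared rate-limit for the noisy error paths below: allow a burst of
 * 20 messages, refilled every 3 seconds.  nouveau_ratelimit() returns
 * nonzero while printing is still allowed. */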
static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);

static int nouveau_ratelimit(void)
{
	return __ratelimit(&nouveau_ratelimit_state);
}

void
nouveau_irq_preinstall(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/* Master disable */
	nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);

	if (dev_priv->card_type >= NV_50) {
		INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
		INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh);
		spin_lock_init(&dev_priv->hpd_state.lock);
		INIT_LIST_HEAD(&dev_priv->vbl_waiting);
	}
}

int
nouveau_irq_postinstall(struct drm_device *dev)
{
	/* Master enable */
	nv_wr32(dev, NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE);
	return 0;
}

void
nouveau_irq_uninstall(struct drm_device *dev)
{
	/* Master disable */
	nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
}

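/* Dispatch a method to its software implementation: find the class in
 * the per-chipset grclass table (terminated by an id of 0), then the
 * method within that class.  Returns -ENOENT if nobody handles it. */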
static int
nouveau_call_method(struct nouveau_channel *chan, int class, int mthd, int data)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_pgraph_object_method *grm;
	struct nouveau_pgraph_object_class *grc;

	grc = dev_priv->engine.graph.grclass;
	while (grc->id) {
		if (grc->id == class)
			break;
		grc++;
	}

	if (grc->id != class || !grc->methods)
		return -ENOENT;

	grm = grc->methods;
	while (grm->id) {
		if (grm->id == mthd)
			return grm->exec(chan, class, mthd, data);
		grm++;
	}

	return -ENOENT;
}

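/* Attempt to execute a trapped PFIFO method in software.  The trapped
 * address encodes the subchannel in bits 15:13 and the method offset
 * in bits 12:2.  Method 0 binds an object: if it's a software object,
 * remember its class and clear the subchannel's engine bits in
 * CACHE1_ENGINE so that subsequent methods trap here too. */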
static bool
nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = NULL;
	struct nouveau_gpuobj *obj;
	const int subc = (addr >> 13) & 0x7;
	const int mthd = addr & 0x1ffc;
	bool handled = false;
	u32 engine;

	if (likely(chid >= 0 && chid < dev_priv->engine.fifo.channels))
		chan = dev_priv->fifos[chid];
	if (unlikely(!chan))
		return false;

	switch (mthd) {
	case 0x0000: /* bind object to subchannel */
		obj = nouveau_ramht_find(chan, data);
		if (unlikely(!obj || obj->engine != NVOBJ_ENGINE_SW))
			break;

		chan->sw_subchannel[subc] = obj->class;
		engine = 0x0000000f << (subc * 4);

		nv_mask(dev, NV04_PFIFO_CACHE1_ENGINE, engine, 0x00000000);
		handled = true;
		break;
	default:
		engine = nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE);
		if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
			break;

		if (!nouveau_call_method(chan, chan->sw_subchannel[subc],
					 mthd, data))
			handled = true;
		break;
	}

	return handled;
}

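/* PFIFO top half.  Cache reassignment is suspended while interrupts
 * are serviced.  Unhandled CACHE_ERROR entries are skipped by bumping
 * GET past the offending method with the pusher/puller paused; if
 * PFIFO is still unhappy after 100 spins, its interrupts (and PMC's)
 * get masked off entirely. */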
static void
nouveau_fifo_irq_handler(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	uint32_t status, reassign;
	int cnt = 0;

	reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
	while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
		uint32_t chid, get;

		nv_wr32(dev, NV03_PFIFO_CACHES, 0);

		chid = engine->fifo.channel_id(dev);
		get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);

		if (status & NV_PFIFO_INTR_CACHE_ERROR) {
			uint32_t mthd, data;
			int ptr;

			/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
			 * wrapping on my G80 chips, but CACHE1 isn't big
			 * enough for this much data.. Tests show that it
			 * wraps around to the start at GET=0x800.. No clue
			 * as to why..
			 */
			ptr = (get & 0x7ff) >> 2;

			if (dev_priv->card_type < NV_40) {
				mthd = nv_rd32(dev,
					NV04_PFIFO_CACHE1_METHOD(ptr));
				data = nv_rd32(dev,
					NV04_PFIFO_CACHE1_DATA(ptr));
			} else {
				mthd = nv_rd32(dev,
					NV40_PFIFO_CACHE1_METHOD(ptr));
				data = nv_rd32(dev,
					NV40_PFIFO_CACHE1_DATA(ptr));
			}

			if (!nouveau_fifo_swmthd(dev, chid, mthd, data)) {
				NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
					     "Mthd 0x%04x Data 0x%08x\n",
					chid, (mthd >> 13) & 7, mthd & 0x1ffc,
					data);
			}

			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
			nv_wr32(dev, NV03_PFIFO_INTR_0,
						NV_PFIFO_INTR_CACHE_ERROR);

			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
			nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);

			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
				nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);

			status &= ~NV_PFIFO_INTR_CACHE_ERROR;
		}

		if (status & NV_PFIFO_INTR_DMA_PUSHER) {
			u32 dma_get = nv_rd32(dev, 0x003244);
			u32 dma_put = nv_rd32(dev, 0x003240);
			u32 push = nv_rd32(dev, 0x003220);
			u32 state = nv_rd32(dev, 0x003228);

			if (dev_priv->card_type == NV_50) {
				u32 ho_get = nv_rd32(dev, 0x003328);
				u32 ho_put = nv_rd32(dev, 0x003320);
				u32 ib_get = nv_rd32(dev, 0x003334);
				u32 ib_put = nv_rd32(dev, 0x003330);

				if (nouveau_ratelimit())
					NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
					     "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
					     "State 0x%08x Push 0x%08x\n",
						chid, ho_get, dma_get, ho_put,
						dma_put, ib_get, ib_put, state,
						push);

				/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
				nv_wr32(dev, 0x003364, 0x00000000);
				if (dma_get != dma_put || ho_get != ho_put) {
					nv_wr32(dev, 0x003244, dma_put);
					nv_wr32(dev, 0x003328, ho_put);
				} else
				if (ib_get != ib_put) {
					nv_wr32(dev, 0x003334, ib_put);
				}
			} else {
				NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
					     "Put 0x%08x State 0x%08x Push 0x%08x\n",
					chid, dma_get, dma_put, state, push);

				if (dma_get != dma_put)
					nv_wr32(dev, 0x003244, dma_put);
			}

			nv_wr32(dev, 0x003228, 0x00000000);
			nv_wr32(dev, 0x003220, 0x00000001);
			nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
			status &= ~NV_PFIFO_INTR_DMA_PUSHER;
		}

		if (status & NV_PFIFO_INTR_SEMAPHORE) {
			uint32_t sem;

			status &= ~NV_PFIFO_INTR_SEMAPHORE;
			nv_wr32(dev, NV03_PFIFO_INTR_0,
				NV_PFIFO_INTR_SEMAPHORE);

			sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
			nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);

			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
		}

		if (dev_priv->card_type == NV_50) {
			if (status & 0x00000010) {
				nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT");
				status &= ~0x00000010;
				nv_wr32(dev, 0x002100, 0x00000010);
			}
		}

		if (status) {
			if (nouveau_ratelimit())
				NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
					status, chid);
			nv_wr32(dev, NV03_PFIFO_INTR_0, status);
			status = 0;
		}

		nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
	}

	if (status) {
		NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
		nv_wr32(dev, 0x2140, 0);
		nv_wr32(dev, 0x140, 0);
	}

	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
}

struct nouveau_bitfield_names {
	uint32_t mask;
	const char *name;
};

static struct nouveau_bitfield_names nstatus_names[] =
{
	{ NV04_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
	{ NV04_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
	{ NV04_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
	{ NV04_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" }
};

static struct nouveau_bitfield_names nstatus_names_nv10[] =
{
	{ NV10_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
	{ NV10_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
	{ NV10_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
	{ NV10_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" }
};

static struct nouveau_bitfield_names nsource_names[] =
{
	{ NV03_PGRAPH_NSOURCE_NOTIFICATION,       "NOTIFICATION" },
	{ NV03_PGRAPH_NSOURCE_DATA_ERROR,         "DATA_ERROR" },
	{ NV03_PGRAPH_NSOURCE_PROTECTION_ERROR,   "PROTECTION_ERROR" },
	{ NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION,    "RANGE_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_LIMIT_COLOR,        "LIMIT_COLOR" },
	{ NV03_PGRAPH_NSOURCE_LIMIT_ZETA,         "LIMIT_ZETA" },
	{ NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD,       "ILLEGAL_MTHD" },
	{ NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION,   "DMA_R_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION,   "DMA_W_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION,   "FORMAT_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION,    "PATCH_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_STATE_INVALID,      "STATE_INVALID" },
	{ NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY,      "DOUBLE_NOTIFY" },
	{ NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE,      "NOTIFY_IN_USE" },
	{ NV03_PGRAPH_NSOURCE_METHOD_CNT,         "METHOD_CNT" },
	{ NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION,   "BFR_NOTIFICATION" },
	{ NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_A,        "DMA_WIDTH_A" },
	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_B,        "DMA_WIDTH_B" },
};

static void
nouveau_print_bitfield_names_(uint32_t value,
			      const struct nouveau_bitfield_names *namelist,
			      const int namelist_len)
{
	/*
	 * Caller must have already printed the KERN_* log level for us.
	 * Also the caller is responsible for adding the newline.
	 */
	int i;
	for (i = 0; i < namelist_len; ++i) {
		uint32_t mask = namelist[i].mask;
		if (value & mask) {
			printk(" %s", namelist[i].name);
			value &= ~mask;
		}
	}
	if (value)
		printk(" (unknown bits 0x%08x)", value);
}
#define nouveau_print_bitfield_names(val, namelist) \
	nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist))

struct nouveau_enum_names {
	uint32_t value;
	const char *name;
};

static void
nouveau_print_enum_names_(uint32_t value,
			  const struct nouveau_enum_names *namelist,
			  const int namelist_len)
{
	/*
	 * Caller must have already printed the KERN_* log level for us.
	 * Also the caller is responsible for adding the newline.
	 */
	int i;
	for (i = 0; i < namelist_len; ++i) {
		if (value == namelist[i].value) {
			printk("%s", namelist[i].name);
			return;
		}
	}
	printk("unknown value 0x%08x", value);
}
#define nouveau_print_enum_names(val, namelist) \
	nouveau_print_enum_names_((val), (namelist), ARRAY_SIZE(namelist))

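/* Recover the channel id from the active PGRAPH context: read the
 * current grctx pointer from 0x40032c and match it against each
 * channel's context instance (PRAMIN offset on NV40, VRAM address on
 * NV50).  Returns engine.fifo.channels if nothing matches, and on
 * pre-NV40 cards, where this trick doesn't apply. */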
static int
nouveau_graph_chid_from_grctx(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t inst;
	int i;

	if (dev_priv->card_type < NV_40)
		return dev_priv->engine.fifo.channels;
	else
	if (dev_priv->card_type < NV_50) {
		inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 4;

		for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
			struct nouveau_channel *chan = dev_priv->fifos[i];

			if (!chan || !chan->ramin_grctx)
				continue;

			if (inst == chan->ramin_grctx->pinst)
				break;
		}
	} else {
		inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 12;

		for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
			struct nouveau_channel *chan = dev_priv->fifos[i];

			if (!chan || !chan->ramin)
				continue;

			if (inst == chan->ramin->vinst)
				break;
		}
	}

	return i;
}

static int
nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	int channel;

	if (dev_priv->card_type < NV_10)
		channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf;
	else
	if (dev_priv->card_type < NV_40)
		channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
	else
		channel = nouveau_graph_chid_from_grctx(dev);

	if (channel >= engine->fifo.channels || !dev_priv->fifos[channel]) {
		NV_ERROR(dev, "AIII, invalid/inactive channel id %d\n", channel);
		return -EINVAL;
	}

	*channel_ret = channel;
	return 0;
}

struct nouveau_pgraph_trap {
	int channel;
	int class;
	int subc, mthd, size;
	uint32_t data, data2;
	uint32_t nsource, nstatus;
};

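/* Snapshot the trapped method from the PGRAPH trap registers.  The
 * TRAPPED_ADDR layout moved around over the generations: the
 * subchannel sits in bits 15:13 before NV10 and bits 18:16 after, and
 * the per-subchannel class registers moved (and widened) as well. */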
static void
nouveau_graph_trap_info(struct drm_device *dev,
			struct nouveau_pgraph_trap *trap)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t address;

	trap->nsource = trap->nstatus = 0;
	if (dev_priv->card_type < NV_50) {
		trap->nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
		trap->nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
	}

	if (nouveau_graph_trapped_channel(dev, &trap->channel))
		trap->channel = -1;
	address = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);

	trap->mthd = address & 0x1FFC;
	trap->data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
	if (dev_priv->card_type < NV_10) {
		trap->subc = (address >> 13) & 0x7;
	} else {
		trap->subc = (address >> 16) & 0x7;
		trap->data2 = nv_rd32(dev, NV10_PGRAPH_TRAPPED_DATA_HIGH);
	}

	if (dev_priv->card_type < NV_10)
		trap->class = nv_rd32(dev, 0x400180 + trap->subc * 4) & 0xFF;
	else if (dev_priv->card_type < NV_40)
		trap->class = nv_rd32(dev, 0x400160 + trap->subc * 4) & 0xFFF;
	else if (dev_priv->card_type < NV_50)
		trap->class = nv_rd32(dev, 0x400160 + trap->subc * 4) & 0xFFFF;
	else
		trap->class = nv_rd32(dev, 0x400814);
}

static void
nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id,
			     struct nouveau_pgraph_trap *trap)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t nsource = trap->nsource, nstatus = trap->nstatus;

	if (dev_priv->card_type < NV_50) {
		NV_INFO(dev, "%s - nSource:", id);
		nouveau_print_bitfield_names(nsource, nsource_names);
		printk(", nStatus:");
		if (dev_priv->card_type < NV_10)
			nouveau_print_bitfield_names(nstatus, nstatus_names);
		else
			nouveau_print_bitfield_names(nstatus, nstatus_names_nv10);
		printk("\n");
	}

	NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x "
		     "Data 0x%08x:0x%08x\n",
		id, trap->channel, trap->subc,
		trap->class, trap->mthd,
		trap->data2, trap->data);
}

static int
nouveau_pgraph_intr_swmthd(struct drm_device *dev,
			   struct nouveau_pgraph_trap *trap)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (trap->channel < 0 ||
	    trap->channel >= dev_priv->engine.fifo.channels ||
	    !dev_priv->fifos[trap->channel])
		return -ENODEV;

	return nouveau_call_method(dev_priv->fifos[trap->channel],
				   trap->class, trap->mthd, trap->data);
}

static inline void
nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource)
{
	struct nouveau_pgraph_trap trap;
	int unhandled = 0;

	nouveau_graph_trap_info(dev, &trap);

	if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
		if (nouveau_pgraph_intr_swmthd(dev, &trap))
			unhandled = 1;
	} else {
		unhandled = 1;
	}

	if (unhandled)
		nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap);
}

static inline void
nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
{
	struct nouveau_pgraph_trap trap;
	int unhandled = 0;

	nouveau_graph_trap_info(dev, &trap);
	trap.nsource = nsource;

	if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
		if (nouveau_pgraph_intr_swmthd(dev, &trap))
			unhandled = 1;
	} else if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
		uint32_t v = nv_rd32(dev, 0x402000);
		nv_wr32(dev, 0x402000, v);

		/* dump the error anyway for now: it's useful for
		   Gallium development */
		unhandled = 1;
	} else {
		unhandled = 1;
	}

	if (unhandled && nouveau_ratelimit())
		nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR", &trap);
}

static inline void
nouveau_pgraph_intr_context_switch(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	uint32_t chid;

	chid = engine->fifo.channel_id(dev);
	NV_DEBUG(dev, "PGRAPH context switch interrupt channel %x\n", chid);

	switch (dev_priv->card_type) {
	case NV_04:
		nv04_graph_context_switch(dev);
		break;
	case NV_10:
		nv10_graph_context_switch(dev);
		break;
	default:
		NV_ERROR(dev, "Context switch not implemented\n");
		break;
	}
}

static void
nouveau_pgraph_irq_handler(struct drm_device *dev)
{
	uint32_t status;

	while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
		uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);

		if (status & NV_PGRAPH_INTR_NOTIFY) {
			nouveau_pgraph_intr_notify(dev, nsource);

			status &= ~NV_PGRAPH_INTR_NOTIFY;
			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY);
		}

		if (status & NV_PGRAPH_INTR_ERROR) {
			nouveau_pgraph_intr_error(dev, nsource);

			status &= ~NV_PGRAPH_INTR_ERROR;
			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_ERROR);
		}

		if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
			status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
			nv_wr32(dev, NV03_PGRAPH_INTR,
				NV_PGRAPH_INTR_CONTEXT_SWITCH);

			nouveau_pgraph_intr_context_switch(dev);
		}

		if (status) {
			NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
			nv_wr32(dev, NV03_PGRAPH_INTR, status);
		}

		if ((nv_rd32(dev, NV04_PGRAPH_FIFO) & (1 << 0)) == 0)
			nv_wr32(dev, NV04_PGRAPH_FIFO, 1);
	}

	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
}

static struct nouveau_enum_names nv50_mp_exec_error_names[] =
{
	{ 3,    "STACK_UNDERFLOW" },
	{ 4,    "QUADON_ACTIVE" },
	{ 8,    "TIMEOUT" },
	{ 0x10, "INVALID_OPCODE" },
	{ 0x40, "BREAKPOINT" },
};

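/* Report execution errors from the MPs inside one TP.  Bits 27:24 of
 * the unit-present mask at 0x1540 appear to select the MPs; the per-MP
 * register stride changed with the NVA0 chipset. */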
static void
nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t units = nv_rd32(dev, 0x1540);
	uint32_t addr, mp10, status, pc, oplow, ophigh;
	int i;
	int mps = 0;

	for (i = 0; i < 4; i++) {
		if (!(units & 1 << (i + 24)))
			continue;
		if (dev_priv->chipset < 0xa0)
			addr = 0x408200 + (tpid << 12) + (i << 7);
		else
			addr = 0x408100 + (tpid << 11) + (i << 7);
		mp10 = nv_rd32(dev, addr + 0x10);
		status = nv_rd32(dev, addr + 0x14);
		if (!status)
			continue;
		if (display) {
			nv_rd32(dev, addr + 0x20);
			pc = nv_rd32(dev, addr + 0x24);
			oplow = nv_rd32(dev, addr + 0x70);
			ophigh = nv_rd32(dev, addr + 0x74);
			NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
					"TP %d MP %d: ", tpid, i);
			nouveau_print_enum_names(status,
					nv50_mp_exec_error_names);
			printk(" at %06x warp %d, opcode %08x %08x\n",
					pc & 0xffffff, pc >> 24,
					oplow, ophigh);
		}
		nv_wr32(dev, addr + 0x10, mp10);
		nv_wr32(dev, addr + 0x14, 0);
		mps++;
	}
	if (!mps && display)
		NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
				"No MPs claiming errors?\n", tpid);
}

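/* Walk the TPs flagged in the unit-present mask at 0x1540 and decode
 * one class of trap (texture, MP, TPDMA) on each TP with a nonzero
 * ustatus.  As with the MPs, the per-TP register stride changed on
 * NVA0, hence the ustatus_old/ustatus_new base pair. */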
static void
nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
		uint32_t ustatus_new, int display, const char *name)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int tps = 0;
	uint32_t units = nv_rd32(dev, 0x1540);
	int i, r;
	uint32_t ustatus_addr, ustatus;

	for (i = 0; i < 16; i++) {
		if (!(units & (1 << i)))
			continue;
		if (dev_priv->chipset < 0xa0)
			ustatus_addr = ustatus_old + (i << 12);
		else
			ustatus_addr = ustatus_new + (i << 11);
		ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
		if (!ustatus)
			continue;
		tps++;
		switch (type) {
		case 6: /* texture error... unknown for now */
			nv50_fb_vm_trap(dev, display, name);
			if (display) {
				NV_ERROR(dev, "magic set %d:\n", i);
				for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
					NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
						nv_rd32(dev, r));
			}
			break;
		case 7: /* MP error */
			if (ustatus & 0x00010000) {
				nv50_pgraph_mp_trap(dev, i, display);
				ustatus &= ~0x00010000;
			}
			break;
		case 8: /* TPDMA error */
			{
			uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
			uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
			uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
			uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
			uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
			uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
			uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);

			nv50_fb_vm_trap(dev, display, name);

			/* 2d engine destination */
			if (ustatus & 0x00000010) {
				if (display) {
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
							i, e14, e10);
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000010;
			}

			/* Render target */
			if (ustatus & 0x00000040) {
				if (display) {
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
							i, e14, e10);
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000040;
			}

			/* CUDA memory: l[], g[] or stack. */
			if (ustatus & 0x00000080) {
				if (display) {
					if (e18 & 0x80000000) {
						/* g[] read fault? */
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
								i, e14, e10 | ((e18 >> 24) & 0x1f));
						e18 &= ~0x1f000000;
					} else if (e18 & 0xc) {
						/* g[] write fault? */
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
								i, e14, e10 | ((e18 >> 7) & 0x1f));
						e18 &= ~0x00000f80;
					} else {
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
								i, e14, e10);
					}
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000080;
			}
			}
			break;
		}
		if (ustatus) {
			if (display)
				NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
		}
		nv_wr32(dev, ustatus_addr, 0xc0000000);
	}

	if (!tps && display)
		NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
}

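/* Decode the PGRAPH TRAP status (0x400108) one functional unit at a
 * time, acking each bit as it's dealt with.  "display" only throttles
 * the logging; the hardware state gets cleaned up either way. */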
static void
nv50_pgraph_trap_handler(struct drm_device *dev)
{
	struct nouveau_pgraph_trap trap;
	uint32_t status = nv_rd32(dev, 0x400108);
	uint32_t ustatus;
	int display = nouveau_ratelimit();

	if (!status && display) {
		nouveau_graph_trap_info(dev, &trap);
		nouveau_graph_dump_trap_info(dev, "PGRAPH_TRAP", &trap);
		NV_INFO(dev, "PGRAPH_TRAP - no units reporting traps?\n");
	}

	/* DISPATCH: Relays commands to other units and handles NOTIFY,
	 * COND, QUERY. If you get a trap from it, the command is still stuck
	 * in DISPATCH and you need to do something about it. */
	if (status & 0x001) {
		ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
		}

		/* Known to be triggered by screwed up NOTIFY and COND... */
		if (ustatus & 0x00000001) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT");
			nv_wr32(dev, 0x400500, 0);
			if (nv_rd32(dev, 0x400808) & 0x80000000) {
				if (display) {
					if (nouveau_graph_trapped_channel(dev, &trap.channel))
						trap.channel = -1;
					trap.class = nv_rd32(dev, 0x400814);
					trap.mthd = nv_rd32(dev, 0x400808) & 0x1ffc;
					trap.subc = (nv_rd32(dev, 0x400808) >> 16) & 0x7;
					trap.data = nv_rd32(dev, 0x40080c);
					trap.data2 = nv_rd32(dev, 0x400810);
					nouveau_graph_dump_trap_info(dev,
							"PGRAPH_TRAP_DISPATCH_FAULT", &trap);
					NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400808: %08x\n", nv_rd32(dev, 0x400808));
					NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400848: %08x\n", nv_rd32(dev, 0x400848));
				}
				nv_wr32(dev, 0x400808, 0);
			} else if (display) {
				NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - No stuck command?\n");
			}
			nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
			nv_wr32(dev, 0x400848, 0);
			ustatus &= ~0x00000001;
		}
		if (ustatus & 0x00000002) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY");
			nv_wr32(dev, 0x400500, 0);
			if (nv_rd32(dev, 0x40084c) & 0x80000000) {
				if (display) {
					if (nouveau_graph_trapped_channel(dev, &trap.channel))
						trap.channel = -1;
					trap.class = nv_rd32(dev, 0x400814);
					trap.mthd = nv_rd32(dev, 0x40084c) & 0x1ffc;
					trap.subc = (nv_rd32(dev, 0x40084c) >> 16) & 0x7;
					trap.data = nv_rd32(dev, 0x40085c);
					trap.data2 = 0;
					nouveau_graph_dump_trap_info(dev,
							"PGRAPH_TRAP_DISPATCH_QUERY", &trap);
					NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - 40084c: %08x\n", nv_rd32(dev, 0x40084c));
				}
				nv_wr32(dev, 0x40084c, 0);
			} else if (display) {
				NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - No stuck command?\n");
			}
			ustatus &= ~0x00000002;
		}
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - Unhandled ustatus 0x%08x\n", ustatus);
		nv_wr32(dev, 0x400804, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x001);
		status &= ~0x001;
	}

	/* TRAPs other than dispatch use the "normal" trap regs. */
	if (status && display) {
		nouveau_graph_trap_info(dev, &trap);
		nouveau_graph_dump_trap_info(dev,
				"PGRAPH_TRAP", &trap);
	}

	/* M2MF: Memory to memory copy engine. */
	if (status & 0x002) {
		ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n");
		}
		if (ustatus & 0x00000001) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY");
			ustatus &= ~0x00000001;
		}
		if (ustatus & 0x00000002) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN");
			ustatus &= ~0x00000002;
		}
		if (ustatus & 0x00000004) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT");
			ustatus &= ~0x00000004;
		}
		NV_INFO(dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n",
				nv_rd32(dev, 0x406804),
				nv_rd32(dev, 0x406808),
				nv_rd32(dev, 0x40680c),
				nv_rd32(dev, 0x406810));
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_M2MF - Unhandled ustatus 0x%08x\n", ustatus);
		/* No sane way found yet -- just reset the bugger. */
		nv_wr32(dev, 0x400040, 2);
		nv_wr32(dev, 0x400040, 0);
		nv_wr32(dev, 0x406800, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x002);
		status &= ~0x002;
	}

	/* VFETCH: Fetches data from vertex buffers. */
	if (status & 0x004) {
		ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n");
		}
		if (ustatus & 0x00000001) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT");
			NV_INFO(dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n",
					nv_rd32(dev, 0x400c00),
					nv_rd32(dev, 0x400c08),
					nv_rd32(dev, 0x400c0c),
					nv_rd32(dev, 0x400c10));
			ustatus &= ~0x00000001;
		}
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_VFETCH - Unhandled ustatus 0x%08x\n", ustatus);
		nv_wr32(dev, 0x400c04, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x004);
		status &= ~0x004;
	}

	/* STRMOUT: DirectX streamout / OpenGL transform feedback. */
	if (status & 0x008) {
		ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n");
		}
		if (ustatus & 0x00000001) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT");
			NV_INFO(dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n",
					nv_rd32(dev, 0x401804),
					nv_rd32(dev, 0x401808),
					nv_rd32(dev, 0x40180c),
					nv_rd32(dev, 0x401810));
			ustatus &= ~0x00000001;
		}
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - Unhandled ustatus 0x%08x\n", ustatus);
		/* No sane way found yet -- just reset the bugger. */
		nv_wr32(dev, 0x400040, 0x80);
		nv_wr32(dev, 0x400040, 0);
		nv_wr32(dev, 0x401800, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x008);
		status &= ~0x008;
	}

	/* CCACHE: Handles code and c[] caches and fills them. */
	if (status & 0x010) {
		ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n");
		}
		if (ustatus & 0x00000001) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT");
			NV_INFO(dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n",
					nv_rd32(dev, 0x405800),
					nv_rd32(dev, 0x405804),
					nv_rd32(dev, 0x405808),
					nv_rd32(dev, 0x40580c),
					nv_rd32(dev, 0x405810),
					nv_rd32(dev, 0x405814),
					nv_rd32(dev, 0x40581c));
			ustatus &= ~0x00000001;
		}
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_CCACHE - Unhandled ustatus 0x%08x\n", ustatus);
		nv_wr32(dev, 0x405018, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x010);
		status &= ~0x010;
	}

	/* Unknown, not seen yet... 0x402000 is the only trap status reg
	 * remaining, so try to handle it anyway. Perhaps related to that
	 * unknown DMA slot on tesla? */
	if (status & 0x20) {
		nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04");
		ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
		if (display)
			NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus);
		nv_wr32(dev, 0x402000, 0xc0000000);
		/* no status modification on purpose */
	}

	/* TEXTURE: CUDA texturing units */
	if (status & 0x040) {
		nv50_pgraph_tp_trap(dev, 6, 0x408900, 0x408600, display,
				"PGRAPH_TRAP_TEXTURE");
		nv_wr32(dev, 0x400108, 0x040);
		status &= ~0x040;
	}

	/* MP: CUDA execution engines. */
	if (status & 0x080) {
		nv50_pgraph_tp_trap(dev, 7, 0x408314, 0x40831c, display,
				"PGRAPH_TRAP_MP");
		nv_wr32(dev, 0x400108, 0x080);
		status &= ~0x080;
	}

	/* TPDMA: Handles TP-initiated uncached memory accesses:
	 * l[], g[], stack, 2d surfaces, render targets. */
	if (status & 0x100) {
		nv50_pgraph_tp_trap(dev, 8, 0x408e08, 0x408708, display,
				"PGRAPH_TRAP_TPDMA");
		nv_wr32(dev, 0x400108, 0x100);
		status &= ~0x100;
	}

	if (status) {
		if (display)
			NV_INFO(dev, "PGRAPH_TRAP - Unknown trap 0x%08x\n",
				status);
		nv_wr32(dev, 0x400108, status);
	}
}

/* There must be a *lot* of these. Will take some time to gather them up. */
static struct nouveau_enum_names nv50_data_error_names[] =
{
	{ 4,    "INVALID_VALUE" },
	{ 5,    "INVALID_ENUM" },
	{ 8,    "INVALID_OBJECT" },
	{ 0xc,  "INVALID_BITFIELD" },
	{ 0x28, "MP_NO_REG_SPACE" },
	{ 0x2b, "MP_BLOCK_SIZE_MISMATCH" },
};

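/* NV50 PGRAPH top half.  Each NV03_PGRAPH_INTR bit is handled and
 * acked individually; most of them just dump the trapped state
 * (rate-limited), since there's usually nothing we can recover. */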
static void
nv50_pgraph_irq_handler(struct drm_device *dev)
{
	struct nouveau_pgraph_trap trap;
	int unhandled = 0;
	uint32_t status;

	while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
		/* NOTIFY: You've set a NOTIFY on a command and it's done. */
		if (status & 0x00000001) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_NOTIFY", &trap);
			status &= ~0x00000001;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
		}

		/* COMPUTE_QUERY: Purpose and exact cause unknown, happens
		 * when you write 0x200 to 0x50c0 method 0x31c. */
		if (status & 0x00000002) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_COMPUTE_QUERY", &trap);
			status &= ~0x00000002;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000002);
		}

		/* Unknown, never seen: 0x4 */

		/* ILLEGAL_MTHD: You used a wrong method for this class. */
		if (status & 0x00000010) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_pgraph_intr_swmthd(dev, &trap))
				unhandled = 1;
			if (unhandled && nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_ILLEGAL_MTHD", &trap);
			status &= ~0x00000010;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
		}

		/* ILLEGAL_CLASS: You used a wrong class. */
		if (status & 0x00000020) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_ILLEGAL_CLASS", &trap);
			status &= ~0x00000020;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000020);
		}

		/* DOUBLE_NOTIFY: You tried to set a NOTIFY on another NOTIFY. */
		if (status & 0x00000040) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_DOUBLE_NOTIFY", &trap);
			status &= ~0x00000040;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000040);
		}

		/* CONTEXT_SWITCH: PGRAPH needs us to load a new context */
		if (status & 0x00001000) {
			nv_wr32(dev, 0x400500, 0x00000000);
			nv_wr32(dev, NV03_PGRAPH_INTR,
				NV_PGRAPH_INTR_CONTEXT_SWITCH);
			nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
				NV40_PGRAPH_INTR_EN) &
				~NV_PGRAPH_INTR_CONTEXT_SWITCH);
			nv_wr32(dev, 0x400500, 0x00010001);

			nv50_graph_context_switch(dev);

			status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
		}

		/* BUFFER_NOTIFY: Your m2mf transfer finished */
		if (status & 0x00010000) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_BUFFER_NOTIFY", &trap);
			status &= ~0x00010000;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00010000);
		}

		/* DATA_ERROR: Invalid value for this method, or invalid
		 * state in current PGRAPH context for this operation */
		if (status & 0x00100000) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit()) {
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_DATA_ERROR", &trap);
				NV_INFO(dev, "PGRAPH_DATA_ERROR - ");
				nouveau_print_enum_names(nv_rd32(dev, 0x400110),
						nv50_data_error_names);
				printk("\n");
			}
			status &= ~0x00100000;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
		}

		/* TRAP: Something bad happened in the middle of command
		 * execution.  Has a billion types, subtypes, and even
		 * subsubtypes. */
		if (status & 0x00200000) {
			nv50_pgraph_trap_handler(dev);
			status &= ~0x00200000;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
		}

		/* Unknown, never seen: 0x00400000 */

		/* SINGLE_STEP: Happens on every method if you turned on
		 * single stepping in 40008c */
		if (status & 0x01000000) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_SINGLE_STEP", &trap);
			status &= ~0x01000000;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x01000000);
		}

		/* 0x02000000 happens when you pause a ctxprog...
		 * but the only way I know to trigger that is by poking
		 * the relevant MMIO register, and we don't do that. */

		if (status) {
			NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
				status);
			nv_wr32(dev, NV03_PGRAPH_INTR, status);
		}

		{
			const int isb = (1 << 16) | (1 << 0);

			if ((nv_rd32(dev, 0x400500) & isb) != isb)
				nv_wr32(dev, 0x400500,
					nv_rd32(dev, 0x400500) | isb);
		}
	}

	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
	if (nv_rd32(dev, 0x400824) & (1 << 31))
		nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
}

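/* "crtc" is a bitmask pulled from PMC_INTR_0 bits 25:24 - bit 0 is
 * CRTC0, bit 1 is CRTC1.  All we do here is ack the vblank. */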
static void
nouveau_crtc_irq_handler(struct drm_device *dev, int crtc)
{
	if (crtc & 1)
		nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);

	if (crtc & 2)
		nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
}

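/* Main interrupt entry point: read PMC_INTR_0 once and fan out to the
 * per-engine handlers under the context-switch lock.  Each handler
 * acks its own engine's interrupt sources; only the unhandled residue
 * is reported here. */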
irqreturn_t
nouveau_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *)arg;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t status;
	unsigned long flags;

	status = nv_rd32(dev, NV03_PMC_INTR_0);
	if (!status)
		return IRQ_NONE;

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);

	if (status & NV_PMC_INTR_0_PFIFO_PENDING) {
		nouveau_fifo_irq_handler(dev);
		status &= ~NV_PMC_INTR_0_PFIFO_PENDING;
	}

	if (status & NV_PMC_INTR_0_PGRAPH_PENDING) {
		if (dev_priv->card_type >= NV_50)
			nv50_pgraph_irq_handler(dev);
		else
			nouveau_pgraph_irq_handler(dev);

		status &= ~NV_PMC_INTR_0_PGRAPH_PENDING;
	}

	if (status & NV_PMC_INTR_0_CRTCn_PENDING) {
		nouveau_crtc_irq_handler(dev, (status >> 24) & 3);
		status &= ~NV_PMC_INTR_0_CRTCn_PENDING;
	}

	if (status & (NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
		      NV_PMC_INTR_0_NV50_I2C_PENDING)) {
		nv50_display_irq_handler(dev);
		status &= ~(NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
			    NV_PMC_INTR_0_NV50_I2C_PENDING);
	}

	if (status)
		NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status);

	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	return IRQ_HANDLED;
}