i915_gpu_error.c

/*
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 *
 */

#include <generated/utsrelease.h>
#include <linux/stop_machine.h>
#include <linux/zlib.h>
#include <drm/drm_print.h>

#include "i915_drv.h"

static const char *engine_str(int engine)
{
	switch (engine) {
	case RCS: return "render";
	case VCS: return "bsd";
	case BCS: return "blt";
	case VECS: return "vebox";
	case VCS2: return "bsd2";
	default: return "";
	}
}

static const char *tiling_flag(int tiling)
{
	switch (tiling) {
	default:
	case I915_TILING_NONE: return "";
	case I915_TILING_X: return " X";
	case I915_TILING_Y: return " Y";
	}
}

static const char *dirty_flag(int dirty)
{
	return dirty ? " dirty" : "";
}

static const char *purgeable_flag(int purgeable)
{
	return purgeable ? " purgeable" : "";
}

static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
{
	if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
		e->err = -ENOSPC;
		return false;
	}

	if (e->bytes == e->size - 1 || e->err)
		return false;

	return true;
}

static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
			      unsigned len)
{
	if (e->pos + len <= e->start) {
		e->pos += len;
		return false;
	}

	/* First vsnprintf needs to fit in its entirety for memmove */
	if (len >= e->size) {
		e->err = -EIO;
		return false;
	}

	return true;
}

static void __i915_error_advance(struct drm_i915_error_state_buf *e,
				 unsigned len)
{
	/* If this is the first printf in this window, adjust it so that
	 * the start position matches the start of the buffer.
	 */
	if (e->pos < e->start) {
		const size_t off = e->start - e->pos;

		/* Should not happen but be paranoid */
		if (off > len || e->bytes) {
			e->err = -EIO;
			return;
		}

		memmove(e->buf, e->buf + off, len - off);
		e->bytes = len - off;
		e->pos = e->start;
		return;
	}

	e->bytes += len;
	e->pos += len;
}
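
/*
 * The error-state buffer is read from sysfs/debugfs in chunks: e->start is
 * the file offset the reader asked for and e->pos tracks how much formatted
 * output has been generated so far. Output produced before e->start is
 * counted but discarded (__i915_error_seek), and the first printf that
 * straddles e->start is shifted down so the buffer begins exactly at the
 * requested offset (__i915_error_advance). The full report can thus be
 * regenerated for each read window without holding it all in memory.
 */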

__printf(2, 0)
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
			       const char *f, va_list args)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	/* Seek to the first printf that hits the start position */
	if (e->pos < e->start) {
		va_list tmp;

		va_copy(tmp, args);
		len = vsnprintf(NULL, 0, f, tmp);
		va_end(tmp);

		if (!__i915_error_seek(e, len))
			return;
	}

	len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;

	__i915_error_advance(e, len);
}

static void i915_error_puts(struct drm_i915_error_state_buf *e,
			    const char *str)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	len = strlen(str);

	/* Seek to the first string that hits the start position */
	if (e->pos < e->start) {
		if (!__i915_error_seek(e, len))
			return;
	}

	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;
	memcpy(e->buf + e->bytes, str, len);

	__i915_error_advance(e, len);
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)

static void __i915_printfn_error(struct drm_printer *p, struct va_format *vaf)
{
	i915_error_vprintf(p->arg, vaf->fmt, *vaf->va);
}

static inline struct drm_printer
i915_error_printer(struct drm_i915_error_state_buf *e)
{
	struct drm_printer p = {
		.printfn = __i915_printfn_error,
		.arg = e,
	};
	return p;
}

#ifdef CONFIG_DRM_I915_COMPRESS_ERROR

struct compress {
	struct z_stream_s zstream;
	void *tmp;
};

static bool compress_init(struct compress *c)
{
	struct z_stream_s *zstream = memset(&c->zstream, 0, sizeof(c->zstream));

	zstream->workspace =
		kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
			GFP_ATOMIC | __GFP_NOWARN);
	if (!zstream->workspace)
		return false;

	if (zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) != Z_OK) {
		kfree(zstream->workspace);
		return false;
	}

	c->tmp = NULL;
	if (i915_has_memcpy_from_wc())
		c->tmp = (void *)__get_free_page(GFP_ATOMIC | __GFP_NOWARN);

	return true;
}

static int compress_page(struct compress *c,
			 void *src,
			 struct drm_i915_error_object *dst)
{
	struct z_stream_s *zstream = &c->zstream;

	zstream->next_in = src;
	if (c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
		zstream->next_in = c->tmp;
	zstream->avail_in = PAGE_SIZE;

	do {
		if (zstream->avail_out == 0) {
			unsigned long page;

			page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
			if (!page)
				return -ENOMEM;

			dst->pages[dst->page_count++] = (void *)page;

			zstream->next_out = (void *)page;
			zstream->avail_out = PAGE_SIZE;
		}

		if (zlib_deflate(zstream, Z_SYNC_FLUSH) != Z_OK)
			return -EIO;
	} while (zstream->avail_in);

	/* Fallback to uncompressed if we increase size? */
	if (0 && zstream->total_out > zstream->total_in)
		return -E2BIG;

	return 0;
}

static void compress_fini(struct compress *c,
			  struct drm_i915_error_object *dst)
{
	struct z_stream_s *zstream = &c->zstream;

	if (dst) {
		zlib_deflate(zstream, Z_FINISH);
		dst->unused = zstream->avail_out;
	}

	zlib_deflateEnd(zstream);
	kfree(zstream->workspace);

	if (c->tmp)
		free_page((unsigned long)c->tmp);
}
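
/*
 * Each captured page is deflated with Z_SYNC_FLUSH so the stream stays
 * aligned to page-sized output buffers, and compress_fini() issues the
 * final Z_FINISH. dst->unused records how much of the last output page
 * the deflate stream did not fill, which print_error_obj() subtracts
 * before ascii85-encoding so no stale bytes leak into the dump.
 */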

static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
	err_puts(m, ":");
}

#else

struct compress {
};

static bool compress_init(struct compress *c)
{
	return true;
}

static int compress_page(struct compress *c,
			 void *src,
			 struct drm_i915_error_object *dst)
{
	unsigned long page;
	void *ptr;

	page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
	if (!page)
		return -ENOMEM;

	ptr = (void *)page;
	if (!i915_memcpy_from_wc(ptr, src, PAGE_SIZE))
		memcpy(ptr, src, PAGE_SIZE);
	dst->pages[dst->page_count++] = ptr;

	return 0;
}

static void compress_fini(struct compress *c,
			  struct drm_i915_error_object *dst)
{
}

static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
	err_puts(m, "~");
}

#endif
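
/*
 * The single-character marker emitted before each object's data tells
 * userspace decoders how to interpret the ascii85 payload that follows:
 * ":" means the pages were zlib-deflated before encoding (the
 * CONFIG_DRM_I915_COMPRESS_ERROR build), "~" means they are raw page
 * contents.
 */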

static void print_error_buffers(struct drm_i915_error_state_buf *m,
				const char *name,
				struct drm_i915_error_buffer *err,
				int count)
{
	int i;

	err_printf(m, "%s [%d]:\n", name, count);

	while (count--) {
		err_printf(m, " %08x_%08x %8u %02x %02x [ ",
			   upper_32_bits(err->gtt_offset),
			   lower_32_bits(err->gtt_offset),
			   err->size,
			   err->read_domains,
			   err->write_domain);
		for (i = 0; i < I915_NUM_ENGINES; i++)
			err_printf(m, "%02x ", err->rseqno[i]);

		err_printf(m, "] %02x", err->wseqno);
		err_puts(m, tiling_flag(err->tiling));
		err_puts(m, dirty_flag(err->dirty));
		err_puts(m, purgeable_flag(err->purgeable));
		err_puts(m, err->userptr ? " userptr" : "");
		err_puts(m, err->engine != -1 ? " " : "");
		err_puts(m, engine_str(err->engine));
		err_puts(m, i915_cache_level_str(m->i915, err->cache_level));

		if (err->name)
			err_printf(m, " (name: %d)", err->name);
		if (err->fence_reg != I915_FENCE_REG_NONE)
			err_printf(m, " (fence: %d)", err->fence_reg);

		err_puts(m, "\n");
		err++;
	}
}
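
/*
 * Each buffer prints as a single line, roughly (values illustrative):
 *
 *   00000000_00100000     4096 40 00 [ 1a 00 00 00 00 ] 00 X dirty (fence: 2)
 *
 * i.e. GTT offset, size, read domains, write domain, the last-read seqno
 * per engine, the last-write seqno, then tiling/dirty/purgeable/userptr
 * flags, the last-writing engine, cache level, flink name and fence
 * register, when present.
 */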

static void error_print_instdone(struct drm_i915_error_state_buf *m,
				 const struct drm_i915_error_engine *ee)
{
	int slice;
	int subslice;

	err_printf(m, " INSTDONE: 0x%08x\n",
		   ee->instdone.instdone);

	if (ee->engine_id != RCS || INTEL_GEN(m->i915) <= 3)
		return;

	err_printf(m, " SC_INSTDONE: 0x%08x\n",
		   ee->instdone.slice_common);

	if (INTEL_GEN(m->i915) <= 6)
		return;

	for_each_instdone_slice_subslice(m->i915, slice, subslice)
		err_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice,
			   ee->instdone.sampler[slice][subslice]);

	for_each_instdone_slice_subslice(m->i915, slice, subslice)
		err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice,
			   ee->instdone.row[slice][subslice]);
}

static void error_print_request(struct drm_i915_error_state_buf *m,
				const char *prefix,
				const struct drm_i915_error_request *erq)
{
	if (!erq->seqno)
		return;

	err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x, prio %d, emitted %dms ago, head %08x, tail %08x\n",
		   prefix, erq->pid, erq->ban_score,
		   erq->context, erq->seqno, erq->priority,
		   jiffies_to_msecs(jiffies - erq->jiffies),
		   erq->head, erq->tail);
}

static void error_print_context(struct drm_i915_error_state_buf *m,
				const char *header,
				const struct drm_i915_error_context *ctx)
{
	err_printf(m, "%s%s[%d] user_handle %d hw_id %d, prio %d, ban score %d guilty %d active %d\n",
		   header, ctx->comm, ctx->pid, ctx->handle, ctx->hw_id,
		   ctx->priority, ctx->ban_score, ctx->guilty, ctx->active);
}

static void error_print_engine(struct drm_i915_error_state_buf *m,
			       const struct drm_i915_error_engine *ee)
{
	int n;

	err_printf(m, "%s command stream:\n", engine_str(ee->engine_id));
	err_printf(m, " IDLE?: %s\n", yesno(ee->idle));
	err_printf(m, " START: 0x%08x\n", ee->start);
	err_printf(m, " HEAD: 0x%08x [0x%08x]\n", ee->head, ee->rq_head);
	err_printf(m, " TAIL: 0x%08x [0x%08x, 0x%08x]\n",
		   ee->tail, ee->rq_post, ee->rq_tail);
	err_printf(m, " CTL: 0x%08x\n", ee->ctl);
	err_printf(m, " MODE: 0x%08x\n", ee->mode);
	err_printf(m, " HWS: 0x%08x\n", ee->hws);
	err_printf(m, " ACTHD: 0x%08x %08x\n",
		   (u32)(ee->acthd>>32), (u32)ee->acthd);
	err_printf(m, " IPEIR: 0x%08x\n", ee->ipeir);
	err_printf(m, " IPEHR: 0x%08x\n", ee->ipehr);

	error_print_instdone(m, ee);

	if (ee->batchbuffer) {
		u64 start = ee->batchbuffer->gtt_offset;
		u64 end = start + ee->batchbuffer->gtt_size;

		err_printf(m, " batch: [0x%08x_%08x, 0x%08x_%08x]\n",
			   upper_32_bits(start), lower_32_bits(start),
			   upper_32_bits(end), lower_32_bits(end));
	}
	if (INTEL_GEN(m->i915) >= 4) {
		err_printf(m, " BBADDR: 0x%08x_%08x\n",
			   (u32)(ee->bbaddr>>32), (u32)ee->bbaddr);
		err_printf(m, " BB_STATE: 0x%08x\n", ee->bbstate);
		err_printf(m, " INSTPS: 0x%08x\n", ee->instps);
	}
	err_printf(m, " INSTPM: 0x%08x\n", ee->instpm);
	err_printf(m, " FADDR: 0x%08x %08x\n", upper_32_bits(ee->faddr),
		   lower_32_bits(ee->faddr));
	if (INTEL_GEN(m->i915) >= 6) {
		err_printf(m, " RC PSMI: 0x%08x\n", ee->rc_psmi);
		err_printf(m, " FAULT_REG: 0x%08x\n", ee->fault_reg);
		err_printf(m, " SYNC_0: 0x%08x\n",
			   ee->semaphore_mboxes[0]);
		err_printf(m, " SYNC_1: 0x%08x\n",
			   ee->semaphore_mboxes[1]);
		if (HAS_VEBOX(m->i915))
			err_printf(m, " SYNC_2: 0x%08x\n",
				   ee->semaphore_mboxes[2]);
	}
	if (USES_PPGTT(m->i915)) {
		err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);

		if (INTEL_GEN(m->i915) >= 8) {
			int i;

			for (i = 0; i < 4; i++)
				err_printf(m, " PDP%d: 0x%016llx\n",
					   i, ee->vm_info.pdp[i]);
		} else {
			err_printf(m, " PP_DIR_BASE: 0x%08x\n",
				   ee->vm_info.pp_dir_base);
		}
	}
	err_printf(m, " seqno: 0x%08x\n", ee->seqno);
	err_printf(m, " last_seqno: 0x%08x\n", ee->last_seqno);
	err_printf(m, " waiting: %s\n", yesno(ee->waiting));
	err_printf(m, " ring->head: 0x%08x\n", ee->cpu_ring_head);
	err_printf(m, " ring->tail: 0x%08x\n", ee->cpu_ring_tail);
	err_printf(m, " hangcheck stall: %s\n", yesno(ee->hangcheck_stalled));
	err_printf(m, " hangcheck action: %s\n",
		   hangcheck_action_to_str(ee->hangcheck_action));
	err_printf(m, " hangcheck action timestamp: %lu, %u ms ago\n",
		   ee->hangcheck_timestamp,
		   jiffies_to_msecs(jiffies - ee->hangcheck_timestamp));
	err_printf(m, " engine reset count: %u\n", ee->reset_count);

	for (n = 0; n < ee->num_ports; n++) {
		err_printf(m, " ELSP[%d]:", n);
		error_print_request(m, " ", &ee->execlist[n]);
	}

	error_print_context(m, " Active context: ", &ee->context);
}

void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	i915_error_vprintf(e, f, args);
	va_end(args);
}

static int
ascii85_encode_len(int len)
{
	return DIV_ROUND_UP(len, 4);
}

static bool
ascii85_encode(u32 in, char *out)
{
	int i;

	if (in == 0)
		return false;

	out[5] = '\0';
	for (i = 5; i--; ) {
		out[i] = '!' + in % 85;
		in /= 85;
	}

	return true;
}
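
/*
 * This uses the standard ascii85 alphabet ('!' .. 'u'), most significant
 * digit first: e.g. ascii85_encode(1, out) yields "!!!!\"". A zero dword
 * is reported via the return value so that print_error_obj() can emit the
 * conventional one-byte "z" abbreviation instead of "!!!!!".
 */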

static void print_error_obj(struct drm_i915_error_state_buf *m,
			    struct intel_engine_cs *engine,
			    const char *name,
			    struct drm_i915_error_object *obj)
{
	char out[6];
	int page;

	if (!obj)
		return;

	if (name) {
		err_printf(m, "%s --- %s = 0x%08x %08x\n",
			   engine ? engine->name : "global", name,
			   upper_32_bits(obj->gtt_offset),
			   lower_32_bits(obj->gtt_offset));
	}

	err_compression_marker(m);
	for (page = 0; page < obj->page_count; page++) {
		int i, len;

		len = PAGE_SIZE;
		if (page == obj->page_count - 1)
			len -= obj->unused;
		len = ascii85_encode_len(len);

		for (i = 0; i < len; i++) {
			if (ascii85_encode(obj->pages[page][i], out))
				err_puts(m, out);
			else
				err_puts(m, "z");
		}
	}
	err_puts(m, "\n");
}

static void err_print_capabilities(struct drm_i915_error_state_buf *m,
				   const struct intel_device_info *info)
{
	struct drm_printer p = i915_error_printer(m);

	intel_device_info_dump_flags(info, &p);
}

static void err_print_params(struct drm_i915_error_state_buf *m,
			     const struct i915_params *params)
{
	struct drm_printer p = i915_error_printer(m);

	i915_params_dump(params, &p);
}

static void err_print_pciid(struct drm_i915_error_state_buf *m,
			    struct drm_i915_private *i915)
{
	struct pci_dev *pdev = i915->drm.pdev;

	err_printf(m, "PCI ID: 0x%04x\n", pdev->device);
	err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision);
	err_printf(m, "PCI Subsystem: %04x:%04x\n",
		   pdev->subsystem_vendor,
		   pdev->subsystem_device);
}

static void err_print_uc(struct drm_i915_error_state_buf *m,
			 const struct i915_error_uc *error_uc)
{
	struct drm_printer p = i915_error_printer(m);
	const struct i915_gpu_state *error =
		container_of(error_uc, typeof(*error), uc);

	if (!error->device_info.has_guc)
		return;

	intel_uc_fw_dump(&error_uc->guc_fw, &p);
	intel_uc_fw_dump(&error_uc->huc_fw, &p);
	print_error_obj(m, NULL, "GuC log buffer", error_uc->guc_log);
}

int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
			    const struct i915_gpu_state *error)
{
	struct drm_i915_private *dev_priv = m->i915;
	struct drm_i915_error_object *obj;
	int i, j;

	if (!error) {
		err_printf(m, "No error state collected\n");
		return 0;
	}

	if (*error->error_msg)
		err_printf(m, "%s\n", error->error_msg);
	err_printf(m, "Kernel: " UTS_RELEASE "\n");
	err_printf(m, "Time: %ld s %ld us\n",
		   error->time.tv_sec, error->time.tv_usec);
	err_printf(m, "Boottime: %ld s %ld us\n",
		   error->boottime.tv_sec, error->boottime.tv_usec);
	err_printf(m, "Uptime: %ld s %ld us\n",
		   error->uptime.tv_sec, error->uptime.tv_usec);

	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		if (error->engine[i].hangcheck_stalled &&
		    error->engine[i].context.pid) {
			err_printf(m, "Active process (on ring %s): %s [%d], score %d\n",
				   engine_str(i),
				   error->engine[i].context.comm,
				   error->engine[i].context.pid,
				   error->engine[i].context.ban_score);
		}
	}
	err_printf(m, "Reset count: %u\n", error->reset_count);
	err_printf(m, "Suspend count: %u\n", error->suspend_count);
	err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform));
	err_print_pciid(m, error->i915);

	err_printf(m, "IOMMU enabled?: %d\n", error->iommu);

	if (HAS_CSR(dev_priv)) {
		struct intel_csr *csr = &dev_priv->csr;

		err_printf(m, "DMC loaded: %s\n",
			   yesno(csr->dmc_payload != NULL));
		err_printf(m, "DMC fw version: %d.%d\n",
			   CSR_VERSION_MAJOR(csr->version),
			   CSR_VERSION_MINOR(csr->version));
	}

	err_printf(m, "GT awake: %s\n", yesno(error->awake));
	err_printf(m, "RPM wakelock: %s\n", yesno(error->wakelock));
	err_printf(m, "PM suspended: %s\n", yesno(error->suspended));
	err_printf(m, "EIR: 0x%08x\n", error->eir);
	err_printf(m, "IER: 0x%08x\n", error->ier);
	for (i = 0; i < error->ngtier; i++)
		err_printf(m, "GTIER[%d]: 0x%08x\n", i, error->gtier[i]);
	err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
	err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
	err_printf(m, "CCID: 0x%08x\n", error->ccid);
	err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);

	for (i = 0; i < error->nfence; i++)
		err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);

	if (INTEL_GEN(dev_priv) >= 6) {
		err_printf(m, "ERROR: 0x%08x\n", error->error);

		if (INTEL_GEN(dev_priv) >= 8)
			err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
				   error->fault_data1, error->fault_data0);

		err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
	}

	if (IS_GEN7(dev_priv))
		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);

	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		if (error->engine[i].engine_id != -1)
			error_print_engine(m, &error->engine[i]);
	}

	for (i = 0; i < ARRAY_SIZE(error->active_vm); i++) {
		char buf[128];
		int len, first = 1;

		if (!error->active_vm[i])
			break;

		len = scnprintf(buf, sizeof(buf), "Active (");
		for (j = 0; j < ARRAY_SIZE(error->engine); j++) {
			if (error->engine[j].vm != error->active_vm[i])
				continue;

			len += scnprintf(buf + len, sizeof(buf) - len, "%s%s",
					 first ? "" : ", ",
					 dev_priv->engine[j]->name);
			first = 0;
		}
		scnprintf(buf + len, sizeof(buf) - len, ")");
		print_error_buffers(m, buf,
				    error->active_bo[i],
				    error->active_bo_count[i]);
	}

	print_error_buffers(m, "Pinned (global)",
			    error->pinned_bo,
			    error->pinned_bo_count);

	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		const struct drm_i915_error_engine *ee = &error->engine[i];

		obj = ee->batchbuffer;
		if (obj) {
			err_puts(m, dev_priv->engine[i]->name);
			if (ee->context.pid)
				err_printf(m, " (submitted by %s [%d], ctx %d [%d], score %d)",
					   ee->context.comm,
					   ee->context.pid,
					   ee->context.handle,
					   ee->context.hw_id,
					   ee->context.ban_score);
			err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
				   upper_32_bits(obj->gtt_offset),
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, dev_priv->engine[i], NULL, obj);
		}

		for (j = 0; j < ee->user_bo_count; j++)
			print_error_obj(m, dev_priv->engine[i],
					"user", ee->user_bo[j]);

		if (ee->num_requests) {
			err_printf(m, "%s --- %d requests\n",
				   dev_priv->engine[i]->name,
				   ee->num_requests);
			for (j = 0; j < ee->num_requests; j++)
				error_print_request(m, " ", &ee->requests[j]);
		}

		if (IS_ERR(ee->waiters)) {
			err_printf(m, "%s --- ? waiters [unable to acquire spinlock]\n",
				   dev_priv->engine[i]->name);
		} else if (ee->num_waiters) {
			err_printf(m, "%s --- %d waiters\n",
				   dev_priv->engine[i]->name,
				   ee->num_waiters);
			for (j = 0; j < ee->num_waiters; j++) {
				err_printf(m, " seqno 0x%08x for %s [%d]\n",
					   ee->waiters[j].seqno,
					   ee->waiters[j].comm,
					   ee->waiters[j].pid);
			}
		}

		print_error_obj(m, dev_priv->engine[i],
				"ringbuffer", ee->ringbuffer);

		print_error_obj(m, dev_priv->engine[i],
				"HW Status", ee->hws_page);

		print_error_obj(m, dev_priv->engine[i],
				"HW context", ee->ctx);

		print_error_obj(m, dev_priv->engine[i],
				"WA context", ee->wa_ctx);

		print_error_obj(m, dev_priv->engine[i],
				"WA batchbuffer", ee->wa_batchbuffer);

		print_error_obj(m, dev_priv->engine[i],
				"NULL context", ee->default_state);
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, error->display);

	err_print_capabilities(m, &error->device_info);
	err_print_params(m, &error->params);
	err_print_uc(m, &error->uc);

	if (m->bytes == 0 && m->err)
		return m->err;

	return 0;
}

int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
			      struct drm_i915_private *i915,
			      size_t count, loff_t pos)
{
	memset(ebuf, 0, sizeof(*ebuf));
	ebuf->i915 = i915;

	/* We need to have enough room to store any i915_error_state printf
	 * so that we can move it to the start position.
	 */
	ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
	ebuf->buf = kmalloc(ebuf->size,
			    GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);

	if (ebuf->buf == NULL) {
		ebuf->size = PAGE_SIZE;
		ebuf->buf = kmalloc(ebuf->size, GFP_KERNEL);
	}

	if (ebuf->buf == NULL) {
		ebuf->size = 128;
		ebuf->buf = kmalloc(ebuf->size, GFP_KERNEL);
	}

	if (ebuf->buf == NULL)
		return -ENOMEM;

	ebuf->start = pos;

	return 0;
}
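
/*
 * The allocation ladder above prefers a buffer covering the whole read
 * window plus a terminating NUL (at least one page), then falls back to a
 * single page and finally to 128 bytes under memory pressure. A smaller
 * buffer still works because each read window is regenerated from scratch,
 * but any single printf longer than the buffer is rejected with -EIO in
 * __i915_error_seek().
 */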

static void i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		free_page((unsigned long)obj->pages[page]);

	kfree(obj);
}

static __always_inline void free_param(const char *type, void *x)
{
	if (!__builtin_strcmp(type, "char *"))
		kfree(*(void **)x);
}

static void cleanup_params(struct i915_gpu_state *error)
{
#define FREE(T, x, ...) free_param(#T, &error->params.x);
	I915_PARAMS_FOR_EACH(FREE);
#undef FREE
}

static void cleanup_uc_state(struct i915_gpu_state *error)
{
	struct i915_error_uc *error_uc = &error->uc;

	kfree(error_uc->guc_fw.path);
	kfree(error_uc->huc_fw.path);
	i915_error_object_free(error_uc->guc_log);
}

void __i915_gpu_state_free(struct kref *error_ref)
{
	struct i915_gpu_state *error =
		container_of(error_ref, typeof(*error), ref);
	long i, j;

	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		struct drm_i915_error_engine *ee = &error->engine[i];

		for (j = 0; j < ee->user_bo_count; j++)
			i915_error_object_free(ee->user_bo[j]);
		kfree(ee->user_bo);

		i915_error_object_free(ee->batchbuffer);
		i915_error_object_free(ee->wa_batchbuffer);
		i915_error_object_free(ee->ringbuffer);
		i915_error_object_free(ee->hws_page);
		i915_error_object_free(ee->ctx);
		i915_error_object_free(ee->wa_ctx);

		kfree(ee->requests);
		if (!IS_ERR_OR_NULL(ee->waiters))
			kfree(ee->waiters);
	}

	for (i = 0; i < ARRAY_SIZE(error->active_bo); i++)
		kfree(error->active_bo[i]);
	kfree(error->pinned_bo);

	kfree(error->overlay);
	kfree(error->display);

	cleanup_params(error);
	cleanup_uc_state(error);

	kfree(error);
}
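
/*
 * Page contents are copied out through a single pre-reserved page-sized
 * slot in the global GTT (ggtt->error_capture): each backing page of the
 * vma is bound into the slot, read via an atomic write-combining mapping,
 * fed to compress_page(), and then the slot is cleared again. This keeps
 * the capture path from needing any new GTT allocations while the GPU is
 * wedged.
 */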

static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *i915,
			 struct i915_vma *vma)
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	const u64 slot = ggtt->error_capture.start;
	struct drm_i915_error_object *dst;
	struct compress compress;
	unsigned long num_pages;
	struct sgt_iter iter;
	dma_addr_t dma;

	if (!vma)
		return NULL;

	num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT;
	num_pages = DIV_ROUND_UP(10 * num_pages, 8); /* worst-case zlib growth */
	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *),
		      GFP_ATOMIC | __GFP_NOWARN);
	if (!dst)
		return NULL;

	dst->gtt_offset = vma->node.start;
	dst->gtt_size = vma->node.size;
	dst->page_count = 0;
	dst->unused = 0;

	if (!compress_init(&compress)) {
		kfree(dst);
		return NULL;
	}

	for_each_sgt_dma(dma, iter, vma->pages) {
		void __iomem *s;
		int ret;

		ggtt->base.insert_page(&ggtt->base, dma, slot,
				       I915_CACHE_NONE, 0);

		s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
		ret = compress_page(&compress, (void __force *)s, dst);
		io_mapping_unmap_atomic(s);

		if (ret)
			goto unwind;
	}
	goto out;

unwind:
	while (dst->page_count--)
		free_page((unsigned long)dst->pages[dst->page_count]);
	kfree(dst);
	dst = NULL;

out:
	compress_fini(&compress, dst);
	ggtt->base.clear_range(&ggtt->base, slot, PAGE_SIZE);
	return dst;
}

/* The error capture is special as it tries to run underneath the normal
 * locking rules - so we use the raw version of the i915_gem_active lookup.
 */
static inline uint32_t
__active_get_seqno(struct i915_gem_active *active)
{
	struct drm_i915_gem_request *request;

	request = __i915_gem_active_peek(active);
	return request ? request->global_seqno : 0;
}

static inline int
__active_get_engine_id(struct i915_gem_active *active)
{
	struct drm_i915_gem_request *request;

	request = __i915_gem_active_peek(active);
	return request ? request->engine->id : -1;
}

static void capture_bo(struct drm_i915_error_buffer *err,
		       struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	int i;

	err->size = obj->base.size;
	err->name = obj->base.name;

	for (i = 0; i < I915_NUM_ENGINES; i++)
		err->rseqno[i] = __active_get_seqno(&vma->last_read[i]);
	err->wseqno = __active_get_seqno(&obj->frontbuffer_write);
	err->engine = __active_get_engine_id(&obj->frontbuffer_write);

	err->gtt_offset = vma->node.start;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = vma->fence ? vma->fence->id : -1;
	err->tiling = i915_gem_object_get_tiling(obj);
	err->dirty = obj->mm.dirty;
	err->purgeable = obj->mm.madv != I915_MADV_WILLNEED;
	err->userptr = obj->userptr.mm != NULL;
	err->cache_level = obj->cache_level;
}

static u32 capture_error_bo(struct drm_i915_error_buffer *err,
			    int count, struct list_head *head,
			    bool pinned_only)
{
	struct i915_vma *vma;
	int i = 0;

	list_for_each_entry(vma, head, vm_link) {
		if (pinned_only && !i915_vma_is_pinned(vma))
			continue;

		capture_bo(err++, vma);
		if (++i == count)
			break;
	}

	return i;
}

/* Generate a semi-unique error code. The code is not meant to have meaning;
 * its only purpose is to try to prevent false duplicated bug reports by
 * grossly estimating a GPU error state.
 *
 * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
 * the hang if we could strip the GTT offset information from it.
 *
 * It's only a small step better than a random number in its current form.
 */
static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
					 struct i915_gpu_state *error,
					 int *engine_id)
{
	uint32_t error_code = 0;
	int i;

	/* IPEHR would be an ideal way to detect errors, as it's the gross
	 * measure of "the command that hung." However, it can contain some
	 * very common synchronization commands that almost always appear
	 * when the hang is strictly a client bug. Use instdone to help
	 * differentiate those cases.
	 */
	for (i = 0; i < I915_NUM_ENGINES; i++) {
		if (error->engine[i].hangcheck_stalled) {
			if (engine_id)
				*engine_id = i;

			return error->engine[i].ipehr ^
			       error->engine[i].instdone.instdone;
		}
	}

	return error_code;
}
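
/*
 * The resulting ecode is what appears in the "GPU HANG: ecode
 * <gen>:<engine>:<ecode>" line built by i915_error_capture_msg() below,
 * e.g. "GPU HANG: ecode 9:0:0x85dffffb" (value illustrative); bug
 * triagers use it to spot likely-duplicate hang reports.
 */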

static void i915_gem_record_fences(struct drm_i915_private *dev_priv,
				   struct i915_gpu_state *error)
{
	int i;

	if (INTEL_GEN(dev_priv) >= 6) {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i));
	} else if (INTEL_GEN(dev_priv) >= 4) {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_LO(i));
	} else {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ(FENCE_REG(i));
	}
	error->nfence = i;
}

static inline u32
gen8_engine_sync_index(struct intel_engine_cs *engine,
		       struct intel_engine_cs *other)
{
	int idx;

	/*
	 * rcs  -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
	 * vcs  -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
	 * bcs  -> 0 = vecs, 1 = vcs2, 2 = rcs, 3 = vcs;
	 * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
	 * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
	 */

	idx = (other - engine) - 1;
	if (idx < 0)
		idx += I915_NUM_ENGINES;

	return idx;
}

static void gen6_record_semaphore_state(struct intel_engine_cs *engine,
					struct drm_i915_error_engine *ee)
{
	struct drm_i915_private *dev_priv = engine->i915;

	ee->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base));
	ee->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base));
	if (HAS_VEBOX(dev_priv))
		ee->semaphore_mboxes[2] =
			I915_READ(RING_SYNC_2(engine->mmio_base));
}

static void error_record_engine_waiters(struct intel_engine_cs *engine,
					struct drm_i915_error_engine *ee)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct drm_i915_error_waiter *waiter;
	struct rb_node *rb;
	int count;

	ee->num_waiters = 0;
	ee->waiters = NULL;

	if (RB_EMPTY_ROOT(&b->waiters))
		return;

	if (!spin_trylock_irq(&b->rb_lock)) {
		ee->waiters = ERR_PTR(-EDEADLK);
		return;
	}

	count = 0;
	for (rb = rb_first(&b->waiters); rb != NULL; rb = rb_next(rb))
		count++;
	spin_unlock_irq(&b->rb_lock);

	waiter = NULL;
	if (count)
		waiter = kmalloc_array(count,
				       sizeof(struct drm_i915_error_waiter),
				       GFP_ATOMIC);
	if (!waiter)
		return;

	if (!spin_trylock_irq(&b->rb_lock)) {
		kfree(waiter);
		ee->waiters = ERR_PTR(-EDEADLK);
		return;
	}

	ee->waiters = waiter;
	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
		struct intel_wait *w = rb_entry(rb, typeof(*w), node);

		strcpy(waiter->comm, w->tsk->comm);
		waiter->pid = w->tsk->pid;
		waiter->seqno = w->seqno;
		waiter++;

		if (++ee->num_waiters == count)
			break;
	}
	spin_unlock_irq(&b->rb_lock);
}
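
/*
 * Note the lock discipline above: the capture runs in a context where
 * sleeping is not allowed, so the waiter rb-tree is only ever taken with
 * spin_trylock_irq(). The tree is walked once under the lock just to size
 * the GFP_ATOMIC allocation, the lock is dropped for the allocation
 * itself, and then retaken to copy the entries; if either trylock fails,
 * ee->waiters is left as ERR_PTR(-EDEADLK), which the printer reports as
 * "? waiters [unable to acquire spinlock]".
 */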

static void error_record_engine_registers(struct i915_gpu_state *error,
					  struct intel_engine_cs *engine,
					  struct drm_i915_error_engine *ee)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (INTEL_GEN(dev_priv) >= 6) {
		ee->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
		if (INTEL_GEN(dev_priv) >= 8) {
			ee->fault_reg = I915_READ(GEN8_RING_FAULT_REG);
		} else {
			gen6_record_semaphore_state(engine, ee);
			ee->fault_reg = I915_READ(RING_FAULT_REG(engine));
		}
	}

	if (INTEL_GEN(dev_priv) >= 4) {
		ee->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
		ee->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
		ee->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
		ee->instps = I915_READ(RING_INSTPS(engine->mmio_base));
		ee->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
		if (INTEL_GEN(dev_priv) >= 8) {
			ee->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
			ee->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
		}
		ee->bbstate = I915_READ(RING_BBSTATE(engine->mmio_base));
	} else {
		ee->faddr = I915_READ(DMA_FADD_I8XX);
		ee->ipeir = I915_READ(IPEIR);
		ee->ipehr = I915_READ(IPEHR);
	}

	intel_engine_get_instdone(engine, &ee->instdone);

	ee->waiting = intel_engine_has_waiter(engine);
	ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
	ee->acthd = intel_engine_get_active_head(engine);
	ee->seqno = intel_engine_get_seqno(engine);
	ee->last_seqno = intel_engine_last_submit(engine);
	ee->start = I915_READ_START(engine);
	ee->head = I915_READ_HEAD(engine);
	ee->tail = I915_READ_TAIL(engine);
	ee->ctl = I915_READ_CTL(engine);
	if (INTEL_GEN(dev_priv) > 2)
		ee->mode = I915_READ_MODE(engine);

	if (!HWS_NEEDS_PHYSICAL(dev_priv)) {
		i915_reg_t mmio;

		if (IS_GEN7(dev_priv)) {
			switch (engine->id) {
			default:
			case RCS:
				mmio = RENDER_HWS_PGA_GEN7;
				break;
			case BCS:
				mmio = BLT_HWS_PGA_GEN7;
				break;
			case VCS:
				mmio = BSD_HWS_PGA_GEN7;
				break;
			case VECS:
				mmio = VEBOX_HWS_PGA_GEN7;
				break;
			}
		} else if (IS_GEN6(engine->i915)) {
			mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
		} else {
			/* XXX: gen8 returns to sanity */
			mmio = RING_HWS_PGA(engine->mmio_base);
		}

		ee->hws = I915_READ(mmio);
	}

	ee->idle = intel_engine_is_idle(engine);
	ee->hangcheck_timestamp = engine->hangcheck.action_timestamp;
	ee->hangcheck_action = engine->hangcheck.action;
	ee->hangcheck_stalled = engine->hangcheck.stalled;
	ee->reset_count = i915_reset_engine_count(&dev_priv->gpu_error,
						  engine);

	if (USES_PPGTT(dev_priv)) {
		int i;

		ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));

		if (IS_GEN6(dev_priv))
			ee->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE_READ(engine));
		else if (IS_GEN7(dev_priv))
			ee->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE(engine));
		else if (INTEL_GEN(dev_priv) >= 8)
			for (i = 0; i < 4; i++) {
				ee->vm_info.pdp[i] =
					I915_READ(GEN8_RING_PDP_UDW(engine, i));
				ee->vm_info.pdp[i] <<= 32;
				ee->vm_info.pdp[i] |=
					I915_READ(GEN8_RING_PDP_LDW(engine, i));
			}
	}
}

static void record_request(struct drm_i915_gem_request *request,
			   struct drm_i915_error_request *erq)
{
	erq->context = request->ctx->hw_id;
	erq->priority = request->priotree.priority;
	erq->ban_score = atomic_read(&request->ctx->ban_score);
	erq->seqno = request->global_seqno;
	erq->jiffies = request->emitted_jiffies;
	erq->head = request->head;
	erq->tail = request->tail;

	rcu_read_lock();
	erq->pid = request->ctx->pid ? pid_nr(request->ctx->pid) : 0;
	rcu_read_unlock();
}

static void engine_record_requests(struct intel_engine_cs *engine,
				   struct drm_i915_gem_request *first,
				   struct drm_i915_error_engine *ee)
{
	struct drm_i915_gem_request *request;
	int count;

	count = 0;
	request = first;
	list_for_each_entry_from(request, &engine->timeline->requests, link)
		count++;
	if (!count)
		return;

	ee->requests = kcalloc(count, sizeof(*ee->requests), GFP_ATOMIC);
	if (!ee->requests)
		return;

	ee->num_requests = count;

	count = 0;
	request = first;
	list_for_each_entry_from(request, &engine->timeline->requests, link) {
		if (count >= ee->num_requests) {
			/*
			 * If the ring request list was changed in
			 * between the point where the error request
			 * list was created and dimensioned and this
			 * point then just exit early to avoid crashes.
			 *
			 * We don't need to communicate that the
			 * request list changed state during error
			 * state capture and that the error state is
			 * slightly incorrect as a consequence since we
			 * are typically only interested in the request
			 * list state at the point of error state
			 * capture, not in any changes happening during
			 * the capture.
			 */
			break;
		}

		record_request(request, &ee->requests[count++]);
	}
	ee->num_requests = count;
}

static void error_record_engine_execlists(struct intel_engine_cs *engine,
					  struct drm_i915_error_engine *ee)
{
	const struct intel_engine_execlists * const execlists = &engine->execlists;
	unsigned int n;

	for (n = 0; n < execlists_num_ports(execlists); n++) {
		struct drm_i915_gem_request *rq = port_request(&execlists->port[n]);

		if (!rq)
			break;

		record_request(rq, &ee->execlist[n]);
	}

	ee->num_ports = n;
}

static void record_context(struct drm_i915_error_context *e,
			   struct i915_gem_context *ctx)
{
	if (ctx->pid) {
		struct task_struct *task;

		rcu_read_lock();
		task = pid_task(ctx->pid, PIDTYPE_PID);
		if (task) {
			strcpy(e->comm, task->comm);
			e->pid = task->pid;
		}
		rcu_read_unlock();
	}

	e->handle = ctx->user_handle;
	e->hw_id = ctx->hw_id;
	e->priority = ctx->priority;
	e->ban_score = atomic_read(&ctx->ban_score);
	e->guilty = atomic_read(&ctx->guilty_count);
	e->active = atomic_read(&ctx->active_count);
}

static void request_record_user_bo(struct drm_i915_gem_request *request,
				   struct drm_i915_error_engine *ee)
{
	struct i915_gem_capture_list *c;
	struct drm_i915_error_object **bo;
	long count;

	count = 0;
	for (c = request->capture_list; c; c = c->next)
		count++;

	bo = NULL;
	if (count)
		bo = kcalloc(count, sizeof(*bo), GFP_ATOMIC);
	if (!bo)
		return;

	count = 0;
	for (c = request->capture_list; c; c = c->next) {
		bo[count] = i915_error_object_create(request->i915, c->vma);
		if (!bo[count])
			break;
		count++;
	}

	ee->user_bo = bo;
	ee->user_bo_count = count;
}

static struct drm_i915_error_object *
capture_object(struct drm_i915_private *dev_priv,
	       struct drm_i915_gem_object *obj)
{
	if (obj && i915_gem_object_has_pages(obj)) {
		struct i915_vma fake = {
			.node = { .start = U64_MAX, .size = obj->base.size },
			.size = obj->base.size,
			.pages = obj->mm.pages,
			.obj = obj,
		};

		return i915_error_object_create(dev_priv, &fake);
	} else {
		return NULL;
	}
}

static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
				  struct i915_gpu_state *error)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	int i;

	for (i = 0; i < I915_NUM_ENGINES; i++) {
		struct intel_engine_cs *engine = dev_priv->engine[i];
		struct drm_i915_error_engine *ee = &error->engine[i];
		struct drm_i915_gem_request *request;

		ee->engine_id = -1;

		if (!engine)
			continue;

		ee->engine_id = i;

		error_record_engine_registers(error, engine, ee);
		error_record_engine_waiters(engine, ee);
		error_record_engine_execlists(engine, ee);

		request = i915_gem_find_active_request(engine);
		if (request) {
			struct intel_ring *ring;

			ee->vm = request->ctx->ppgtt ?
				&request->ctx->ppgtt->base : &ggtt->base;

			record_context(&ee->context, request->ctx);

			/* We need to copy these to an anonymous buffer
			 * as the simplest method to avoid being overwritten
			 * by userspace.
			 */
			ee->batchbuffer =
				i915_error_object_create(dev_priv,
							 request->batch);

			if (HAS_BROKEN_CS_TLB(dev_priv))
				ee->wa_batchbuffer =
					i915_error_object_create(dev_priv,
								 engine->scratch);

			request_record_user_bo(request, ee);

			ee->ctx =
				i915_error_object_create(dev_priv,
							 request->ctx->engine[i].state);

			error->simulated |=
				i915_gem_context_no_error_capture(request->ctx);

			ee->rq_head = request->head;
			ee->rq_post = request->postfix;
			ee->rq_tail = request->tail;

			ring = request->ring;
			ee->cpu_ring_head = ring->head;
			ee->cpu_ring_tail = ring->tail;
			ee->ringbuffer =
				i915_error_object_create(dev_priv, ring->vma);

			engine_record_requests(engine, request, ee);
		}

		ee->hws_page =
			i915_error_object_create(dev_priv,
						 engine->status_page.vma);

		ee->wa_ctx =
			i915_error_object_create(dev_priv, engine->wa_ctx.vma);

		ee->default_state =
			capture_object(dev_priv, engine->default_state);
	}
}

static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
				struct i915_gpu_state *error,
				struct i915_address_space *vm,
				int idx)
{
	struct drm_i915_error_buffer *active_bo;
	struct i915_vma *vma;
	int count;

	count = 0;
	list_for_each_entry(vma, &vm->active_list, vm_link)
		count++;

	active_bo = NULL;
	if (count)
		active_bo = kcalloc(count, sizeof(*active_bo), GFP_ATOMIC);
	if (active_bo)
		count = capture_error_bo(active_bo, count, &vm->active_list, false);
	else
		count = 0;

	error->active_vm[idx] = vm;
	error->active_bo[idx] = active_bo;
	error->active_bo_count[idx] = count;
}

static void i915_capture_active_buffers(struct drm_i915_private *dev_priv,
					struct i915_gpu_state *error)
{
	int cnt = 0, i, j;

	BUILD_BUG_ON(ARRAY_SIZE(error->engine) > ARRAY_SIZE(error->active_bo));
	BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_vm));
	BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_bo_count));

	/* Scan each engine looking for unique active contexts/vm */
	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		struct drm_i915_error_engine *ee = &error->engine[i];
		bool found;

		if (!ee->vm)
			continue;

		found = false;
		for (j = 0; j < i && !found; j++)
			found = error->engine[j].vm == ee->vm;
		if (!found)
			i915_gem_capture_vm(dev_priv, error, ee->vm, cnt++);
	}
}

static void i915_capture_pinned_buffers(struct drm_i915_private *dev_priv,
					struct i915_gpu_state *error)
{
	struct i915_address_space *vm = &dev_priv->ggtt.base;
	struct drm_i915_error_buffer *bo;
	struct i915_vma *vma;
	int count_active, count_inactive;

	count_active = 0;
	list_for_each_entry(vma, &vm->active_list, vm_link)
		count_active++;

	count_inactive = 0;
	list_for_each_entry(vma, &vm->inactive_list, vm_link)
		count_inactive++;

	bo = NULL;
	if (count_active + count_inactive)
		bo = kcalloc(count_active + count_inactive,
			     sizeof(*bo), GFP_ATOMIC);
	if (!bo)
		return;

	count_active = capture_error_bo(bo, count_active,
					&vm->active_list, true);
	count_inactive = capture_error_bo(bo + count_active, count_inactive,
					  &vm->inactive_list, true);
	error->pinned_bo_count = count_active + count_inactive;
	error->pinned_bo = bo;
}

static void capture_uc_state(struct i915_gpu_state *error)
{
	struct drm_i915_private *i915 = error->i915;
	struct i915_error_uc *error_uc = &error->uc;

	/* Capturing uC state won't be useful if there is no GuC */
	if (!error->device_info.has_guc)
		return;

	error_uc->guc_fw = i915->guc.fw;
	error_uc->huc_fw = i915->huc.fw;

	/* Non-default firmware paths will be specified by the modparam.
	 * As modparams are generally accessible from userspace, make
	 * explicit copies of the firmware paths.
	 */
	error_uc->guc_fw.path = kstrdup(i915->guc.fw.path, GFP_ATOMIC);
	error_uc->huc_fw.path = kstrdup(i915->huc.fw.path, GFP_ATOMIC);
	error_uc->guc_log = i915_error_object_create(i915, i915->guc.log.vma);
}

/* Capture all registers which don't fit into another category. */
static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
				   struct i915_gpu_state *error)
{
	int i;

	/* General organization
	 * 1. Registers specific to a single generation
	 * 2. Registers which belong to multiple generations
	 * 3. Feature specific registers.
	 * 4. Everything else
	 * Please try to follow the order.
	 */

	/* 1: Registers specific to a single generation */
	if (IS_VALLEYVIEW(dev_priv)) {
		error->gtier[0] = I915_READ(GTIER);
		error->ier = I915_READ(VLV_IER);
		error->forcewake = I915_READ_FW(FORCEWAKE_VLV);
	}

	if (IS_GEN7(dev_priv))
		error->err_int = I915_READ(GEN7_ERR_INT);

	if (INTEL_GEN(dev_priv) >= 8) {
		error->fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
		error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
	}

	if (IS_GEN6(dev_priv)) {
		error->forcewake = I915_READ_FW(FORCEWAKE);
		error->gab_ctl = I915_READ(GAB_CTL);
		error->gfx_mode = I915_READ(GFX_MODE);
	}

	/* 2: Registers which belong to multiple generations */
	if (INTEL_GEN(dev_priv) >= 7)
		error->forcewake = I915_READ_FW(FORCEWAKE_MT);

	if (INTEL_GEN(dev_priv) >= 6) {
		error->derrmr = I915_READ(DERRMR);
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	if (INTEL_GEN(dev_priv) >= 5)
		error->ccid = I915_READ(CCID);

	/* 3: Feature specific registers */
	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
		error->gam_ecochk = I915_READ(GAM_ECOCHK);
		error->gac_eco = I915_READ(GAC_ECO_BITS);
	}

	/* 4: Everything else */
	if (INTEL_GEN(dev_priv) >= 8) {
		error->ier = I915_READ(GEN8_DE_MISC_IER);
		for (i = 0; i < 4; i++)
			error->gtier[i] = I915_READ(GEN8_GT_IER(i));
		error->ngtier = 4;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		error->ier = I915_READ(DEIER);
		error->gtier[0] = I915_READ(GTIER);
		error->ngtier = 1;
	} else if (IS_GEN2(dev_priv)) {
		error->ier = I915_READ16(IER);
	} else if (!IS_VALLEYVIEW(dev_priv)) {
		error->ier = I915_READ(IER);
	}
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
}

static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
				   struct i915_gpu_state *error,
				   u32 engine_mask,
				   const char *error_msg)
{
	u32 ecode;
	int engine_id = -1, len;

	ecode = i915_error_generate_code(dev_priv, error, &engine_id);

	len = scnprintf(error->error_msg, sizeof(error->error_msg),
			"GPU HANG: ecode %d:%d:0x%08x",
			INTEL_GEN(dev_priv), engine_id, ecode);

	if (engine_id != -1 && error->engine[engine_id].context.pid)
		len += scnprintf(error->error_msg + len,
				 sizeof(error->error_msg) - len,
				 ", in %s [%d]",
				 error->engine[engine_id].context.comm,
				 error->engine[engine_id].context.pid);

	scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
		  ", reason: %s, action: %s",
		  error_msg,
		  engine_mask ? "reset" : "continue");
}

static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
				   struct i915_gpu_state *error)
{
	error->awake = dev_priv->gt.awake;
	error->wakelock = atomic_read(&dev_priv->runtime_pm.wakeref_count);
	error->suspended = dev_priv->runtime_pm.suspended;

	error->iommu = -1;
#ifdef CONFIG_INTEL_IOMMU
	error->iommu = intel_iommu_gfx_mapped;
#endif
	error->reset_count = i915_reset_count(&dev_priv->gpu_error);
	error->suspend_count = dev_priv->suspend_count;

	memcpy(&error->device_info,
	       INTEL_INFO(dev_priv),
	       sizeof(error->device_info));
}

static __always_inline void dup_param(const char *type, void *x)
{
	if (!__builtin_strcmp(type, "char *"))
		*(void **)x = kstrdup(*(void **)x, GFP_ATOMIC);
}

static void capture_params(struct i915_gpu_state *error)
{
	error->params = i915_modparams;
#define DUP(T, x, ...) dup_param(#T, &error->params.x);
	I915_PARAMS_FOR_EACH(DUP);
#undef DUP
}

static int capture(void *data)
{
	struct i915_gpu_state *error = data;

	do_gettimeofday(&error->time);
	error->boottime = ktime_to_timeval(ktime_get_boottime());
	error->uptime =
		ktime_to_timeval(ktime_sub(ktime_get(),
					   error->i915->gt.last_init_time));

	capture_params(error);
	capture_uc_state(error);

	i915_capture_gen_state(error->i915, error);
	i915_capture_reg_state(error->i915, error);
	i915_gem_record_fences(error->i915, error);
	i915_gem_record_rings(error->i915, error);
	i915_capture_active_buffers(error->i915, error);
	i915_capture_pinned_buffers(error->i915, error);

	error->overlay = intel_overlay_capture_error_state(error->i915);
	error->display = intel_display_capture_error_state(error->i915);

	return 0;
}

#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))

struct i915_gpu_state *
i915_capture_gpu_state(struct drm_i915_private *i915)
{
	struct i915_gpu_state *error;

	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error)
		return NULL;

	kref_init(&error->ref);
	error->i915 = i915;

	stop_machine(capture, error, NULL);

	return error;
}
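
/*
 * capture() is run via stop_machine() so that every other CPU is spinning
 * with interrupts disabled while the snapshot is taken: the register,
 * request and buffer-object state recorded above is therefore
 * self-consistent. This is also why the whole capture path sticks to
 * GFP_ATOMIC allocations and trylocks - nothing here is allowed to sleep.
 */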

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev_priv: i915 device private
 * @engine_mask: mask of engines being reset, or 0 if execution continues
 * @error_msg: a message describing the reason for the error capture
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
void i915_capture_error_state(struct drm_i915_private *dev_priv,
			      u32 engine_mask,
			      const char *error_msg)
{
	static bool warned;
	struct i915_gpu_state *error;
	unsigned long flags;

	if (!i915_modparams.error_capture)
		return;

	if (READ_ONCE(dev_priv->gpu_error.first_error))
		return;

	error = i915_capture_gpu_state(dev_priv);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	i915_error_capture_msg(dev_priv, error, engine_mask, error_msg);
	DRM_INFO("%s\n", error->error_msg);

	if (!error->simulated) {
		spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
		if (!dev_priv->gpu_error.first_error) {
			dev_priv->gpu_error.first_error = error;
			error = NULL;
		}
		spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
	}

	if (error) {
		__i915_gpu_state_free(&error->ref);
		return;
	}

	if (!warned &&
	    ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
		DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
		DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
		DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
		DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
		DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
			 dev_priv->drm.primary->index);
		warned = true;
	}
}

struct i915_gpu_state *
i915_first_error_state(struct drm_i915_private *i915)
{
	struct i915_gpu_state *error;

	spin_lock_irq(&i915->gpu_error.lock);
	error = i915->gpu_error.first_error;
	if (error)
		i915_gpu_state_get(error);
	spin_unlock_irq(&i915->gpu_error.lock);

	return error;
}

void i915_reset_error_state(struct drm_i915_private *i915)
{
	struct i915_gpu_state *error;

	spin_lock_irq(&i915->gpu_error.lock);
	error = i915->gpu_error.first_error;
	i915->gpu_error.first_error = NULL;
	spin_unlock_irq(&i915->gpu_error.lock);

	i915_gpu_state_put(error);
}