i915_gpu_error.c

/*
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 *
 */

#include <generated/utsrelease.h>
#include <linux/stop_machine.h>
#include <linux/zlib.h>

#include "i915_drv.h"

static const char *engine_str(int engine)
{
	switch (engine) {
	case RCS: return "render";
	case VCS: return "bsd";
	case BCS: return "blt";
	case VECS: return "vebox";
	case VCS2: return "bsd2";
	default: return "";
	}
}

static const char *tiling_flag(int tiling)
{
	switch (tiling) {
	default:
	case I915_TILING_NONE: return "";
	case I915_TILING_X: return " X";
	case I915_TILING_Y: return " Y";
	}
}

static const char *dirty_flag(int dirty)
{
	return dirty ? " dirty" : "";
}

static const char *purgeable_flag(int purgeable)
{
	return purgeable ? " purgeable" : "";
}

static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
{
	if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
		e->err = -ENOSPC;
		return false;
	}

	if (e->bytes == e->size - 1 || e->err)
		return false;

	return true;
}
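
/*
 * Note on the window mechanics below: the error state is regenerated in full
 * on every chunked read, with the reader supplying an offset (e->start).
 * Output is silently discarded until e->pos catches up with the requested
 * window - __i915_error_seek() skips whole strings that end before the
 * window, and __i915_error_advance() trims the first string that straddles
 * it, so the buffer always begins exactly at the requested offset.
 */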
static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
			      unsigned len)
{
	if (e->pos + len <= e->start) {
		e->pos += len;
		return false;
	}

	/* First vsnprintf needs to fit in its entirety for memmove */
	if (len >= e->size) {
		e->err = -EIO;
		return false;
	}

	return true;
}

static void __i915_error_advance(struct drm_i915_error_state_buf *e,
				 unsigned len)
{
	/* If this is the first printf in this window, adjust it so that the
	 * start position matches the start of the buffer.
	 */
	if (e->pos < e->start) {
		const size_t off = e->start - e->pos;

		/* Should not happen but be paranoid */
		if (off > len || e->bytes) {
			e->err = -EIO;
			return;
		}

		memmove(e->buf, e->buf + off, len - off);
		e->bytes = len - off;
		e->pos = e->start;
		return;
	}

	e->bytes += len;
	e->pos += len;
}

__printf(2, 0)
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
			       const char *f, va_list args)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	/* Seek to the first printf which hits the start position */
	if (e->pos < e->start) {
		va_list tmp;

		va_copy(tmp, args);
		len = vsnprintf(NULL, 0, f, tmp);
		va_end(tmp);

		if (!__i915_error_seek(e, len))
			return;
	}

	len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;

	__i915_error_advance(e, len);
}

static void i915_error_puts(struct drm_i915_error_state_buf *e,
			    const char *str)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	len = strlen(str);

	/* Seek to the first string which hits the start position */
	if (e->pos < e->start) {
		if (!__i915_error_seek(e, len))
			return;
	}

	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;
	memcpy(e->buf + e->bytes, str, len);

	__i915_error_advance(e, len);
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)
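
/*
 * Captured object pages are emitted page by page through compress_page().
 * With CONFIG_DRM_I915_COMPRESS_ERROR the pages are run through zlib deflate
 * and the stream is prefixed with ":"; otherwise they are copied verbatim
 * and prefixed with "~". Either way the bytes are ascii85-encoded later by
 * print_error_obj(), so the marker tells a decoder which pipeline to undo.
 */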
#ifdef CONFIG_DRM_I915_COMPRESS_ERROR

struct compress {
	struct z_stream_s zstream;
	void *tmp;
};

static bool compress_init(struct compress *c)
{
	struct z_stream_s *zstream = memset(&c->zstream, 0, sizeof(c->zstream));

	zstream->workspace =
		kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
			GFP_ATOMIC | __GFP_NOWARN);
	if (!zstream->workspace)
		return false;

	if (zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) != Z_OK) {
		kfree(zstream->workspace);
		return false;
	}

	c->tmp = NULL;
	if (i915_has_memcpy_from_wc())
		c->tmp = (void *)__get_free_page(GFP_ATOMIC | __GFP_NOWARN);

	return true;
}

static int compress_page(struct compress *c,
			 void *src,
			 struct drm_i915_error_object *dst)
{
	struct z_stream_s *zstream = &c->zstream;

	zstream->next_in = src;
	if (c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
		zstream->next_in = c->tmp;
	zstream->avail_in = PAGE_SIZE;

	do {
		if (zstream->avail_out == 0) {
			unsigned long page;

			page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
			if (!page)
				return -ENOMEM;

			dst->pages[dst->page_count++] = (void *)page;

			zstream->next_out = (void *)page;
			zstream->avail_out = PAGE_SIZE;
		}

		if (zlib_deflate(zstream, Z_SYNC_FLUSH) != Z_OK)
			return -EIO;
	} while (zstream->avail_in);

	/* Fallback to uncompressed if we increase size? */
	if (0 && zstream->total_out > zstream->total_in)
		return -E2BIG;

	return 0;
}

static void compress_fini(struct compress *c,
			  struct drm_i915_error_object *dst)
{
	struct z_stream_s *zstream = &c->zstream;

	if (dst) {
		zlib_deflate(zstream, Z_FINISH);
		dst->unused = zstream->avail_out;
	}

	zlib_deflateEnd(zstream);
	kfree(zstream->workspace);

	if (c->tmp)
		free_page((unsigned long)c->tmp);
}

static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
	err_puts(m, ":");
}

#else

struct compress {
};

static bool compress_init(struct compress *c)
{
	return true;
}

static int compress_page(struct compress *c,
			 void *src,
			 struct drm_i915_error_object *dst)
{
	unsigned long page;
	void *ptr;

	page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
	if (!page)
		return -ENOMEM;

	ptr = (void *)page;
	if (!i915_memcpy_from_wc(ptr, src, PAGE_SIZE))
		memcpy(ptr, src, PAGE_SIZE);
	dst->pages[dst->page_count++] = ptr;

	return 0;
}

static void compress_fini(struct compress *c,
			  struct drm_i915_error_object *dst)
{
}

static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
	err_puts(m, "~");
}

#endif

static void print_error_buffers(struct drm_i915_error_state_buf *m,
				const char *name,
				struct drm_i915_error_buffer *err,
				int count)
{
	int i;

	err_printf(m, "%s [%d]:\n", name, count);

	while (count--) {
		err_printf(m, "    %08x_%08x %8u %02x %02x [ ",
			   upper_32_bits(err->gtt_offset),
			   lower_32_bits(err->gtt_offset),
			   err->size,
			   err->read_domains,
			   err->write_domain);
		for (i = 0; i < I915_NUM_ENGINES; i++)
			err_printf(m, "%02x ", err->rseqno[i]);

		err_printf(m, "] %02x", err->wseqno);
		err_puts(m, tiling_flag(err->tiling));
		err_puts(m, dirty_flag(err->dirty));
		err_puts(m, purgeable_flag(err->purgeable));
		err_puts(m, err->userptr ? " userptr" : "");
		err_puts(m, err->engine != -1 ? " " : "");
		err_puts(m, engine_str(err->engine));
		err_puts(m, i915_cache_level_str(m->i915, err->cache_level));

		if (err->name)
			err_printf(m, " (name: %d)", err->name);
		if (err->fence_reg != I915_FENCE_REG_NONE)
			err_printf(m, " (fence: %d)", err->fence_reg);

		err_puts(m, "\n");
		err++;
	}
}

static void error_print_instdone(struct drm_i915_error_state_buf *m,
				 const struct drm_i915_error_engine *ee)
{
	int slice;
	int subslice;

	err_printf(m, "  INSTDONE: 0x%08x\n",
		   ee->instdone.instdone);

	if (ee->engine_id != RCS || INTEL_GEN(m->i915) <= 3)
		return;

	err_printf(m, "  SC_INSTDONE: 0x%08x\n",
		   ee->instdone.slice_common);

	if (INTEL_GEN(m->i915) <= 6)
		return;

	for_each_instdone_slice_subslice(m->i915, slice, subslice)
		err_printf(m, "  SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice,
			   ee->instdone.sampler[slice][subslice]);

	for_each_instdone_slice_subslice(m->i915, slice, subslice)
		err_printf(m, "  ROW_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice,
			   ee->instdone.row[slice][subslice]);
}

static void error_print_request(struct drm_i915_error_state_buf *m,
				const char *prefix,
				const struct drm_i915_error_request *erq)
{
	if (!erq->seqno)
		return;

	err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x, emitted %dms ago, head %08x, tail %08x\n",
		   prefix, erq->pid, erq->ban_score,
		   erq->context, erq->seqno,
		   jiffies_to_msecs(jiffies - erq->jiffies),
		   erq->head, erq->tail);
}

static void error_print_context(struct drm_i915_error_state_buf *m,
				const char *header,
				const struct drm_i915_error_context *ctx)
{
	err_printf(m, "%s%s[%d] user_handle %d hw_id %d, ban score %d guilty %d active %d\n",
		   header, ctx->comm, ctx->pid, ctx->handle, ctx->hw_id,
		   ctx->ban_score, ctx->guilty, ctx->active);
}

static void error_print_engine(struct drm_i915_error_state_buf *m,
			       const struct drm_i915_error_engine *ee)
{
	err_printf(m, "%s command stream:\n", engine_str(ee->engine_id));
	err_printf(m, "  START: 0x%08x\n", ee->start);
	err_printf(m, "  HEAD:  0x%08x [0x%08x]\n", ee->head, ee->rq_head);
	err_printf(m, "  TAIL:  0x%08x [0x%08x, 0x%08x]\n",
		   ee->tail, ee->rq_post, ee->rq_tail);
	err_printf(m, "  CTL:   0x%08x\n", ee->ctl);
	err_printf(m, "  MODE:  0x%08x\n", ee->mode);
	err_printf(m, "  HWS:   0x%08x\n", ee->hws);
	err_printf(m, "  ACTHD: 0x%08x %08x\n",
		   (u32)(ee->acthd >> 32), (u32)ee->acthd);
	err_printf(m, "  IPEIR: 0x%08x\n", ee->ipeir);
	err_printf(m, "  IPEHR: 0x%08x\n", ee->ipehr);

	error_print_instdone(m, ee);

	if (ee->batchbuffer) {
		u64 start = ee->batchbuffer->gtt_offset;
		u64 end = start + ee->batchbuffer->gtt_size;

		err_printf(m, "  batch: [0x%08x_%08x, 0x%08x_%08x]\n",
			   upper_32_bits(start), lower_32_bits(start),
			   upper_32_bits(end), lower_32_bits(end));
	}
	if (INTEL_GEN(m->i915) >= 4) {
		err_printf(m, "  BBADDR: 0x%08x_%08x\n",
			   (u32)(ee->bbaddr >> 32), (u32)ee->bbaddr);
		err_printf(m, "  BB_STATE: 0x%08x\n", ee->bbstate);
		err_printf(m, "  INSTPS: 0x%08x\n", ee->instps);
	}
	err_printf(m, "  INSTPM: 0x%08x\n", ee->instpm);
	err_printf(m, "  FADDR: 0x%08x %08x\n", upper_32_bits(ee->faddr),
		   lower_32_bits(ee->faddr));
	if (INTEL_GEN(m->i915) >= 6) {
		err_printf(m, "  RC PSMI: 0x%08x\n", ee->rc_psmi);
		err_printf(m, "  FAULT_REG: 0x%08x\n", ee->fault_reg);
		err_printf(m, "  SYNC_0: 0x%08x\n",
			   ee->semaphore_mboxes[0]);
		err_printf(m, "  SYNC_1: 0x%08x\n",
			   ee->semaphore_mboxes[1]);
		if (HAS_VEBOX(m->i915))
			err_printf(m, "  SYNC_2: 0x%08x\n",
				   ee->semaphore_mboxes[2]);
	}
	if (USES_PPGTT(m->i915)) {
		err_printf(m, "  GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);

		if (INTEL_GEN(m->i915) >= 8) {
			int i;

			for (i = 0; i < 4; i++)
				err_printf(m, "  PDP%d: 0x%016llx\n",
					   i, ee->vm_info.pdp[i]);
		} else {
			err_printf(m, "  PP_DIR_BASE: 0x%08x\n",
				   ee->vm_info.pp_dir_base);
		}
	}
	err_printf(m, "  seqno: 0x%08x\n", ee->seqno);
	err_printf(m, "  last_seqno: 0x%08x\n", ee->last_seqno);
	err_printf(m, "  waiting: %s\n", yesno(ee->waiting));
	err_printf(m, "  ring->head: 0x%08x\n", ee->cpu_ring_head);
	err_printf(m, "  ring->tail: 0x%08x\n", ee->cpu_ring_tail);
	err_printf(m, "  hangcheck stall: %s\n", yesno(ee->hangcheck_stalled));
	err_printf(m, "  hangcheck action: %s\n",
		   hangcheck_action_to_str(ee->hangcheck_action));
	err_printf(m, "  hangcheck action timestamp: %lu, %u ms ago\n",
		   ee->hangcheck_timestamp,
		   jiffies_to_msecs(jiffies - ee->hangcheck_timestamp));

	error_print_request(m, "  ELSP[0]: ", &ee->execlist[0]);
	error_print_request(m, "  ELSP[1]: ", &ee->execlist[1]);
	error_print_context(m, "  Active context: ", &ee->context);
}

void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	i915_error_vprintf(e, f, args);
	va_end(args);
}
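
/*
 * ascii85 packs each 32-bit word of the captured pages into five printable
 * characters drawn from '!'..'u'; an all-zero word is shortened to the
 * single character 'z' (hence ascii85_encode() returning false for 0).
 * Keeping the dump purely textual lets it survive the sysfs/debugfs read
 * path unmangled.
 */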
static int
ascii85_encode_len(int len)
{
	return DIV_ROUND_UP(len, 4);
}

static bool
ascii85_encode(u32 in, char *out)
{
	int i;

	if (in == 0)
		return false;

	out[5] = '\0';
	for (i = 5; i--; ) {
		out[i] = '!' + in % 85;
		in /= 85;
	}

	return true;
}

static void print_error_obj(struct drm_i915_error_state_buf *m,
			    struct intel_engine_cs *engine,
			    const char *name,
			    struct drm_i915_error_object *obj)
{
	char out[6];
	int page;

	if (!obj)
		return;

	if (name) {
		err_printf(m, "%s --- %s = 0x%08x %08x\n",
			   engine ? engine->name : "global", name,
			   upper_32_bits(obj->gtt_offset),
			   lower_32_bits(obj->gtt_offset));
	}

	err_compression_marker(m);
	for (page = 0; page < obj->page_count; page++) {
		int i, len;

		len = PAGE_SIZE;
		if (page == obj->page_count - 1)
			len -= obj->unused;
		len = ascii85_encode_len(len);

		for (i = 0; i < len; i++) {
			if (ascii85_encode(obj->pages[page][i], out))
				err_puts(m, out);
			else
				err_puts(m, "z");
		}
	}
	err_puts(m, "\n");
}

static void err_print_capabilities(struct drm_i915_error_state_buf *m,
				   const struct intel_device_info *info)
{
#define PRINT_FLAG(x) err_printf(m, #x ": %s\n", yesno(info->x))
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
}

static __always_inline void err_print_param(struct drm_i915_error_state_buf *m,
					    const char *name,
					    const char *type,
					    const void *x)
{
	if (!__builtin_strcmp(type, "bool"))
		err_printf(m, "i915.%s=%s\n", name, yesno(*(const bool *)x));
	else if (!__builtin_strcmp(type, "int"))
		err_printf(m, "i915.%s=%d\n", name, *(const int *)x);
	else if (!__builtin_strcmp(type, "unsigned int"))
		err_printf(m, "i915.%s=%u\n", name, *(const unsigned int *)x);
	else if (!__builtin_strcmp(type, "char *"))
		err_printf(m, "i915.%s=%s\n", name, *(const char **)x);
	else
		BUILD_BUG();
}

static void err_print_params(struct drm_i915_error_state_buf *m,
			     const struct i915_params *p)
{
#define PRINT(T, x) err_print_param(m, #x, #T, &p->x);
	I915_PARAMS_FOR_EACH(PRINT);
#undef PRINT
}

static void err_print_pciid(struct drm_i915_error_state_buf *m,
			    struct drm_i915_private *i915)
{
	struct pci_dev *pdev = i915->drm.pdev;

	err_printf(m, "PCI ID: 0x%04x\n", pdev->device);
	err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision);
	err_printf(m, "PCI Subsystem: %04x:%04x\n",
		   pdev->subsystem_vendor,
		   pdev->subsystem_device);
}

int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
			    const struct i915_gpu_state *error)
{
	struct drm_i915_private *dev_priv = m->i915;
	struct drm_i915_error_object *obj;
	int i, j;

	if (!error) {
		err_printf(m, "No error state collected\n");
		return 0;
	}

	if (*error->error_msg)
		err_printf(m, "%s\n", error->error_msg);
	err_printf(m, "Kernel: " UTS_RELEASE "\n");
	err_printf(m, "Time: %ld s %ld us\n",
		   error->time.tv_sec, error->time.tv_usec);
	err_printf(m, "Boottime: %ld s %ld us\n",
		   error->boottime.tv_sec, error->boottime.tv_usec);
	err_printf(m, "Uptime: %ld s %ld us\n",
		   error->uptime.tv_sec, error->uptime.tv_usec);

	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		if (error->engine[i].hangcheck_stalled &&
		    error->engine[i].context.pid) {
			err_printf(m, "Active process (on ring %s): %s [%d], score %d\n",
				   engine_str(i),
				   error->engine[i].context.comm,
				   error->engine[i].context.pid,
				   error->engine[i].context.ban_score);
		}
	}
	err_printf(m, "Reset count: %u\n", error->reset_count);
	err_printf(m, "Suspend count: %u\n", error->suspend_count);
	err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform));
	err_print_pciid(m, error->i915);

	err_printf(m, "IOMMU enabled?: %d\n", error->iommu);

	if (HAS_CSR(dev_priv)) {
		struct intel_csr *csr = &dev_priv->csr;

		err_printf(m, "DMC loaded: %s\n",
			   yesno(csr->dmc_payload != NULL));
		err_printf(m, "DMC fw version: %d.%d\n",
			   CSR_VERSION_MAJOR(csr->version),
			   CSR_VERSION_MINOR(csr->version));
	}

	err_printf(m, "GT awake: %s\n", yesno(error->awake));
	err_printf(m, "RPM wakelock: %s\n", yesno(error->wakelock));
	err_printf(m, "PM suspended: %s\n", yesno(error->suspended));
	err_printf(m, "EIR: 0x%08x\n", error->eir);
	err_printf(m, "IER: 0x%08x\n", error->ier);
	for (i = 0; i < error->ngtier; i++)
		err_printf(m, "GTIER[%d]: 0x%08x\n", i, error->gtier[i]);
	err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
	err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
	err_printf(m, "CCID: 0x%08x\n", error->ccid);
	err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);

	for (i = 0; i < error->nfence; i++)
		err_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);

	if (INTEL_GEN(dev_priv) >= 6) {
		err_printf(m, "ERROR: 0x%08x\n", error->error);

		if (INTEL_GEN(dev_priv) >= 8)
			err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
				   error->fault_data1, error->fault_data0);

		err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
	}

	if (IS_GEN7(dev_priv))
		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);

	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		if (error->engine[i].engine_id != -1)
			error_print_engine(m, &error->engine[i]);
	}

	for (i = 0; i < ARRAY_SIZE(error->active_vm); i++) {
		char buf[128];
		int len, first = 1;

		if (!error->active_vm[i])
			break;

		len = scnprintf(buf, sizeof(buf), "Active (");
		for (j = 0; j < ARRAY_SIZE(error->engine); j++) {
			if (error->engine[j].vm != error->active_vm[i])
				continue;

			len += scnprintf(buf + len, sizeof(buf) - len, "%s%s",
					 first ? "" : ", ",
					 dev_priv->engine[j]->name);
			first = 0;
		}
		scnprintf(buf + len, sizeof(buf) - len, ")");
		print_error_buffers(m, buf,
				    error->active_bo[i],
				    error->active_bo_count[i]);
	}

	print_error_buffers(m, "Pinned (global)",
			    error->pinned_bo,
			    error->pinned_bo_count);

	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		const struct drm_i915_error_engine *ee = &error->engine[i];

		obj = ee->batchbuffer;
		if (obj) {
			err_puts(m, dev_priv->engine[i]->name);
			if (ee->context.pid)
				err_printf(m, " (submitted by %s [%d], ctx %d [%d], score %d)",
					   ee->context.comm,
					   ee->context.pid,
					   ee->context.handle,
					   ee->context.hw_id,
					   ee->context.ban_score);
			err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
				   upper_32_bits(obj->gtt_offset),
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, dev_priv->engine[i], NULL, obj);
		}

		for (j = 0; j < ee->user_bo_count; j++)
			print_error_obj(m, dev_priv->engine[i],
					"user", ee->user_bo[j]);

		if (ee->num_requests) {
			err_printf(m, "%s --- %d requests\n",
				   dev_priv->engine[i]->name,
				   ee->num_requests);
			for (j = 0; j < ee->num_requests; j++)
				error_print_request(m, " ", &ee->requests[j]);
		}

		if (IS_ERR(ee->waiters)) {
			err_printf(m, "%s --- ? waiters [unable to acquire spinlock]\n",
				   dev_priv->engine[i]->name);
		} else if (ee->num_waiters) {
			err_printf(m, "%s --- %d waiters\n",
				   dev_priv->engine[i]->name,
				   ee->num_waiters);
			for (j = 0; j < ee->num_waiters; j++) {
				err_printf(m, " seqno 0x%08x for %s [%d]\n",
					   ee->waiters[j].seqno,
					   ee->waiters[j].comm,
					   ee->waiters[j].pid);
			}
		}

		print_error_obj(m, dev_priv->engine[i],
				"ringbuffer", ee->ringbuffer);

		print_error_obj(m, dev_priv->engine[i],
				"HW Status", ee->hws_page);

		print_error_obj(m, dev_priv->engine[i],
				"HW context", ee->ctx);

		print_error_obj(m, dev_priv->engine[i],
				"WA context", ee->wa_ctx);

		print_error_obj(m, dev_priv->engine[i],
				"WA batchbuffer", ee->wa_batchbuffer);
	}

	print_error_obj(m, NULL, "Semaphores", error->semaphore);

	print_error_obj(m, NULL, "GuC log buffer", error->guc_log);

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, error->display);

	err_print_capabilities(m, &error->device_info);
	err_print_params(m, &error->params);

	if (m->bytes == 0 && m->err)
		return m->err;

	return 0;
}

int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
			      struct drm_i915_private *i915,
			      size_t count, loff_t pos)
{
	memset(ebuf, 0, sizeof(*ebuf));
	ebuf->i915 = i915;

	/* We need enough room to hold any single i915_error_state printf
	 * so that we can move it to the start position.
	 */
	ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
	ebuf->buf = kmalloc(ebuf->size,
			    GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);

	if (ebuf->buf == NULL) {
		ebuf->size = PAGE_SIZE;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL) {
		ebuf->size = 128;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL)
		return -ENOMEM;

	ebuf->start = pos;

	return 0;
}

static void i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		free_page((unsigned long)obj->pages[page]);

	kfree(obj);
}

static __always_inline void free_param(const char *type, void *x)
{
	if (!__builtin_strcmp(type, "char *"))
		kfree(*(void **)x);
}

void __i915_gpu_state_free(struct kref *error_ref)
{
	struct i915_gpu_state *error =
		container_of(error_ref, typeof(*error), ref);
	long i, j;

	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		struct drm_i915_error_engine *ee = &error->engine[i];

		for (j = 0; j < ee->user_bo_count; j++)
			i915_error_object_free(ee->user_bo[j]);
		kfree(ee->user_bo);

		i915_error_object_free(ee->batchbuffer);
		i915_error_object_free(ee->wa_batchbuffer);
		i915_error_object_free(ee->ringbuffer);
		i915_error_object_free(ee->hws_page);
		i915_error_object_free(ee->ctx);
		i915_error_object_free(ee->wa_ctx);

		kfree(ee->requests);
		if (!IS_ERR_OR_NULL(ee->waiters))
			kfree(ee->waiters);
	}

	i915_error_object_free(error->semaphore);
	i915_error_object_free(error->guc_log);

	for (i = 0; i < ARRAY_SIZE(error->active_bo); i++)
		kfree(error->active_bo[i]);
	kfree(error->pinned_bo);

	kfree(error->overlay);
	kfree(error->display);

#define FREE(T, x) free_param(#T, &error->params.x);
	I915_PARAMS_FOR_EACH(FREE);
#undef FREE

	kfree(error);
}
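
/*
 * Snapshot an object's contents without touching the normal locking rules:
 * each backing page is bound, one at a time, into the GGTT slot reserved at
 * ggtt->error_capture, read through an atomic WC mapping and fed to the
 * compressor. Everything is allocated GFP_ATOMIC since we may be capturing
 * from hard irq context or from under stop_machine(). The 10/8 page estimate
 * leaves headroom for the (rare) case where zlib expands the data.
 */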
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *i915,
			 struct i915_vma *vma)
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	const u64 slot = ggtt->error_capture.start;
	struct drm_i915_error_object *dst;
	struct compress compress;
	unsigned long num_pages;
	struct sgt_iter iter;
	dma_addr_t dma;

	if (!vma)
		return NULL;

	num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT;
	num_pages = DIV_ROUND_UP(10 * num_pages, 8); /* worstcase zlib growth */
	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *),
		      GFP_ATOMIC | __GFP_NOWARN);
	if (!dst)
		return NULL;

	dst->gtt_offset = vma->node.start;
	dst->gtt_size = vma->node.size;
	dst->page_count = 0;
	dst->unused = 0;

	if (!compress_init(&compress)) {
		kfree(dst);
		return NULL;
	}

	for_each_sgt_dma(dma, iter, vma->pages) {
		void __iomem *s;
		int ret;

		ggtt->base.insert_page(&ggtt->base, dma, slot,
				       I915_CACHE_NONE, 0);

		s = io_mapping_map_atomic_wc(&ggtt->mappable, slot);
		ret = compress_page(&compress, (void __force *)s, dst);
		io_mapping_unmap_atomic(s);

		if (ret)
			goto unwind;
	}
	goto out;

unwind:
	while (dst->page_count--)
		free_page((unsigned long)dst->pages[dst->page_count]);
	kfree(dst);
	dst = NULL;

out:
	compress_fini(&compress, dst);
	ggtt->base.clear_range(&ggtt->base, slot, PAGE_SIZE);
	return dst;
}

/* The error capture is special as it tries to run underneath the normal
 * locking rules - so we use the raw version of the i915_gem_active lookup.
 */
static inline uint32_t
__active_get_seqno(struct i915_gem_active *active)
{
	struct drm_i915_gem_request *request;

	request = __i915_gem_active_peek(active);
	return request ? request->global_seqno : 0;
}

static inline int
__active_get_engine_id(struct i915_gem_active *active)
{
	struct drm_i915_gem_request *request;

	request = __i915_gem_active_peek(active);
	return request ? request->engine->id : -1;
}

static void capture_bo(struct drm_i915_error_buffer *err,
		       struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	int i;

	err->size = obj->base.size;
	err->name = obj->base.name;

	for (i = 0; i < I915_NUM_ENGINES; i++)
		err->rseqno[i] = __active_get_seqno(&vma->last_read[i]);
	err->wseqno = __active_get_seqno(&obj->frontbuffer_write);
	err->engine = __active_get_engine_id(&obj->frontbuffer_write);

	err->gtt_offset = vma->node.start;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = vma->fence ? vma->fence->id : -1;
	err->tiling = i915_gem_object_get_tiling(obj);
	err->dirty = obj->mm.dirty;
	err->purgeable = obj->mm.madv != I915_MADV_WILLNEED;
	err->userptr = obj->userptr.mm != NULL;
	err->cache_level = obj->cache_level;
}

static u32 capture_error_bo(struct drm_i915_error_buffer *err,
			    int count, struct list_head *head,
			    bool pinned_only)
{
	struct i915_vma *vma;
	int i = 0;

	list_for_each_entry(vma, head, vm_link) {
		if (pinned_only && !i915_vma_is_pinned(vma))
			continue;

		capture_bo(err++, vma);
		if (++i == count)
			break;
	}

	return i;
}

/* Generate a semi-unique error code. The code is not meant to have meaning;
 * its only purpose is to try to prevent duplicate bug reports by grossly
 * estimating a GPU error state.
 *
 * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
 * the hang if we could strip the GTT offset information from it.
 *
 * It's only a small step better than a random number in its current form.
 */
static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
					 struct i915_gpu_state *error,
					 int *engine_id)
{
	uint32_t error_code = 0;
	int i;

	/* IPEHR would be an ideal way to detect errors, as it's the gross
	 * measure of "the command that hung". However, it contains some very
	 * common synchronization commands which almost always appear when the
	 * hang is strictly a client bug. Use instdone to differentiate those
	 * cases somewhat.
	 */
	for (i = 0; i < I915_NUM_ENGINES; i++) {
		if (error->engine[i].hangcheck_stalled) {
			if (engine_id)
				*engine_id = i;

			return error->engine[i].ipehr ^
			       error->engine[i].instdone.instdone;
		}
	}

	return error_code;
}

static void i915_gem_record_fences(struct drm_i915_private *dev_priv,
				   struct i915_gpu_state *error)
{
	int i;

	if (INTEL_GEN(dev_priv) >= 6) {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i));
	} else if (INTEL_GEN(dev_priv) >= 4) {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_LO(i));
	} else {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ(FENCE_REG(i));
	}
	error->nfence = i;
}

static inline u32
gen8_engine_sync_index(struct intel_engine_cs *engine,
		       struct intel_engine_cs *other)
{
	int idx;

	/*
	 * rcs  -> 0 = vcs,  1 = bcs,  2 = vecs, 3 = vcs2;
	 * vcs  -> 0 = bcs,  1 = vecs, 2 = vcs2, 3 = rcs;
	 * bcs  -> 0 = vecs, 1 = vcs2, 2 = rcs,  3 = vcs;
	 * vecs -> 0 = vcs2, 1 = rcs,  2 = vcs,  3 = bcs;
	 * vcs2 -> 0 = rcs,  1 = vcs,  2 = bcs,  3 = vecs;
	 */

	idx = (other - engine) - 1;
	if (idx < 0)
		idx += I915_NUM_ENGINES;

	return idx;
}

static void gen8_record_semaphore_state(struct i915_gpu_state *error,
					struct intel_engine_cs *engine,
					struct drm_i915_error_engine *ee)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_engine_cs *to;
	enum intel_engine_id id;

	if (!error->semaphore)
		return;

	for_each_engine(to, dev_priv, id) {
		int idx;
		u16 signal_offset;
		u32 *tmp;

		if (engine == to)
			continue;

		signal_offset =
			(GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1)) / 4;
		tmp = error->semaphore->pages[0];
		idx = gen8_engine_sync_index(engine, to);

		ee->semaphore_mboxes[idx] = tmp[signal_offset];
	}
}

static void gen6_record_semaphore_state(struct intel_engine_cs *engine,
					struct drm_i915_error_engine *ee)
{
	struct drm_i915_private *dev_priv = engine->i915;

	ee->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base));
	ee->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base));
	if (HAS_VEBOX(dev_priv))
		ee->semaphore_mboxes[2] =
			I915_READ(RING_SYNC_2(engine->mmio_base));
}
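
/*
 * The waiter rbtree is normally protected by b->rb_lock, but the capture may
 * run under stop_machine() with another CPU frozen while holding that lock,
 * so only spin_trylock_irq() is safe here. The tree is walked twice: once to
 * size the allocation and again to fill it, dropping the lock in between for
 * the GFP_ATOMIC allocation; ERR_PTR(-EDEADLK) records that the lock could
 * not be taken, which the printer reports as "unable to acquire spinlock".
 */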
static void error_record_engine_waiters(struct intel_engine_cs *engine,
					struct drm_i915_error_engine *ee)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct drm_i915_error_waiter *waiter;
	struct rb_node *rb;
	int count;

	ee->num_waiters = 0;
	ee->waiters = NULL;

	if (RB_EMPTY_ROOT(&b->waiters))
		return;

	if (!spin_trylock_irq(&b->rb_lock)) {
		ee->waiters = ERR_PTR(-EDEADLK);
		return;
	}

	count = 0;
	for (rb = rb_first(&b->waiters); rb != NULL; rb = rb_next(rb))
		count++;
	spin_unlock_irq(&b->rb_lock);

	waiter = NULL;
	if (count)
		waiter = kmalloc_array(count,
				       sizeof(struct drm_i915_error_waiter),
				       GFP_ATOMIC);
	if (!waiter)
		return;

	if (!spin_trylock_irq(&b->rb_lock)) {
		kfree(waiter);
		ee->waiters = ERR_PTR(-EDEADLK);
		return;
	}

	ee->waiters = waiter;
	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
		struct intel_wait *w = rb_entry(rb, typeof(*w), node);

		strcpy(waiter->comm, w->tsk->comm);
		waiter->pid = w->tsk->pid;
		waiter->seqno = w->seqno;
		waiter++;

		if (++ee->num_waiters == count)
			break;
	}
	spin_unlock_irq(&b->rb_lock);
}

static void error_record_engine_registers(struct i915_gpu_state *error,
					  struct intel_engine_cs *engine,
					  struct drm_i915_error_engine *ee)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (INTEL_GEN(dev_priv) >= 6) {
		ee->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
		ee->fault_reg = I915_READ(RING_FAULT_REG(engine));
		if (INTEL_GEN(dev_priv) >= 8)
			gen8_record_semaphore_state(error, engine, ee);
		else
			gen6_record_semaphore_state(engine, ee);
	}

	if (INTEL_GEN(dev_priv) >= 4) {
		ee->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
		ee->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
		ee->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
		ee->instps = I915_READ(RING_INSTPS(engine->mmio_base));
		ee->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
		if (INTEL_GEN(dev_priv) >= 8) {
			ee->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
			ee->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
		}
		ee->bbstate = I915_READ(RING_BBSTATE(engine->mmio_base));
	} else {
		ee->faddr = I915_READ(DMA_FADD_I8XX);
		ee->ipeir = I915_READ(IPEIR);
		ee->ipehr = I915_READ(IPEHR);
	}

	intel_engine_get_instdone(engine, &ee->instdone);

	ee->waiting = intel_engine_has_waiter(engine);
	ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
	ee->acthd = intel_engine_get_active_head(engine);
	ee->seqno = intel_engine_get_seqno(engine);
	ee->last_seqno = intel_engine_last_submit(engine);
	ee->start = I915_READ_START(engine);
	ee->head = I915_READ_HEAD(engine);
	ee->tail = I915_READ_TAIL(engine);
	ee->ctl = I915_READ_CTL(engine);
	if (INTEL_GEN(dev_priv) > 2)
		ee->mode = I915_READ_MODE(engine);

	if (!HWS_NEEDS_PHYSICAL(dev_priv)) {
		i915_reg_t mmio;

		if (IS_GEN7(dev_priv)) {
			switch (engine->id) {
			default:
			case RCS:
				mmio = RENDER_HWS_PGA_GEN7;
				break;
			case BCS:
				mmio = BLT_HWS_PGA_GEN7;
				break;
			case VCS:
				mmio = BSD_HWS_PGA_GEN7;
				break;
			case VECS:
				mmio = VEBOX_HWS_PGA_GEN7;
				break;
			}
		} else if (IS_GEN6(engine->i915)) {
			mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
		} else {
			/* XXX: gen8 returns to sanity */
			mmio = RING_HWS_PGA(engine->mmio_base);
		}

		ee->hws = I915_READ(mmio);
	}

	ee->hangcheck_timestamp = engine->hangcheck.action_timestamp;
	ee->hangcheck_action = engine->hangcheck.action;
	ee->hangcheck_stalled = engine->hangcheck.stalled;

	if (USES_PPGTT(dev_priv)) {
		int i;

		ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));

		if (IS_GEN6(dev_priv))
			ee->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE_READ(engine));
		else if (IS_GEN7(dev_priv))
			ee->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE(engine));
		else if (INTEL_GEN(dev_priv) >= 8)
			for (i = 0; i < 4; i++) {
				ee->vm_info.pdp[i] =
					I915_READ(GEN8_RING_PDP_UDW(engine, i));
				ee->vm_info.pdp[i] <<= 32;
				ee->vm_info.pdp[i] |=
					I915_READ(GEN8_RING_PDP_LDW(engine, i));
			}
	}
}

static void record_request(struct drm_i915_gem_request *request,
			   struct drm_i915_error_request *erq)
{
	erq->context = request->ctx->hw_id;
	erq->ban_score = request->ctx->ban_score;
	erq->seqno = request->global_seqno;
	erq->jiffies = request->emitted_jiffies;
	erq->head = request->head;
	erq->tail = request->tail;

	rcu_read_lock();
	erq->pid = request->ctx->pid ? pid_nr(request->ctx->pid) : 0;
	rcu_read_unlock();
}
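
/*
 * Snapshot the tail of the engine's timeline starting from the hung request.
 * The list is walked twice, once to size the array and once to copy it, so
 * the copy loop re-checks the count and bails out early if the list grew in
 * between (see the comment inside the loop below).
 */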
static void engine_record_requests(struct intel_engine_cs *engine,
				   struct drm_i915_gem_request *first,
				   struct drm_i915_error_engine *ee)
{
	struct drm_i915_gem_request *request;
	int count;

	count = 0;
	request = first;
	list_for_each_entry_from(request, &engine->timeline->requests, link)
		count++;
	if (!count)
		return;

	ee->requests = kcalloc(count, sizeof(*ee->requests), GFP_ATOMIC);
	if (!ee->requests)
		return;

	ee->num_requests = count;

	count = 0;
	request = first;
	list_for_each_entry_from(request, &engine->timeline->requests, link) {
		if (count >= ee->num_requests) {
			/*
			 * If the ring request list was changed in
			 * between the point where the error request
			 * list was created and dimensioned and this
			 * point then just exit early to avoid crashes.
			 *
			 * We don't need to communicate that the
			 * request list changed state during error
			 * state capture and that the error state is
			 * slightly incorrect as a consequence since we
			 * are typically only interested in the request
			 * list state at the point of error state
			 * capture, not in any changes happening during
			 * the capture.
			 */
			break;
		}

		record_request(request, &ee->requests[count++]);
	}
	ee->num_requests = count;
}

static void error_record_engine_execlists(struct intel_engine_cs *engine,
					  struct drm_i915_error_engine *ee)
{
	unsigned int n;

	for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++)
		if (engine->execlist_port[n].request)
			record_request(engine->execlist_port[n].request,
				       &ee->execlist[n]);
}

static void record_context(struct drm_i915_error_context *e,
			   struct i915_gem_context *ctx)
{
	if (ctx->pid) {
		struct task_struct *task;

		rcu_read_lock();
		task = pid_task(ctx->pid, PIDTYPE_PID);
		if (task) {
			strcpy(e->comm, task->comm);
			e->pid = task->pid;
		}
		rcu_read_unlock();
	}

	e->handle = ctx->user_handle;
	e->hw_id = ctx->hw_id;
	e->ban_score = ctx->ban_score;
	e->guilty = ctx->guilty_count;
	e->active = ctx->active_count;
}

static void request_record_user_bo(struct drm_i915_gem_request *request,
				   struct drm_i915_error_engine *ee)
{
	struct i915_gem_capture_list *c;
	struct drm_i915_error_object **bo;
	long count;

	count = 0;
	for (c = request->capture_list; c; c = c->next)
		count++;

	bo = NULL;
	if (count)
		bo = kcalloc(count, sizeof(*bo), GFP_ATOMIC);
	if (!bo)
		return;

	count = 0;
	for (c = request->capture_list; c; c = c->next) {
		bo[count] = i915_error_object_create(request->i915, c->vma);
		if (!bo[count])
			break;
		count++;
	}

	ee->user_bo = bo;
	ee->user_bo_count = count;
}

static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
				  struct i915_gpu_state *error)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	int i;

	error->semaphore =
		i915_error_object_create(dev_priv, dev_priv->semaphore);

	for (i = 0; i < I915_NUM_ENGINES; i++) {
		struct intel_engine_cs *engine = dev_priv->engine[i];
		struct drm_i915_error_engine *ee = &error->engine[i];
		struct drm_i915_gem_request *request;

		ee->engine_id = -1;

		if (!engine)
			continue;

		ee->engine_id = i;

		error_record_engine_registers(error, engine, ee);
		error_record_engine_waiters(engine, ee);
		error_record_engine_execlists(engine, ee);

		request = i915_gem_find_active_request(engine);
		if (request) {
			struct intel_ring *ring;

			ee->vm = request->ctx->ppgtt ?
				&request->ctx->ppgtt->base : &ggtt->base;

			record_context(&ee->context, request->ctx);

			/* We need to copy these to an anonymous buffer
			 * as the simplest method to avoid being overwritten
			 * by userspace.
			 */
			ee->batchbuffer =
				i915_error_object_create(dev_priv,
							 request->batch);

			if (HAS_BROKEN_CS_TLB(dev_priv))
				ee->wa_batchbuffer =
					i915_error_object_create(dev_priv,
								 engine->scratch);

			request_record_user_bo(request, ee);

			ee->ctx =
				i915_error_object_create(dev_priv,
							 request->ctx->engine[i].state);

			error->simulated |=
				i915_gem_context_no_error_capture(request->ctx);

			ee->rq_head = request->head;
			ee->rq_post = request->postfix;
			ee->rq_tail = request->tail;

			ring = request->ring;
			ee->cpu_ring_head = ring->head;
			ee->cpu_ring_tail = ring->tail;
			ee->ringbuffer =
				i915_error_object_create(dev_priv, ring->vma);

			engine_record_requests(engine, request, ee);
		}

		ee->hws_page =
			i915_error_object_create(dev_priv,
						 engine->status_page.vma);

		ee->wa_ctx =
			i915_error_object_create(dev_priv, engine->wa_ctx.vma);
	}
}

static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
				struct i915_gpu_state *error,
				struct i915_address_space *vm,
				int idx)
{
	struct drm_i915_error_buffer *active_bo;
	struct i915_vma *vma;
	int count;

	count = 0;
	list_for_each_entry(vma, &vm->active_list, vm_link)
		count++;

	active_bo = NULL;
	if (count)
		active_bo = kcalloc(count, sizeof(*active_bo), GFP_ATOMIC);
	if (active_bo)
		count = capture_error_bo(active_bo, count, &vm->active_list, false);
	else
		count = 0;

	error->active_vm[idx] = vm;
	error->active_bo[idx] = active_bo;
	error->active_bo_count[idx] = count;
}

static void i915_capture_active_buffers(struct drm_i915_private *dev_priv,
					struct i915_gpu_state *error)
{
	int cnt = 0, i, j;

	BUILD_BUG_ON(ARRAY_SIZE(error->engine) > ARRAY_SIZE(error->active_bo));
	BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_vm));
	BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_bo_count));

	/* Scan each engine looking for unique active contexts/vm */
	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		struct drm_i915_error_engine *ee = &error->engine[i];
		bool found;

		if (!ee->vm)
			continue;

		found = false;
		for (j = 0; j < i && !found; j++)
			found = error->engine[j].vm == ee->vm;
		if (!found)
			i915_gem_capture_vm(dev_priv, error, ee->vm, cnt++);
	}
}

static void i915_capture_pinned_buffers(struct drm_i915_private *dev_priv,
					struct i915_gpu_state *error)
{
	struct i915_address_space *vm = &dev_priv->ggtt.base;
	struct drm_i915_error_buffer *bo;
	struct i915_vma *vma;
	int count_inactive, count_active;

	/* Note: the two counters are named back to front with respect to the
	 * lists they walk, but they are used consistently below.
	 */
	count_inactive = 0;
	list_for_each_entry(vma, &vm->active_list, vm_link)
		count_inactive++;

	count_active = 0;
	list_for_each_entry(vma, &vm->inactive_list, vm_link)
		count_active++;

	bo = NULL;
	if (count_inactive + count_active)
		bo = kcalloc(count_inactive + count_active,
			     sizeof(*bo), GFP_ATOMIC);
	if (!bo)
		return;

	count_inactive = capture_error_bo(bo, count_inactive,
					  &vm->active_list, true);
	count_active = capture_error_bo(bo + count_inactive, count_active,
					&vm->inactive_list, true);
	error->pinned_bo_count = count_inactive + count_active;
	error->pinned_bo = bo;
}

static void i915_gem_capture_guc_log_buffer(struct drm_i915_private *dev_priv,
					    struct i915_gpu_state *error)
{
	/* Capturing log buf contents won't be useful if logging was disabled */
	if (!dev_priv->guc.log.vma || (i915.guc_log_level < 0))
		return;

	error->guc_log = i915_error_object_create(dev_priv,
						  dev_priv->guc.log.vma);
}

/* Capture all registers which don't fit into another category. */
static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
				   struct i915_gpu_state *error)
{
	int i;

	/* General organization
	 * 1. Registers specific to a single generation
	 * 2. Registers which belong to multiple generations
	 * 3. Feature specific registers.
	 * 4. Everything else
	 * Please try to follow the order.
	 */

	/* 1: Registers specific to a single generation */
	if (IS_VALLEYVIEW(dev_priv)) {
		error->gtier[0] = I915_READ(GTIER);
		error->ier = I915_READ(VLV_IER);
		error->forcewake = I915_READ_FW(FORCEWAKE_VLV);
	}

	if (IS_GEN7(dev_priv))
		error->err_int = I915_READ(GEN7_ERR_INT);

	if (INTEL_GEN(dev_priv) >= 8) {
		error->fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
		error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
	}

	if (IS_GEN6(dev_priv)) {
		error->forcewake = I915_READ_FW(FORCEWAKE);
		error->gab_ctl = I915_READ(GAB_CTL);
		error->gfx_mode = I915_READ(GFX_MODE);
	}

	/* 2: Registers which belong to multiple generations */
	if (INTEL_GEN(dev_priv) >= 7)
		error->forcewake = I915_READ_FW(FORCEWAKE_MT);

	if (INTEL_GEN(dev_priv) >= 6) {
		error->derrmr = I915_READ(DERRMR);
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	if (INTEL_GEN(dev_priv) >= 5)
		error->ccid = I915_READ(CCID);

	/* 3: Feature specific registers */
	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
		error->gam_ecochk = I915_READ(GAM_ECOCHK);
		error->gac_eco = I915_READ(GAC_ECO_BITS);
	}

	/* 4: Everything else */
	if (INTEL_GEN(dev_priv) >= 8) {
		error->ier = I915_READ(GEN8_DE_MISC_IER);
		for (i = 0; i < 4; i++)
			error->gtier[i] = I915_READ(GEN8_GT_IER(i));
		error->ngtier = 4;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		error->ier = I915_READ(DEIER);
		error->gtier[0] = I915_READ(GTIER);
		error->ngtier = 1;
	} else if (IS_GEN2(dev_priv)) {
		error->ier = I915_READ16(IER);
	} else if (!IS_VALLEYVIEW(dev_priv)) {
		error->ier = I915_READ(IER);
	}
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
}

static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
				   struct i915_gpu_state *error,
				   u32 engine_mask,
				   const char *error_msg)
{
	u32 ecode;
	int engine_id = -1, len;

	ecode = i915_error_generate_code(dev_priv, error, &engine_id);

	len = scnprintf(error->error_msg, sizeof(error->error_msg),
			"GPU HANG: ecode %d:%d:0x%08x",
			INTEL_GEN(dev_priv), engine_id, ecode);

	if (engine_id != -1 && error->engine[engine_id].context.pid)
		len += scnprintf(error->error_msg + len,
				 sizeof(error->error_msg) - len,
				 ", in %s [%d]",
				 error->engine[engine_id].context.comm,
				 error->engine[engine_id].context.pid);

	scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
		  ", reason: %s, action: %s",
		  error_msg,
		  engine_mask ? "reset" : "continue");
}

static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
				   struct i915_gpu_state *error)
{
	error->awake = dev_priv->gt.awake;
	error->wakelock = atomic_read(&dev_priv->pm.wakeref_count);
	error->suspended = dev_priv->pm.suspended;

	error->iommu = -1;
#ifdef CONFIG_INTEL_IOMMU
	error->iommu = intel_iommu_gfx_mapped;
#endif
	error->reset_count = i915_reset_count(&dev_priv->gpu_error);
	error->suspend_count = dev_priv->suspend_count;

	memcpy(&error->device_info,
	       INTEL_INFO(dev_priv),
	       sizeof(error->device_info));
}

static __always_inline void dup_param(const char *type, void *x)
{
	if (!__builtin_strcmp(type, "char *"))
		*(void **)x = kstrdup(*(void **)x, GFP_ATOMIC);
}

static int capture(void *data)
{
	struct i915_gpu_state *error = data;

	do_gettimeofday(&error->time);
	error->boottime = ktime_to_timeval(ktime_get_boottime());
	error->uptime =
		ktime_to_timeval(ktime_sub(ktime_get(),
					   error->i915->gt.last_init_time));

	error->params = i915;
#define DUP(T, x) dup_param(#T, &error->params.x);
	I915_PARAMS_FOR_EACH(DUP);
#undef DUP

	i915_capture_gen_state(error->i915, error);
	i915_capture_reg_state(error->i915, error);
	i915_gem_record_fences(error->i915, error);
	i915_gem_record_rings(error->i915, error);
	i915_capture_active_buffers(error->i915, error);
	i915_capture_pinned_buffers(error->i915, error);
	i915_gem_capture_guc_log_buffer(error->i915, error);

	error->overlay = intel_overlay_capture_error_state(error->i915);
	error->display = intel_display_capture_error_state(error->i915);

	return 0;
}

#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))
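
/*
 * capture() is run via stop_machine() so that every other CPU is quiesced
 * while the snapshot is taken; this is the closest we get to a consistent,
 * atomic view of the GPU and driver state. It is also why all allocations
 * on this path must be GFP_ATOMIC - sleeping would stall the whole machine.
 */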
struct i915_gpu_state *
i915_capture_gpu_state(struct drm_i915_private *i915)
{
	struct i915_gpu_state *error;

	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error)
		return NULL;

	kref_init(&error->ref);
	error->i915 = i915;

	stop_machine(capture, error, NULL);

	return error;
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev_priv: i915 device private
 * @engine_mask: mask of engines being reset, 0 if we only continue
 * @error_msg: a message describing the reason for the capture
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
void i915_capture_error_state(struct drm_i915_private *dev_priv,
			      u32 engine_mask,
			      const char *error_msg)
{
	static bool warned;
	struct i915_gpu_state *error;
	unsigned long flags;

	if (!i915.error_capture)
		return;

	if (READ_ONCE(dev_priv->gpu_error.first_error))
		return;

	error = i915_capture_gpu_state(dev_priv);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	i915_error_capture_msg(dev_priv, error, engine_mask, error_msg);
	DRM_INFO("%s\n", error->error_msg);

	if (!error->simulated) {
		spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
		if (!dev_priv->gpu_error.first_error) {
			dev_priv->gpu_error.first_error = error;
			error = NULL;
		}
		spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
	}

	if (error) {
		__i915_gpu_state_free(&error->ref);
		return;
	}

	if (!warned &&
	    ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
		DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
		DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
		DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
		DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
		DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
			 dev_priv->drm.primary->index);
		warned = true;
	}
}

struct i915_gpu_state *
i915_first_error_state(struct drm_i915_private *i915)
{
	struct i915_gpu_state *error;

	spin_lock_irq(&i915->gpu_error.lock);
	error = i915->gpu_error.first_error;
	if (error)
		i915_gpu_state_get(error);
	spin_unlock_irq(&i915->gpu_error.lock);

	return error;
}

void i915_reset_error_state(struct drm_i915_private *i915)
{
	struct i915_gpu_state *error;

	spin_lock_irq(&i915->gpu_error.lock);
	error = i915->gpu_error.first_error;
	i915->gpu_error.first_error = NULL;
	spin_unlock_irq(&i915->gpu_error.lock);

	i915_gpu_state_put(error);
}