/*
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 *
 */

#include <generated/utsrelease.h>
#include "i915_drv.h"

static const char *ring_str(int ring)
{
	switch (ring) {
	case RCS: return "render";
	case VCS: return "bsd";
	case BCS: return "blt";
	case VECS: return "vebox";
	case VCS2: return "bsd2";
	default: return "";
	}
}

static const char *pin_flag(int pinned)
{
	if (pinned > 0)
		return " P";
	else if (pinned < 0)
		return " p";
	else
		return "";
}

static const char *tiling_flag(int tiling)
{
	switch (tiling) {
	default:
	case I915_TILING_NONE: return "";
	case I915_TILING_X: return " X";
	case I915_TILING_Y: return " Y";
	}
}

static const char *dirty_flag(int dirty)
{
	return dirty ? " dirty" : "";
}

static const char *purgeable_flag(int purgeable)
{
	return purgeable ? " purgeable" : "";
}

static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
{
	if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
		e->err = -ENOSPC;
		return false;
	}

	if (e->bytes == e->size - 1 || e->err)
		return false;

	return true;
}

static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
			      unsigned len)
{
	if (e->pos + len <= e->start) {
		e->pos += len;
		return false;
	}

	/* First vsnprintf needs to fit in its entirety for memmove */
	if (len >= e->size) {
		e->err = -EIO;
		return false;
	}

	return true;
}

static void __i915_error_advance(struct drm_i915_error_state_buf *e,
				 unsigned len)
{
	/* If this is the first printf in this window, adjust it so that the
	 * start position matches the start of the buffer.
	 */
	if (e->pos < e->start) {
		const size_t off = e->start - e->pos;

		/* Should not happen but be paranoid */
		if (off > len || e->bytes) {
			e->err = -EIO;
			return;
		}

		memmove(e->buf, e->buf + off, len - off);
		e->bytes = len - off;
		e->pos = e->start;
		return;
	}

	e->bytes += len;
	e->pos += len;
}

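/*
 * While still seeking towards the window start, format into a NULL
 * buffer first so that skipped output only needs to be measured,
 * not stored.
 */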
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
			       const char *f, va_list args)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	/* Seek the first printf that hits the start position */
	if (e->pos < e->start) {
		va_list tmp;

		va_copy(tmp, args);
		len = vsnprintf(NULL, 0, f, tmp);
		va_end(tmp);

		if (!__i915_error_seek(e, len))
			return;
	}

	len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;

	__i915_error_advance(e, len);
}

static void i915_error_puts(struct drm_i915_error_state_buf *e,
			    const char *str)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	len = strlen(str);

	/* Seek the first puts that hits the start position */
	if (e->pos < e->start) {
		if (!__i915_error_seek(e, len))
			return;
	}

	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;
	memcpy(e->buf + e->bytes, str, len);

	__i915_error_advance(e, len);
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)

static void print_error_buffers(struct drm_i915_error_state_buf *m,
				const char *name,
				struct drm_i915_error_buffer *err,
				int count)
{
	int i;

	err_printf(m, "  %s [%d]:\n", name, count);

	while (count--) {
		err_printf(m, "    %08x_%08x %8u %02x %02x [ ",
			   upper_32_bits(err->gtt_offset),
			   lower_32_bits(err->gtt_offset),
			   err->size,
			   err->read_domains,
			   err->write_domain);
		for (i = 0; i < I915_NUM_ENGINES; i++)
			err_printf(m, "%02x ", err->rseqno[i]);

		err_printf(m, "] %02x", err->wseqno);
		err_puts(m, pin_flag(err->pinned));
		err_puts(m, tiling_flag(err->tiling));
		err_puts(m, dirty_flag(err->dirty));
		err_puts(m, purgeable_flag(err->purgeable));
		err_puts(m, err->userptr ? " userptr" : "");
		err_puts(m, err->ring != -1 ? " " : "");
		err_puts(m, ring_str(err->ring));
		err_puts(m, i915_cache_level_str(m->i915, err->cache_level));

		if (err->name)
			err_printf(m, " (name: %d)", err->name);
		if (err->fence_reg != I915_FENCE_REG_NONE)
			err_printf(m, " (fence: %d)", err->fence_reg);

		err_puts(m, "\n");
		err++;
	}
}

static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
{
	switch (a) {
	case HANGCHECK_IDLE:
		return "idle";
	case HANGCHECK_WAIT:
		return "wait";
	case HANGCHECK_ACTIVE:
		return "active";
	case HANGCHECK_KICK:
		return "kick";
	case HANGCHECK_HUNG:
		return "hung";
	}

	return "unknown";
}

static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
				  struct drm_device *dev,
				  struct drm_i915_error_state *error,
				  int ring_idx)
{
	struct drm_i915_error_ring *ring = &error->ring[ring_idx];

	if (!ring->valid)
		return;

	err_printf(m, "%s command stream:\n", ring_str(ring_idx));
	err_printf(m, "  START: 0x%08x\n", ring->start);
	err_printf(m, "  HEAD:  0x%08x\n", ring->head);
	err_printf(m, "  TAIL:  0x%08x\n", ring->tail);
	err_printf(m, "  CTL:   0x%08x\n", ring->ctl);
	err_printf(m, "  HWS:   0x%08x\n", ring->hws);
	err_printf(m, "  ACTHD: 0x%08x %08x\n", (u32)(ring->acthd>>32), (u32)ring->acthd);
	err_printf(m, "  IPEIR: 0x%08x\n", ring->ipeir);
	err_printf(m, "  IPEHR: 0x%08x\n", ring->ipehr);
	err_printf(m, "  INSTDONE: 0x%08x\n", ring->instdone);
	if (INTEL_INFO(dev)->gen >= 4) {
		err_printf(m, "  BBADDR: 0x%08x %08x\n", (u32)(ring->bbaddr>>32), (u32)ring->bbaddr);
		err_printf(m, "  BB_STATE: 0x%08x\n", ring->bbstate);
		err_printf(m, "  INSTPS: 0x%08x\n", ring->instps);
	}
	err_printf(m, "  INSTPM: 0x%08x\n", ring->instpm);
	err_printf(m, "  FADDR: 0x%08x %08x\n", upper_32_bits(ring->faddr),
		   lower_32_bits(ring->faddr));
	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, "  RC PSMI: 0x%08x\n", ring->rc_psmi);
		err_printf(m, "  FAULT_REG: 0x%08x\n", ring->fault_reg);
		err_printf(m, "  SYNC_0: 0x%08x [last synced 0x%08x]\n",
			   ring->semaphore_mboxes[0],
			   ring->semaphore_seqno[0]);
		err_printf(m, "  SYNC_1: 0x%08x [last synced 0x%08x]\n",
			   ring->semaphore_mboxes[1],
			   ring->semaphore_seqno[1]);
		if (HAS_VEBOX(dev)) {
			err_printf(m, "  SYNC_2: 0x%08x [last synced 0x%08x]\n",
				   ring->semaphore_mboxes[2],
				   ring->semaphore_seqno[2]);
		}
	}
	if (USES_PPGTT(dev)) {
		err_printf(m, "  GFX_MODE: 0x%08x\n", ring->vm_info.gfx_mode);

		if (INTEL_INFO(dev)->gen >= 8) {
			int i;
			for (i = 0; i < 4; i++)
				err_printf(m, "  PDP%d: 0x%016llx\n",
					   i, ring->vm_info.pdp[i]);
		} else {
			err_printf(m, "  PP_DIR_BASE: 0x%08x\n",
				   ring->vm_info.pp_dir_base);
		}
	}
	err_printf(m, "  seqno: 0x%08x\n", ring->seqno);
	err_printf(m, "  last_seqno: 0x%08x\n", ring->last_seqno);
	err_printf(m, "  waiting: %s\n", yesno(ring->waiting));
	err_printf(m, "  ring->head: 0x%08x\n", ring->cpu_ring_head);
	err_printf(m, "  ring->tail: 0x%08x\n", ring->cpu_ring_tail);
	err_printf(m, "  hangcheck: %s [%d]\n",
		   hangcheck_action_to_str(ring->hangcheck_action),
		   ring->hangcheck_score);
}

void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	i915_error_vprintf(e, f, args);
	va_end(args);
}

static void print_error_obj(struct drm_i915_error_state_buf *m,
			    struct drm_i915_error_object *obj)
{
	int page, offset, elt;

	for (page = offset = 0; page < obj->page_count; page++) {
		for (elt = 0; elt < PAGE_SIZE/4; elt++) {
			err_printf(m, "%08x :  %08x\n", offset,
				   obj->pages[page][elt]);
			offset += 4;
		}
	}
}

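/*
 * Flatten a captured error state into text. This is the producer of the
 * error dump layout: hang summary and header first, then global
 * registers, per-ring state, per-VM buffer lists, and finally hexdumps
 * of the captured objects (batches, ringbuffers, status pages, ...).
 */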
int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
			    const struct i915_error_state_file_priv *error_priv)
{
	struct drm_device *dev = error_priv->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error = error_priv->error;
	struct drm_i915_error_object *obj;
	int i, j, offset, elt;
	int max_hangcheck_score;

	if (!error) {
		err_printf(m, "no error state collected\n");
		goto out;
	}

	err_printf(m, "%s\n", error->error_msg);
	err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
		   error->time.tv_usec);
	err_printf(m, "Kernel: " UTS_RELEASE "\n");
	max_hangcheck_score = 0;
	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		if (error->ring[i].hangcheck_score > max_hangcheck_score)
			max_hangcheck_score = error->ring[i].hangcheck_score;
	}
	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		if (error->ring[i].hangcheck_score == max_hangcheck_score &&
		    error->ring[i].pid != -1) {
			err_printf(m, "Active process (on ring %s): %s [%d]\n",
				   ring_str(i),
				   error->ring[i].comm,
				   error->ring[i].pid);
		}
	}
	err_printf(m, "Reset count: %u\n", error->reset_count);
	err_printf(m, "Suspend count: %u\n", error->suspend_count);
	err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
	err_printf(m, "PCI Revision: 0x%02x\n", dev->pdev->revision);
	err_printf(m, "PCI Subsystem: %04x:%04x\n",
		   dev->pdev->subsystem_vendor,
		   dev->pdev->subsystem_device);
	err_printf(m, "IOMMU enabled?: %d\n", error->iommu);

	if (HAS_CSR(dev)) {
		struct intel_csr *csr = &dev_priv->csr;

		err_printf(m, "DMC loaded: %s\n",
			   yesno(csr->dmc_payload != NULL));
		err_printf(m, "DMC fw version: %d.%d\n",
			   CSR_VERSION_MAJOR(csr->version),
			   CSR_VERSION_MINOR(csr->version));
	}

	err_printf(m, "EIR: 0x%08x\n", error->eir);
	err_printf(m, "IER: 0x%08x\n", error->ier);
	if (INTEL_INFO(dev)->gen >= 8) {
		for (i = 0; i < 4; i++)
			err_printf(m, "GTIER gt %d: 0x%08x\n", i,
				   error->gtier[i]);
	} else if (HAS_PCH_SPLIT(dev) || IS_VALLEYVIEW(dev))
		err_printf(m, "GTIER: 0x%08x\n", error->gtier[0]);
	err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
	err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
	err_printf(m, "CCID: 0x%08x\n", error->ccid);
	err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);

	for (i = 0; i < dev_priv->num_fence_regs; i++)
		err_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);

	for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
		err_printf(m, "  INSTDONE_%d: 0x%08x\n", i,
			   error->extra_instdone[i]);

	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, "ERROR: 0x%08x\n", error->error);

		if (INTEL_INFO(dev)->gen >= 8)
			err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
				   error->fault_data1, error->fault_data0);

		err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
	}

	if (IS_GEN7(dev))
		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);

	for (i = 0; i < ARRAY_SIZE(error->ring); i++)
		i915_ring_error_state(m, dev, error, i);

	for (i = 0; i < error->vm_count; i++) {
		err_printf(m, "vm[%d]\n", i);

		print_error_buffers(m, "Active",
				    error->active_bo[i],
				    error->active_bo_count[i]);

		print_error_buffers(m, "Pinned",
				    error->pinned_bo[i],
				    error->pinned_bo_count[i]);
	}

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		obj = error->ring[i].batchbuffer;
		if (obj) {
			err_puts(m, dev_priv->engine[i].name);
			if (error->ring[i].pid != -1)
				err_printf(m, " (submitted by %s [%d])",
					   error->ring[i].comm,
					   error->ring[i].pid);
			err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
				   upper_32_bits(obj->gtt_offset),
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}

		obj = error->ring[i].wa_batchbuffer;
		if (obj) {
			err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
				   dev_priv->engine[i].name,
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}

		if (error->ring[i].num_requests) {
			err_printf(m, "%s --- %d requests\n",
				   dev_priv->engine[i].name,
				   error->ring[i].num_requests);
			for (j = 0; j < error->ring[i].num_requests; j++) {
				err_printf(m, "  seqno 0x%08x, emitted %ld, tail 0x%08x\n",
					   error->ring[i].requests[j].seqno,
					   error->ring[i].requests[j].jiffies,
					   error->ring[i].requests[j].tail);
			}
		}

		if (error->ring[i].num_waiters) {
			err_printf(m, "%s --- %d waiters\n",
				   dev_priv->engine[i].name,
				   error->ring[i].num_waiters);
			for (j = 0; j < error->ring[i].num_waiters; j++) {
				err_printf(m, "  seqno 0x%08x for %s [%d]\n",
					   error->ring[i].waiters[j].seqno,
					   error->ring[i].waiters[j].comm,
					   error->ring[i].waiters[j].pid);
			}
		}

		if ((obj = error->ring[i].ringbuffer)) {
			err_printf(m, "%s --- ringbuffer = 0x%08x\n",
				   dev_priv->engine[i].name,
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}

		if ((obj = error->ring[i].hws_page)) {
			u64 hws_offset = obj->gtt_offset;
			u32 *hws_page = &obj->pages[0][0];

			if (i915.enable_execlists) {
				hws_offset += LRC_PPHWSP_PN * PAGE_SIZE;
				hws_page = &obj->pages[LRC_PPHWSP_PN][0];
			}
			err_printf(m, "%s --- HW Status = 0x%08llx\n",
				   dev_priv->engine[i].name, hws_offset);
			offset = 0;
			for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
					   offset,
					   hws_page[elt],
					   hws_page[elt+1],
					   hws_page[elt+2],
					   hws_page[elt+3]);
				offset += 16;
			}
		}

		obj = error->ring[i].wa_ctx;
		if (obj) {
			u64 wa_ctx_offset = obj->gtt_offset;
			u32 *wa_ctx_page = &obj->pages[0][0];
			struct intel_engine_cs *engine = &dev_priv->engine[RCS];
			u32 wa_ctx_size = (engine->wa_ctx.indirect_ctx.size +
					   engine->wa_ctx.per_ctx.size);

			err_printf(m, "%s --- WA ctx batch buffer = 0x%08llx\n",
				   dev_priv->engine[i].name, wa_ctx_offset);
			offset = 0;
			for (elt = 0; elt < wa_ctx_size; elt += 4) {
				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
					   offset,
					   wa_ctx_page[elt + 0],
					   wa_ctx_page[elt + 1],
					   wa_ctx_page[elt + 2],
					   wa_ctx_page[elt + 3]);
				offset += 16;
			}
		}

		if ((obj = error->ring[i].ctx)) {
			err_printf(m, "%s --- HW Context = 0x%08x\n",
				   dev_priv->engine[i].name,
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}
	}

	if ((obj = error->semaphore_obj)) {
		err_printf(m, "Semaphore page = 0x%08x\n",
			   lower_32_bits(obj->gtt_offset));
		for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
			err_printf(m, "[%04x] %08x %08x %08x %08x\n",
				   elt * 4,
				   obj->pages[0][elt],
				   obj->pages[0][elt+1],
				   obj->pages[0][elt+2],
				   obj->pages[0][elt+3]);
		}
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, dev, error->display);

out:
	if (m->bytes == 0 && m->err)
		return m->err;

	return 0;
}

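/*
 * Allocate the text buffer for a single read() window. The allocation
 * falls back to progressively smaller sizes so that reading the error
 * state still works under memory pressure.
 */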
int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
			      struct drm_i915_private *i915,
			      size_t count, loff_t pos)
{
	memset(ebuf, 0, sizeof(*ebuf));
	ebuf->i915 = i915;

	/* We need to have enough room to store any i915_error_state printf
	 * so that we can move it to the start position.
	 */
	ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
	ebuf->buf = kmalloc(ebuf->size,
			    GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);

	if (ebuf->buf == NULL) {
		ebuf->size = PAGE_SIZE;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL) {
		ebuf->size = 128;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL)
		return -ENOMEM;

	ebuf->start = pos;

	return 0;
}

static void i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}

static void i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].wa_batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		i915_error_object_free(error->ring[i].hws_page);
		i915_error_object_free(error->ring[i].ctx);
		i915_error_object_free(error->ring[i].wa_ctx);
		kfree(error->ring[i].requests);
		kfree(error->ring[i].waiters);
	}

	i915_error_object_free(error->semaphore_obj);

	for (i = 0; i < error->vm_count; i++)
		kfree(error->active_bo[i]);

	kfree(error->active_bo);
	kfree(error->active_bo_count);
	kfree(error->pinned_bo);
	kfree(error->pinned_bo_count);
	kfree(error->overlay);
	kfree(error->display);
	kfree(error);
}

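/*
 * Snapshot the contents of a GEM object into freshly allocated pages.
 * Pages are copied through the GGTT aperture when the object is
 * uncached or lives in stolen memory (which the CPU cannot address
 * directly), and via kmap_atomic plus clflush otherwise. Everything
 * uses GFP_ATOMIC because capture may run from interrupt context.
 */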
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
			 struct drm_i915_gem_object *src,
			 struct i915_address_space *vm)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_error_object *dst;
	struct i915_vma *vma = NULL;
	int num_pages;
	bool use_ggtt;
	int i = 0;
	u64 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	num_pages = src->base.size >> PAGE_SHIFT;

	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	if (i915_gem_obj_bound(src, vm))
		dst->gtt_offset = i915_gem_obj_offset(src, vm);
	else
		dst->gtt_offset = -1;

	reloc_offset = dst->gtt_offset;
	if (i915_is_ggtt(vm))
		vma = i915_gem_obj_to_ggtt(src);
	use_ggtt = (src->cache_level == I915_CACHE_NONE &&
		    vma && (vma->bound & GLOBAL_BIND) &&
		    reloc_offset + num_pages * PAGE_SIZE <= ggtt->mappable_end);

	/* Cannot access stolen address directly, try to use the aperture */
	if (src->stolen) {
		use_ggtt = true;

		if (!(vma && vma->bound & GLOBAL_BIND))
			goto unwind;

		reloc_offset = i915_gem_obj_ggtt_offset(src);
		if (reloc_offset + num_pages * PAGE_SIZE > ggtt->mappable_end)
			goto unwind;
	}

	/* Cannot access snooped pages through the aperture */
	if (use_ggtt && src->cache_level != I915_CACHE_NONE &&
	    !HAS_LLC(dev_priv))
		goto unwind;

	dst->page_count = num_pages;
	while (num_pages--) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (use_ggtt) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */

			s = io_mapping_map_atomic_wc(ggtt->mappable,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i++] = d;
		reloc_offset += PAGE_SIZE;
	}

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}

#define i915_error_ggtt_object_create(dev_priv, src) \
	i915_error_object_create((dev_priv), (src), &(dev_priv)->ggtt.base)

static void capture_bo(struct drm_i915_error_buffer *err,
		       struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	int i;

	err->size = obj->base.size;
	err->name = obj->base.name;
	for (i = 0; i < I915_NUM_ENGINES; i++)
		err->rseqno[i] = i915_gem_request_get_seqno(obj->last_read_req[i]);
	err->wseqno = i915_gem_request_get_seqno(obj->last_write_req);
	err->gtt_offset = vma->node.start;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (i915_gem_obj_is_pinned(obj))
		err->pinned = 1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->userptr = obj->userptr.mm != NULL;
	err->ring = obj->last_write_req ?
			i915_gem_request_get_engine(obj->last_write_req)->id : -1;
	err->cache_level = obj->cache_level;
}

static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct i915_vma *vma;
	int i = 0;

	list_for_each_entry(vma, head, vm_link) {
		capture_bo(err++, vma);
		if (++i == count)
			break;
	}

	return i;
}

static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head,
			     struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_buffer * const first = err;
	struct drm_i915_error_buffer * const last = err + count;

	list_for_each_entry(obj, head, global_list) {
		struct i915_vma *vma;

		if (err == last)
			break;

		list_for_each_entry(vma, &obj->vma_list, obj_link)
			if (vma->vm == vm && vma->pin_count > 0)
				capture_bo(err++, vma);
	}

	return err - first;
}

/* Generate a semi-unique error code. The code is not meant to have meaning;
 * its only purpose is to try to prevent falsely duplicated bug reports by
 * grossly estimating a GPU error state.
 *
 * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
 * the hang if we could strip the GTT offset information from it.
 *
 * It's only a small step better than a random number in its current form.
 */
static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
					 struct drm_i915_error_state *error,
					 int *ring_id)
{
	uint32_t error_code = 0;
	int i;

	/* IPEHR would be an ideal way to detect errors, as it's the gross
	 * measure of "the command that hung." However, it contains some very
	 * common synchronization commands which almost always appear even in
	 * cases that are strictly a client bug. Use instdone to differentiate
	 * those somewhat.
	 */
	for (i = 0; i < I915_NUM_ENGINES; i++) {
		if (error->ring[i].hangcheck_action == HANGCHECK_HUNG) {
			if (ring_id)
				*ring_id = i;

			return error->ring[i].ipehr ^ error->ring[i].instdone;
		}
	}

	return error_code;
}

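/*
 * Fence registers moved and widened over the generations: 32-bit reads
 * on gen2/3, 64-bit from gen4 onwards, and relocated again on gen6+.
 */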
static void i915_gem_record_fences(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	int i;

	if (IS_GEN3(dev_priv) || IS_GEN2(dev_priv)) {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ(FENCE_REG(i));
	} else if (IS_GEN5(dev_priv) || IS_GEN4(dev_priv)) {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_LO(i));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i));
	}
}

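/*
 * On gen8+ the inter-engine semaphore values live in a dedicated GGTT
 * page rather than in per-ring mailbox registers, so snapshot that page
 * once and pull each engine's signal slot out of the copy.
 */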
static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
					struct drm_i915_error_state *error,
					struct intel_engine_cs *engine,
					struct drm_i915_error_ring *ering)
{
	struct intel_engine_cs *to;
	enum intel_engine_id id;

	if (!i915_semaphore_is_enabled(dev_priv))
		return;

	if (!error->semaphore_obj)
		error->semaphore_obj =
			i915_error_ggtt_object_create(dev_priv,
						      dev_priv->semaphore_obj);

	for_each_engine_id(to, dev_priv, id) {
		int idx;
		u16 signal_offset;
		u32 *tmp;

		if (engine == to)
			continue;

		signal_offset = (GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1))
				/ 4;
		tmp = error->semaphore_obj->pages[0];
		idx = intel_ring_sync_index(engine, to);

		ering->semaphore_mboxes[idx] = tmp[signal_offset];
		ering->semaphore_seqno[idx] = engine->semaphore.sync_seqno[idx];
	}
}

static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
					struct intel_engine_cs *engine,
					struct drm_i915_error_ring *ering)
{
	ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base));
	ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base));
	ering->semaphore_seqno[0] = engine->semaphore.sync_seqno[0];
	ering->semaphore_seqno[1] = engine->semaphore.sync_seqno[1];

	if (HAS_VEBOX(dev_priv)) {
		ering->semaphore_mboxes[2] =
			I915_READ(RING_SYNC_2(engine->mmio_base));
		ering->semaphore_seqno[2] = engine->semaphore.sync_seqno[2];
	}
}

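/*
 * Count the waiters first so the array can be allocated outside
 * b->lock; if the rbtree grows between the counting pass and the copy
 * pass we simply stop at the counted number.
 */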
static void engine_record_waiters(struct intel_engine_cs *engine,
				  struct drm_i915_error_ring *ering)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct drm_i915_error_waiter *waiter;
	struct rb_node *rb;
	int count;

	ering->num_waiters = 0;
	ering->waiters = NULL;

	spin_lock(&b->lock);
	count = 0;
	for (rb = rb_first(&b->waiters); rb != NULL; rb = rb_next(rb))
		count++;
	spin_unlock(&b->lock);

	waiter = NULL;
	if (count)
		waiter = kmalloc_array(count,
				       sizeof(struct drm_i915_error_waiter),
				       GFP_ATOMIC);
	if (!waiter)
		return;

	ering->waiters = waiter;

	spin_lock(&b->lock);
	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
		struct intel_wait *w = container_of(rb, typeof(*w), node);

		strcpy(waiter->comm, w->tsk->comm);
		waiter->pid = w->tsk->pid;
		waiter->seqno = w->seqno;
		waiter++;

		if (++ering->num_waiters == count)
			break;
	}
	spin_unlock(&b->lock);
}

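/*
 * Snapshot the per-engine MMIO state. Which registers exist varies by
 * generation, so each block below is gated on generation checks.
 */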
static void i915_record_ring_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error,
				   struct intel_engine_cs *engine,
				   struct drm_i915_error_ring *ering)
{
	if (INTEL_GEN(dev_priv) >= 6) {
		ering->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
		ering->fault_reg = I915_READ(RING_FAULT_REG(engine));
		if (INTEL_GEN(dev_priv) >= 8)
			gen8_record_semaphore_state(dev_priv, error, engine,
						    ering);
		else
			gen6_record_semaphore_state(dev_priv, engine, ering);
	}

	if (INTEL_GEN(dev_priv) >= 4) {
		ering->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
		ering->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
		ering->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
		ering->instdone = I915_READ(RING_INSTDONE(engine->mmio_base));
		ering->instps = I915_READ(RING_INSTPS(engine->mmio_base));
		ering->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
		if (INTEL_GEN(dev_priv) >= 8) {
			ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
			ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
		}
		ering->bbstate = I915_READ(RING_BBSTATE(engine->mmio_base));
	} else {
		ering->faddr = I915_READ(DMA_FADD_I8XX);
		ering->ipeir = I915_READ(IPEIR);
		ering->ipehr = I915_READ(IPEHR);
		ering->instdone = I915_READ(GEN2_INSTDONE);
	}

	ering->waiting = intel_engine_has_waiter(engine);
	ering->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
	ering->acthd = intel_ring_get_active_head(engine);
	ering->seqno = intel_engine_get_seqno(engine);
	ering->last_seqno = engine->last_submitted_seqno;
	ering->start = I915_READ_START(engine);
	ering->head = I915_READ_HEAD(engine);
	ering->tail = I915_READ_TAIL(engine);
	ering->ctl = I915_READ_CTL(engine);

	if (I915_NEED_GFX_HWS(dev_priv)) {
		i915_reg_t mmio;

		if (IS_GEN7(dev_priv)) {
			switch (engine->id) {
			default:
			case RCS:
				mmio = RENDER_HWS_PGA_GEN7;
				break;
			case BCS:
				mmio = BLT_HWS_PGA_GEN7;
				break;
			case VCS:
				mmio = BSD_HWS_PGA_GEN7;
				break;
			case VECS:
				mmio = VEBOX_HWS_PGA_GEN7;
				break;
			}
		} else if (IS_GEN6(engine->i915)) {
			mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
		} else {
			/* XXX: gen8 returns to sanity */
			mmio = RING_HWS_PGA(engine->mmio_base);
		}

		ering->hws = I915_READ(mmio);
	}

	ering->hangcheck_score = engine->hangcheck.score;
	ering->hangcheck_action = engine->hangcheck.action;

	if (USES_PPGTT(dev_priv)) {
		int i;

		ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));

		if (IS_GEN6(dev_priv))
			ering->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE_READ(engine));
		else if (IS_GEN7(dev_priv))
			ering->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE(engine));
		else if (INTEL_GEN(dev_priv) >= 8)
			for (i = 0; i < 4; i++) {
				ering->vm_info.pdp[i] =
					I915_READ(GEN8_RING_PDP_UDW(engine, i));
				ering->vm_info.pdp[i] <<= 32;
				ering->vm_info.pdp[i] |=
					I915_READ(GEN8_RING_PDP_LDW(engine, i));
			}
	}
}

static void i915_gem_record_active_context(struct intel_engine_cs *engine,
					   struct drm_i915_error_state *error,
					   struct drm_i915_error_ring *ering)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct drm_i915_gem_object *obj;

	/* Currently render ring is the only HW context user */
	if (engine->id != RCS || !error->ccid)
		return;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (!i915_gem_obj_ggtt_bound(obj))
			continue;

		if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
			ering->ctx = i915_error_ggtt_object_create(dev_priv, obj);
			break;
		}
	}
}

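/*
 * Walk every engine and capture its register state, waiters, the active
 * request's batchbuffer, the ringbuffer and status page contents, plus
 * a summary of all outstanding requests on that engine.
 */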
static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
				  struct drm_i915_error_state *error)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_request *request;
	int i, count;

	for (i = 0; i < I915_NUM_ENGINES; i++) {
		struct intel_engine_cs *engine = &dev_priv->engine[i];

		error->ring[i].pid = -1;

		if (!intel_engine_initialized(engine))
			continue;

		error->ring[i].valid = true;

		i915_record_ring_state(dev_priv, error, engine, &error->ring[i]);
		engine_record_waiters(engine, &error->ring[i]);

		request = i915_gem_find_active_request(engine);
		if (request) {
			struct i915_address_space *vm;
			struct intel_ringbuffer *rb;

			vm = request->ctx && request->ctx->ppgtt ?
				&request->ctx->ppgtt->base :
				&ggtt->base;

			/* We need to copy these to an anonymous buffer
			 * as the simplest method to avoid being overwritten
			 * by userspace.
			 */
			error->ring[i].batchbuffer =
				i915_error_object_create(dev_priv,
							 request->batch_obj,
							 vm);

			if (HAS_BROKEN_CS_TLB(dev_priv))
				error->ring[i].wa_batchbuffer =
					i915_error_ggtt_object_create(dev_priv,
								      engine->scratch.obj);

			if (request->pid) {
				struct task_struct *task;

				rcu_read_lock();
				task = pid_task(request->pid, PIDTYPE_PID);
				if (task) {
					strcpy(error->ring[i].comm, task->comm);
					error->ring[i].pid = task->pid;
				}
				rcu_read_unlock();
			}

			rb = request->ringbuf;
			error->ring[i].cpu_ring_head = rb->head;
			error->ring[i].cpu_ring_tail = rb->tail;
			error->ring[i].ringbuffer =
				i915_error_ggtt_object_create(dev_priv,
							      rb->obj);
		}

		error->ring[i].hws_page =
			i915_error_ggtt_object_create(dev_priv,
						      engine->status_page.obj);

		if (engine->wa_ctx.obj) {
			error->ring[i].wa_ctx =
				i915_error_ggtt_object_create(dev_priv,
							      engine->wa_ctx.obj);
		}

		i915_gem_record_active_context(engine, error, &error->ring[i]);

		count = 0;
		list_for_each_entry(request, &engine->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kcalloc(count, sizeof(*error->ring[i].requests),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &engine->request_list, list) {
			struct drm_i915_error_request *erq;

			if (count >= error->ring[i].num_requests) {
				/*
				 * If the ring request list was changed in
				 * between the point where the error request
				 * list was created and dimensioned and this
				 * point then just exit early to avoid crashes.
				 *
				 * We don't need to communicate that the
				 * request list changed state during error
				 * state capture and that the error state is
				 * slightly incorrect as a consequence since we
				 * are typically only interested in the request
				 * list state at the point of error state
				 * capture, not in any changes happening during
				 * the capture.
				 */
				break;
			}

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->postfix;
		}
	}
}

/* FIXME: Since pin count/bound list is global, we duplicate what we capture per
 * VM.
 */
static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
				struct drm_i915_error_state *error,
				struct i915_address_space *vm,
				const int ndx)
{
	struct drm_i915_error_buffer *active_bo = NULL, *pinned_bo = NULL;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int i;

	i = 0;
	list_for_each_entry(vma, &vm->active_list, vm_link)
		i++;
	error->active_bo_count[ndx] = i;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		list_for_each_entry(vma, &obj->vma_list, obj_link)
			if (vma->vm == vm && vma->pin_count > 0)
				i++;
	}
	error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];

	if (i) {
		active_bo = kcalloc(i, sizeof(*active_bo), GFP_ATOMIC);
		if (active_bo)
			pinned_bo = active_bo + error->active_bo_count[ndx];
	}

	if (active_bo)
		error->active_bo_count[ndx] =
			capture_active_bo(active_bo,
					  error->active_bo_count[ndx],
					  &vm->active_list);

	if (pinned_bo)
		error->pinned_bo_count[ndx] =
			capture_pinned_bo(pinned_bo,
					  error->pinned_bo_count[ndx],
					  &dev_priv->mm.bound_list, vm);

	error->active_bo[ndx] = active_bo;
	error->pinned_bo[ndx] = pinned_bo;
}

static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
				     struct drm_i915_error_state *error)
{
	struct i915_address_space *vm;
	int cnt = 0, i = 0;

	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
		cnt++;

	error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
	error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
	error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
					 GFP_ATOMIC);
	error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count),
					 GFP_ATOMIC);

	if (error->active_bo == NULL ||
	    error->pinned_bo == NULL ||
	    error->active_bo_count == NULL ||
	    error->pinned_bo_count == NULL) {
		kfree(error->active_bo);
		kfree(error->active_bo_count);
		kfree(error->pinned_bo);
		kfree(error->pinned_bo_count);

		error->active_bo = NULL;
		error->active_bo_count = NULL;
		error->pinned_bo = NULL;
		error->pinned_bo_count = NULL;
	} else {
		list_for_each_entry(vm, &dev_priv->vm_list, global_link)
			i915_gem_capture_vm(dev_priv, error, vm, i++);

		error->vm_count = cnt;
	}
}

/* Capture all registers which don't fit into another category. */
static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	struct drm_device *dev = dev_priv->dev;
	int i;

	/* General organization
	 * 1. Registers specific to a single generation
	 * 2. Registers which belong to multiple generations
	 * 3. Feature specific registers.
	 * 4. Everything else
	 * Please try to follow the order.
	 */

	/* 1: Registers specific to a single generation */
	if (IS_VALLEYVIEW(dev)) {
		error->gtier[0] = I915_READ(GTIER);
		error->ier = I915_READ(VLV_IER);
		error->forcewake = I915_READ_FW(FORCEWAKE_VLV);
	}

	if (IS_GEN7(dev))
		error->err_int = I915_READ(GEN7_ERR_INT);

	if (INTEL_INFO(dev)->gen >= 8) {
		error->fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
		error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
	}

	if (IS_GEN6(dev)) {
		error->forcewake = I915_READ_FW(FORCEWAKE);
		error->gab_ctl = I915_READ(GAB_CTL);
		error->gfx_mode = I915_READ(GFX_MODE);
	}

	/* 2: Registers which belong to multiple generations */
	if (INTEL_INFO(dev)->gen >= 7)
		error->forcewake = I915_READ_FW(FORCEWAKE_MT);

	if (INTEL_INFO(dev)->gen >= 6) {
		error->derrmr = I915_READ(DERRMR);
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	/* 3: Feature specific registers */
	if (IS_GEN6(dev) || IS_GEN7(dev)) {
		error->gam_ecochk = I915_READ(GAM_ECOCHK);
		error->gac_eco = I915_READ(GAC_ECO_BITS);
	}

	/* 4: Everything else */
	if (HAS_HW_CONTEXTS(dev))
		error->ccid = I915_READ(CCID);

	if (INTEL_INFO(dev)->gen >= 8) {
		error->ier = I915_READ(GEN8_DE_MISC_IER);
		for (i = 0; i < 4; i++)
			error->gtier[i] = I915_READ(GEN8_GT_IER(i));
	} else if (HAS_PCH_SPLIT(dev)) {
		error->ier = I915_READ(DEIER);
		error->gtier[0] = I915_READ(GTIER);
	} else if (IS_GEN2(dev)) {
		error->ier = I915_READ16(IER);
	} else if (!IS_VALLEYVIEW(dev)) {
		error->ier = I915_READ(IER);
	}
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);

	i915_get_extra_instdone(dev_priv, error->extra_instdone);
}

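/*
 * Build the one-line hang summary ("GPU HANG: ecode ...") that is both
 * logged via DRM_INFO and stored as the first line of the error dump.
 */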
static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error,
				   u32 engine_mask,
				   const char *error_msg)
{
	u32 ecode;
	int ring_id = -1, len;

	ecode = i915_error_generate_code(dev_priv, error, &ring_id);

	len = scnprintf(error->error_msg, sizeof(error->error_msg),
			"GPU HANG: ecode %d:%d:0x%08x",
			INTEL_GEN(dev_priv), ring_id, ecode);

	if (ring_id != -1 && error->ring[ring_id].pid != -1)
		len += scnprintf(error->error_msg + len,
				 sizeof(error->error_msg) - len,
				 ", in %s [%d]",
				 error->ring[ring_id].comm,
				 error->ring[ring_id].pid);

	scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
		  ", reason: %s, action: %s",
		  error_msg,
		  engine_mask ? "reset" : "continue");
}

static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	error->iommu = -1;
#ifdef CONFIG_INTEL_IOMMU
	error->iommu = intel_iommu_gfx_mapped;
#endif
	error->reset_count = i915_reset_count(&dev_priv->gpu_error);
	error->suspend_count = dev_priv->suspend_count;
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev_priv: i915 device private
 * @engine_mask: mask of engines that hung; non-zero means a reset follows
 * @error_msg: short description of why the capture was triggered
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
void i915_capture_error_state(struct drm_i915_private *dev_priv,
			      u32 engine_mask,
			      const char *error_msg)
{
	static bool warned;
	struct drm_i915_error_state *error;
	unsigned long flags;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	kref_init(&error->ref);

	i915_capture_gen_state(dev_priv, error);
	i915_capture_reg_state(dev_priv, error);
	i915_gem_capture_buffers(dev_priv, error);
	i915_gem_record_fences(dev_priv, error);
	i915_gem_record_rings(dev_priv, error);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev_priv);
	error->display = intel_display_capture_error_state(dev_priv);

	i915_error_capture_msg(dev_priv, error, engine_mask, error_msg);
	DRM_INFO("%s\n", error->error_msg);

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	if (dev_priv->gpu_error.first_error == NULL) {
		dev_priv->gpu_error.first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error) {
		i915_error_state_free(&error->ref);
		return;
	}

	if (!warned) {
		DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
		DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
		DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
		DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
		DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev_priv->dev->primary->index);
		warned = true;
	}
}

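/*
 * Only one error state is kept at a time (gpu_error.first_error); it is
 * reference counted so a debugfs reader can hold it while a new hang is
 * being processed or the state is cleared.
 */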
void i915_error_state_get(struct drm_device *dev,
			  struct i915_error_state_file_priv *error_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->gpu_error.lock);
	error_priv->error = dev_priv->gpu_error.first_error;
	if (error_priv->error)
		kref_get(&error_priv->error->ref);
	spin_unlock_irq(&dev_priv->gpu_error.lock);
}

void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
{
	if (error_priv->error)
		kref_put(&error_priv->error->ref, i915_error_state_free);
}

void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;

	spin_lock_irq(&dev_priv->gpu_error.lock);
	error = dev_priv->gpu_error.first_error;
	dev_priv->gpu_error.first_error = NULL;
	spin_unlock_irq(&dev_priv->gpu_error.lock);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}

const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	case I915_CACHE_WT: return " WT";
	default: return "";
	}
}

/* NB: please notice the memset */
void i915_get_extra_instdone(struct drm_i915_private *dev_priv,
			     uint32_t *instdone)
{
	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

	if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
		instdone[0] = I915_READ(GEN2_INSTDONE);
	else if (IS_GEN4(dev_priv) || IS_GEN5(dev_priv) || IS_GEN6(dev_priv)) {
		instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
		instdone[1] = I915_READ(GEN4_INSTDONE1);
	} else if (INTEL_GEN(dev_priv) >= 7) {
		instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
	}
}