i915_gpu_error.c

/*
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 *
 */

#include <generated/utsrelease.h>
#include "i915_drv.h"

static const char *yesno(int v)
{
	return v ? "yes" : "no";
}

static const char *ring_str(int ring)
{
	switch (ring) {
	case RCS: return "render";
	case VCS: return "bsd";
	case BCS: return "blt";
	case VECS: return "vebox";
	case VCS2: return "bsd2";
	default: return "";
	}
}

static const char *pin_flag(int pinned)
{
	if (pinned > 0)
		return " P";
	else if (pinned < 0)
		return " p";
	else
		return "";
}

static const char *tiling_flag(int tiling)
{
	switch (tiling) {
	default:
	case I915_TILING_NONE: return "";
	case I915_TILING_X: return " X";
	case I915_TILING_Y: return " Y";
	}
}

static const char *dirty_flag(int dirty)
{
	return dirty ? " dirty" : "";
}

static const char *purgeable_flag(int purgeable)
{
	return purgeable ? " purgeable" : "";
}

static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
{
	if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
		e->err = -ENOSPC;
		return false;
	}

	if (e->bytes == e->size - 1 || e->err)
		return false;

	return true;
}

static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
			      unsigned len)
{
	if (e->pos + len <= e->start) {
		e->pos += len;
		return false;
	}

	/* First vsnprintf needs to fit in its entirety for memmove */
	if (len >= e->size) {
		e->err = -EIO;
		return false;
	}

	return true;
}

static void __i915_error_advance(struct drm_i915_error_state_buf *e,
				 unsigned len)
{
	/* If this is the first printf in this window, adjust it so that
	 * the start position matches the start of the buffer.
	 */
	if (e->pos < e->start) {
		const size_t off = e->start - e->pos;

		/* Should not happen but be paranoid */
		if (off > len || e->bytes) {
			e->err = -EIO;
			return;
		}

		memmove(e->buf, e->buf + off, len - off);
		e->bytes = len - off;
		e->pos = e->start;
		return;
	}

	e->bytes += len;
	e->pos += len;
}

static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
			       const char *f, va_list args)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	/* Seek the first printf which hits the start position */
	if (e->pos < e->start) {
		va_list tmp;

		va_copy(tmp, args);
		len = vsnprintf(NULL, 0, f, tmp);
		va_end(tmp);

		if (!__i915_error_seek(e, len))
			return;
	}

	len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;

	__i915_error_advance(e, len);
}

static void i915_error_puts(struct drm_i915_error_state_buf *e,
			    const char *str)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	len = strlen(str);

	/* Seek the first string which hits the start position */
	if (e->pos < e->start) {
		if (!__i915_error_seek(e, len))
			return;
	}

	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;
	memcpy(e->buf + e->bytes, str, len);

	__i915_error_advance(e, len);
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)
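
/*
 * Note on the machinery above: the error state is not stored as text; it is
 * re-rendered from scratch on every read, and __i915_error_seek() /
 * __i915_error_advance() implement a window so that a read at an arbitrary
 * file offset yields the same bytes a sequential read would. A rough sketch
 * of a windowed read (the offsets are made up for illustration):
 *
 *	e->start = 4096;	// caller asked for bytes 4096 onwards
 *	e->pos = 0;		// formatting always restarts from the top
 *	err_printf(e, "EIR: 0x%08x\n", eir);
 *	// ...strings wholly before e->start are measured with
 *	// vsnprintf(NULL, 0, ...) and skipped; the first string that
 *	// straddles e->start is trimmed by the memmove() in
 *	// __i915_error_advance(); everything after that is buffered.
 */
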
static void print_error_buffers(struct drm_i915_error_state_buf *m,
				const char *name,
				struct drm_i915_error_buffer *err,
				int count)
{
	err_printf(m, " %s [%d]:\n", name, count);

	while (count--) {
		err_printf(m, " %08x %8u %02x %02x %x %x",
			   err->gtt_offset,
			   err->size,
			   err->read_domains,
			   err->write_domain,
			   err->rseqno, err->wseqno);
		err_puts(m, pin_flag(err->pinned));
		err_puts(m, tiling_flag(err->tiling));
		err_puts(m, dirty_flag(err->dirty));
		err_puts(m, purgeable_flag(err->purgeable));
		err_puts(m, err->userptr ? " userptr" : "");
		err_puts(m, err->ring != -1 ? " " : "");
		err_puts(m, ring_str(err->ring));
		err_puts(m, i915_cache_level_str(m->i915, err->cache_level));

		if (err->name)
			err_printf(m, " (name: %d)", err->name);
		if (err->fence_reg != I915_FENCE_REG_NONE)
			err_printf(m, " (fence: %d)", err->fence_reg);

		err_puts(m, "\n");
		err++;
	}
}

static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
{
	switch (a) {
	case HANGCHECK_IDLE:
		return "idle";
	case HANGCHECK_WAIT:
		return "wait";
	case HANGCHECK_ACTIVE:
		return "active";
	case HANGCHECK_ACTIVE_LOOP:
		return "active (loop)";
	case HANGCHECK_KICK:
		return "kick";
	case HANGCHECK_HUNG:
		return "hung";
	}

	return "unknown";
}

static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
				  struct drm_device *dev,
				  struct drm_i915_error_state *error,
				  int ring_idx)
{
	struct drm_i915_error_ring *ring = &error->ring[ring_idx];

	if (!ring->valid)
		return;

	err_printf(m, "%s command stream:\n", ring_str(ring_idx));
	err_printf(m, " HEAD: 0x%08x\n", ring->head);
	err_printf(m, " TAIL: 0x%08x\n", ring->tail);
	err_printf(m, " CTL: 0x%08x\n", ring->ctl);
	err_printf(m, " HWS: 0x%08x\n", ring->hws);
	err_printf(m, " ACTHD: 0x%08x %08x\n", (u32)(ring->acthd>>32), (u32)ring->acthd);
	err_printf(m, " IPEIR: 0x%08x\n", ring->ipeir);
	err_printf(m, " IPEHR: 0x%08x\n", ring->ipehr);
	err_printf(m, " INSTDONE: 0x%08x\n", ring->instdone);
	if (INTEL_INFO(dev)->gen >= 4) {
		err_printf(m, " BBADDR: 0x%08x %08x\n", (u32)(ring->bbaddr>>32), (u32)ring->bbaddr);
		err_printf(m, " BB_STATE: 0x%08x\n", ring->bbstate);
		err_printf(m, " INSTPS: 0x%08x\n", ring->instps);
	}
	err_printf(m, " INSTPM: 0x%08x\n", ring->instpm);
	err_printf(m, " FADDR: 0x%08x %08x\n", upper_32_bits(ring->faddr),
		   lower_32_bits(ring->faddr));
	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, " RC PSMI: 0x%08x\n", ring->rc_psmi);
		err_printf(m, " FAULT_REG: 0x%08x\n", ring->fault_reg);
		err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
			   ring->semaphore_mboxes[0],
			   ring->semaphore_seqno[0]);
		err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
			   ring->semaphore_mboxes[1],
			   ring->semaphore_seqno[1]);
		if (HAS_VEBOX(dev)) {
			err_printf(m, " SYNC_2: 0x%08x [last synced 0x%08x]\n",
				   ring->semaphore_mboxes[2],
				   ring->semaphore_seqno[2]);
		}
	}
	if (USES_PPGTT(dev)) {
		err_printf(m, " GFX_MODE: 0x%08x\n", ring->vm_info.gfx_mode);

		if (INTEL_INFO(dev)->gen >= 8) {
			int i;
			for (i = 0; i < 4; i++)
				err_printf(m, " PDP%d: 0x%016llx\n",
					   i, ring->vm_info.pdp[i]);
		} else {
			err_printf(m, " PP_DIR_BASE: 0x%08x\n",
				   ring->vm_info.pp_dir_base);
		}
	}
	err_printf(m, " seqno: 0x%08x\n", ring->seqno);
	err_printf(m, " waiting: %s\n", yesno(ring->waiting));
	err_printf(m, " ring->head: 0x%08x\n", ring->cpu_ring_head);
	err_printf(m, " ring->tail: 0x%08x\n", ring->cpu_ring_tail);
	err_printf(m, " hangcheck: %s [%d]\n",
		   hangcheck_action_to_str(ring->hangcheck_action),
		   ring->hangcheck_score);
}

void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	i915_error_vprintf(e, f, args);
	va_end(args);
}

static void print_error_obj(struct drm_i915_error_state_buf *m,
			    struct drm_i915_error_object *obj)
{
	int page, offset, elt;

	for (page = offset = 0; page < obj->page_count; page++) {
		for (elt = 0; elt < PAGE_SIZE/4; elt++) {
			err_printf(m, "%08x : %08x\n", offset,
				   obj->pages[page][elt]);
			offset += 4;
		}
	}
}
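
/*
 * For reference, print_error_obj() renders each captured object one dword
 * per line as "<byte offset> : <value>". A page whose first dword is
 * MI_BATCH_BUFFER_END (0x05000000) followed by MI_NOOP padding would begin
 * (values illustrative, not from a real dump):
 *
 *	00000000 : 05000000
 *	00000004 : 00000000
 *	...
 */
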
int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
			    const struct i915_error_state_file_priv *error_priv)
{
	struct drm_device *dev = error_priv->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error = error_priv->error;
	struct drm_i915_error_object *obj;
	int i, j, offset, elt;
	int max_hangcheck_score;

	if (!error) {
		err_printf(m, "no error state collected\n");
		goto out;
	}

	err_printf(m, "%s\n", error->error_msg);
	err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
		   error->time.tv_usec);
	err_printf(m, "Kernel: " UTS_RELEASE "\n");
	max_hangcheck_score = 0;
	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		if (error->ring[i].hangcheck_score > max_hangcheck_score)
			max_hangcheck_score = error->ring[i].hangcheck_score;
	}
	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		if (error->ring[i].hangcheck_score == max_hangcheck_score &&
		    error->ring[i].pid != -1) {
			err_printf(m, "Active process (on ring %s): %s [%d]\n",
				   ring_str(i),
				   error->ring[i].comm,
				   error->ring[i].pid);
		}
	}
	err_printf(m, "Reset count: %u\n", error->reset_count);
	err_printf(m, "Suspend count: %u\n", error->suspend_count);
	err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
	err_printf(m, "EIR: 0x%08x\n", error->eir);
	err_printf(m, "IER: 0x%08x\n", error->ier);
	if (INTEL_INFO(dev)->gen >= 8) {
		for (i = 0; i < 4; i++)
			err_printf(m, "GTIER gt %d: 0x%08x\n", i,
				   error->gtier[i]);
	} else if (HAS_PCH_SPLIT(dev) || IS_VALLEYVIEW(dev))
		err_printf(m, "GTIER: 0x%08x\n", error->gtier[0]);
	err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
	err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
	err_printf(m, "CCID: 0x%08x\n", error->ccid);
	err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);

	for (i = 0; i < dev_priv->num_fence_regs; i++)
		err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);

	for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
		err_printf(m, " INSTDONE_%d: 0x%08x\n", i,
			   error->extra_instdone[i]);

	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, "ERROR: 0x%08x\n", error->error);

		if (INTEL_INFO(dev)->gen >= 8)
			err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
				   error->fault_data1, error->fault_data0);

		err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
	}

	if (INTEL_INFO(dev)->gen == 7)
		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);

	for (i = 0; i < ARRAY_SIZE(error->ring); i++)
		i915_ring_error_state(m, dev, error, i);

	for (i = 0; i < error->vm_count; i++) {
		err_printf(m, "vm[%d]\n", i);

		print_error_buffers(m, "Active",
				    error->active_bo[i],
				    error->active_bo_count[i]);

		print_error_buffers(m, "Pinned",
				    error->pinned_bo[i],
				    error->pinned_bo_count[i]);
	}

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		obj = error->ring[i].batchbuffer;
		if (obj) {
			err_puts(m, dev_priv->ring[i].name);
			if (error->ring[i].pid != -1)
				err_printf(m, " (submitted by %s [%d])",
					   error->ring[i].comm,
					   error->ring[i].pid);
			err_printf(m, " --- gtt_offset = 0x%08x\n",
				   obj->gtt_offset);
			print_error_obj(m, obj);
		}

		obj = error->ring[i].wa_batchbuffer;
		if (obj) {
			err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
				   dev_priv->ring[i].name, obj->gtt_offset);
			print_error_obj(m, obj);
		}

		if (error->ring[i].num_requests) {
			err_printf(m, "%s --- %d requests\n",
				   dev_priv->ring[i].name,
				   error->ring[i].num_requests);
			for (j = 0; j < error->ring[i].num_requests; j++) {
				err_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
					   error->ring[i].requests[j].seqno,
					   error->ring[i].requests[j].jiffies,
					   error->ring[i].requests[j].tail);
			}
		}

		if ((obj = error->ring[i].ringbuffer)) {
			err_printf(m, "%s --- ringbuffer = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			print_error_obj(m, obj);
		}

		if ((obj = error->ring[i].hws_page)) {
			err_printf(m, "%s --- HW Status = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
					   offset,
					   obj->pages[0][elt],
					   obj->pages[0][elt+1],
					   obj->pages[0][elt+2],
					   obj->pages[0][elt+3]);
				offset += 16;
			}
		}

		if ((obj = error->ring[i].ctx)) {
			err_printf(m, "%s --- HW Context = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			print_error_obj(m, obj);
		}
	}

	if ((obj = error->semaphore_obj)) {
		err_printf(m, "Semaphore page = 0x%08x\n", obj->gtt_offset);
		for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
			err_printf(m, "[%04x] %08x %08x %08x %08x\n",
				   elt * 4,
				   obj->pages[0][elt],
				   obj->pages[0][elt+1],
				   obj->pages[0][elt+2],
				   obj->pages[0][elt+3]);
		}
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, dev, error->display);

out:
	if (m->bytes == 0 && m->err)
		return m->err;

	return 0;
}

int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
			      struct drm_i915_private *i915,
			      size_t count, loff_t pos)
{
	memset(ebuf, 0, sizeof(*ebuf));
	ebuf->i915 = i915;

	/* We need to have enough room to store any i915_error_state printf
	 * so that we can move it to the start position.
	 */
	ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
	ebuf->buf = kmalloc(ebuf->size,
			    GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);

	if (ebuf->buf == NULL) {
		ebuf->size = PAGE_SIZE;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL) {
		ebuf->size = 128;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL)
		return -ENOMEM;

	ebuf->start = pos;

	return 0;
}
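
/*
 * A sketch of how a reader drives the two entry points above. The real
 * consumer is the debugfs/sysfs "error" node, which lives outside this
 * file; count, pos and ubuf below are placeholders for the read()
 * arguments, not names used by an actual caller here:
 *
 *	struct i915_error_state_file_priv error_priv = { .dev = dev };
 *	struct drm_i915_error_state_buf buf;
 *	int ret;
 *
 *	ret = i915_error_state_buf_init(&buf, dev_priv, count, pos);
 *	if (ret)
 *		return ret;
 *
 *	i915_error_state_get(dev, &error_priv);
 *	ret = i915_error_state_to_str(&buf, &error_priv);
 *	i915_error_state_put(&error_priv);
 *
 *	if (ret == 0)
 *		ret = copy_to_user(ubuf, buf.buf, buf.bytes) ?
 *						-EFAULT : buf.bytes;
 *
 *	kfree(buf.buf);
 *	return ret;
 */
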
static void i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}

static void i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		i915_error_object_free(error->ring[i].hws_page);
		i915_error_object_free(error->ring[i].ctx);
		kfree(error->ring[i].requests);
	}

	i915_error_object_free(error->semaphore_obj);

	for (i = 0; i < error->vm_count; i++)
		kfree(error->active_bo[i]);

	kfree(error->active_bo);
	kfree(error->active_bo_count);
	kfree(error->pinned_bo);
	kfree(error->pinned_bo_count);
	kfree(error->overlay);
	kfree(error->display);
	kfree(error);
}

static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
			 struct drm_i915_gem_object *src,
			 struct i915_address_space *vm)
{
	struct drm_i915_error_object *dst;
	struct i915_vma *vma = NULL;
	int num_pages;
	bool use_ggtt;
	int i = 0;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	num_pages = src->base.size >> PAGE_SHIFT;

	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	if (i915_gem_obj_bound(src, vm))
		dst->gtt_offset = i915_gem_obj_offset(src, vm);
	else
		dst->gtt_offset = -1;

	reloc_offset = dst->gtt_offset;
	if (i915_is_ggtt(vm))
		vma = i915_gem_obj_to_ggtt(src);
	use_ggtt = (src->cache_level == I915_CACHE_NONE &&
		    vma && (vma->bound & GLOBAL_BIND) &&
		    reloc_offset + num_pages * PAGE_SIZE <= dev_priv->gtt.mappable_end);

	/* Cannot access stolen address directly, try to use the aperture */
	if (src->stolen) {
		use_ggtt = true;

		if (!(vma && vma->bound & GLOBAL_BIND))
			goto unwind;

		reloc_offset = i915_gem_obj_ggtt_offset(src);
		if (reloc_offset + num_pages * PAGE_SIZE > dev_priv->gtt.mappable_end)
			goto unwind;
	}

	/* Cannot access snooped pages through the aperture */
	if (use_ggtt && src->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv->dev))
		goto unwind;

	dst->page_count = num_pages;
	while (num_pages--) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (use_ggtt) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */

			s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i++] = d;
		reloc_offset += PAGE_SIZE;
	}

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}

#define i915_error_ggtt_object_create(dev_priv, src) \
	i915_error_object_create((dev_priv), (src), &(dev_priv)->gtt.base)

static void capture_bo(struct drm_i915_error_buffer *err,
		       struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	err->size = obj->base.size;
	err->name = obj->base.name;
	err->rseqno = i915_gem_request_get_seqno(obj->last_read_req);
	err->wseqno = i915_gem_request_get_seqno(obj->last_write_req);
	err->gtt_offset = vma->node.start;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (i915_gem_obj_is_pinned(obj))
		err->pinned = 1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->userptr = obj->userptr.mm != NULL;
	err->ring = obj->last_read_req ?
			i915_gem_request_get_ring(obj->last_read_req)->id : -1;
	err->cache_level = obj->cache_level;
}

static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct i915_vma *vma;
	int i = 0;

	list_for_each_entry(vma, head, mm_list) {
		capture_bo(err++, vma);
		if (++i == count)
			break;
	}

	return i;
}

static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head,
			     struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_buffer * const first = err;
	struct drm_i915_error_buffer * const last = err + count;

	list_for_each_entry(obj, head, global_list) {
		struct i915_vma *vma;

		if (err == last)
			break;

		list_for_each_entry(vma, &obj->vma_list, vma_link)
			if (vma->vm == vm && vma->pin_count > 0)
				capture_bo(err++, vma);
	}

	return err - first;
}

/* Generate a semi-unique error code. The code is not meant to have meaning;
 * its only purpose is to try to prevent false duplicated bug reports by
 * grossly estimating a GPU error state.
 *
 * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
 * the hang if we could strip the GTT offset information from it.
 *
 * It's only a small step better than a random number in its current form.
 */
static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
					 struct drm_i915_error_state *error,
					 int *ring_id)
{
	uint32_t error_code = 0;
	int i;

	/* IPEHR would be an ideal way to detect errors, as it's the gross
	 * measure of "the command that hung." However, it can also hold some
	 * very common synchronization commands which almost always appear
	 * when the hang is strictly a client bug. Use instdone to
	 * differentiate those somewhat.
	 */
	for (i = 0; i < I915_NUM_RINGS; i++) {
		if (error->ring[i].hangcheck_action == HANGCHECK_HUNG) {
			if (ring_id)
				*ring_id = i;

			return error->ring[i].ipehr ^ error->ring[i].instdone;
		}
	}

	return error_code;
}
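
/*
 * Worked example (register values hypothetical): a hang on the render ring
 * with IPEHR = 0x0b160001 and INSTDONE = 0xfffffffe gives
 *
 *	ecode = 0x0b160001 ^ 0xfffffffe = 0xf4e9ffff
 *
 * which becomes the 0x%08x field of the "GPU HANG: ecode gen:ring:0x..."
 * message assembled by i915_error_capture_msg() below.
 */
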
static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	if (IS_GEN3(dev) || IS_GEN2(dev)) {
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 +
							      (i * 4));
	} else if (IS_GEN5(dev) || IS_GEN4(dev))
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 +
						      (i * 8));
	else if (INTEL_INFO(dev)->gen >= 6)
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 +
						      (i * 8));
}

static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
					struct drm_i915_error_state *error,
					struct intel_engine_cs *ring,
					struct drm_i915_error_ring *ering)
{
	struct intel_engine_cs *to;
	int i;

	if (!i915_semaphore_is_enabled(dev_priv->dev))
		return;

	if (!error->semaphore_obj)
		error->semaphore_obj =
			i915_error_ggtt_object_create(dev_priv,
						      dev_priv->semaphore_obj);

	for_each_ring(to, dev_priv, i) {
		int idx;
		u16 signal_offset;
		u32 *tmp;

		if (ring == to)
			continue;

		signal_offset = (GEN8_SIGNAL_OFFSET(ring, i) & (PAGE_SIZE - 1))
				/ 4;
		tmp = error->semaphore_obj->pages[0];
		idx = intel_ring_sync_index(ring, to);

		ering->semaphore_mboxes[idx] = tmp[signal_offset];
		ering->semaphore_seqno[idx] = ring->semaphore.sync_seqno[idx];
	}
}

static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
					struct intel_engine_cs *ring,
					struct drm_i915_error_ring *ering)
{
	ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(ring->mmio_base));
	ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(ring->mmio_base));
	ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
	ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];

	if (HAS_VEBOX(dev_priv->dev)) {
		ering->semaphore_mboxes[2] =
			I915_READ(RING_SYNC_2(ring->mmio_base));
		ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
	}
}

static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_engine_cs *ring,
				   struct drm_i915_error_ring *ering)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		ering->rc_psmi = I915_READ(ring->mmio_base + 0x50);
		ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
		if (INTEL_INFO(dev)->gen >= 8)
			gen8_record_semaphore_state(dev_priv, error, ring, ering);
		else
			gen6_record_semaphore_state(dev_priv, ring, ering);
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		ering->faddr = I915_READ(RING_DMA_FADD(ring->mmio_base));
		ering->ipeir = I915_READ(RING_IPEIR(ring->mmio_base));
		ering->ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
		ering->instdone = I915_READ(RING_INSTDONE(ring->mmio_base));
		ering->instps = I915_READ(RING_INSTPS(ring->mmio_base));
		ering->bbaddr = I915_READ(RING_BBADDR(ring->mmio_base));
		if (INTEL_INFO(dev)->gen >= 8) {
			ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(ring->mmio_base)) << 32;
			ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
		}
		ering->bbstate = I915_READ(RING_BBSTATE(ring->mmio_base));
	} else {
		ering->faddr = I915_READ(DMA_FADD_I8XX);
		ering->ipeir = I915_READ(IPEIR);
		ering->ipehr = I915_READ(IPEHR);
		ering->instdone = I915_READ(INSTDONE);
	}

	ering->waiting = waitqueue_active(&ring->irq_queue);
	ering->instpm = I915_READ(RING_INSTPM(ring->mmio_base));
	ering->seqno = ring->get_seqno(ring, false);
	ering->acthd = intel_ring_get_active_head(ring);
	ering->head = I915_READ_HEAD(ring);
	ering->tail = I915_READ_TAIL(ring);
	ering->ctl = I915_READ_CTL(ring);

	if (I915_NEED_GFX_HWS(dev)) {
		int mmio;

		if (IS_GEN7(dev)) {
			switch (ring->id) {
			default:
			case RCS:
				mmio = RENDER_HWS_PGA_GEN7;
				break;
			case BCS:
				mmio = BLT_HWS_PGA_GEN7;
				break;
			case VCS:
				mmio = BSD_HWS_PGA_GEN7;
				break;
			case VECS:
				mmio = VEBOX_HWS_PGA_GEN7;
				break;
			}
		} else if (IS_GEN6(ring->dev)) {
			mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
		} else {
			/* XXX: gen8 returns to sanity */
			mmio = RING_HWS_PGA(ring->mmio_base);
		}
		ering->hws = I915_READ(mmio);
	}

	ering->hangcheck_score = ring->hangcheck.score;
	ering->hangcheck_action = ring->hangcheck.action;

	if (USES_PPGTT(dev)) {
		int i;

		ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));

		if (IS_GEN6(dev))
			ering->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE_READ(ring));
		else if (IS_GEN7(dev))
			ering->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE(ring));
		else if (INTEL_INFO(dev)->gen >= 8)
			for (i = 0; i < 4; i++) {
				ering->vm_info.pdp[i] =
					I915_READ(GEN8_RING_PDP_UDW(ring, i));
				ering->vm_info.pdp[i] <<= 32;
				ering->vm_info.pdp[i] |=
					I915_READ(GEN8_RING_PDP_LDW(ring, i));
			}
	}
}

static void i915_gem_record_active_context(struct intel_engine_cs *ring,
					   struct drm_i915_error_state *error,
					   struct drm_i915_error_ring *ering)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* Currently render ring is the only HW context user */
	if (ring->id != RCS || !error->ccid)
		return;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (!i915_gem_obj_ggtt_bound(obj))
			continue;

		if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
			ering->ctx = i915_error_ggtt_object_create(dev_priv, obj);
			break;
		}
	}
}

static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *request;
	int i, count;

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];
		struct intel_ringbuffer *rbuf;

		error->ring[i].pid = -1;

		if (ring->dev == NULL)
			continue;

		error->ring[i].valid = true;

		i915_record_ring_state(dev, error, ring, &error->ring[i]);

		request = i915_gem_find_active_request(ring);
		if (request) {
			struct i915_address_space *vm;

			vm = request->ctx && request->ctx->ppgtt ?
				&request->ctx->ppgtt->base :
				&dev_priv->gtt.base;

			/* We need to copy these to an anonymous buffer
			 * as the simplest method to avoid being overwritten
			 * by userspace.
			 */
			error->ring[i].batchbuffer =
				i915_error_object_create(dev_priv,
							 request->batch_obj,
							 vm);

			if (HAS_BROKEN_CS_TLB(dev_priv->dev))
				error->ring[i].wa_batchbuffer =
					i915_error_ggtt_object_create(dev_priv,
							     ring->scratch.obj);

			if (request->pid) {
				struct task_struct *task;

				rcu_read_lock();
				task = pid_task(request->pid, PIDTYPE_PID);
				if (task) {
					strcpy(error->ring[i].comm, task->comm);
					error->ring[i].pid = task->pid;
				}
				rcu_read_unlock();
			}
		}

		if (i915.enable_execlists) {
			/* TODO: This is only a small fix to keep basic error
			 * capture working, but we need to add more information
			 * for it to be useful (e.g. dump the context being
			 * executed).
			 */
			if (request)
				rbuf = request->ctx->engine[ring->id].ringbuf;
			else
				rbuf = ring->default_context->engine[ring->id].ringbuf;
		} else
			rbuf = ring->buffer;

		error->ring[i].cpu_ring_head = rbuf->head;
		error->ring[i].cpu_ring_tail = rbuf->tail;

		error->ring[i].ringbuffer =
			i915_error_ggtt_object_create(dev_priv, rbuf->obj);

		error->ring[i].hws_page =
			i915_error_ggtt_object_create(dev_priv, ring->status_page.obj);

		i915_gem_record_active_context(ring, error, &error->ring[i]);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kcalloc(count, sizeof(*error->ring[i].requests),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->postfix;
		}
	}
}

/* FIXME: Since pin count/bound list is global, we duplicate what we capture per
 * VM.
 */
static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
				struct drm_i915_error_state *error,
				struct i915_address_space *vm,
				const int ndx)
{
	struct drm_i915_error_buffer *active_bo = NULL, *pinned_bo = NULL;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int i;

	i = 0;
	list_for_each_entry(vma, &vm->active_list, mm_list)
		i++;
	error->active_bo_count[ndx] = i;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		list_for_each_entry(vma, &obj->vma_list, vma_link)
			if (vma->vm == vm && vma->pin_count > 0)
				i++;
	}
	error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];

	if (i) {
		active_bo = kcalloc(i, sizeof(*active_bo), GFP_ATOMIC);
		if (active_bo)
			pinned_bo = active_bo + error->active_bo_count[ndx];
	}

	if (active_bo)
		error->active_bo_count[ndx] =
			capture_active_bo(active_bo,
					  error->active_bo_count[ndx],
					  &vm->active_list);

	if (pinned_bo)
		error->pinned_bo_count[ndx] =
			capture_pinned_bo(pinned_bo,
					  error->pinned_bo_count[ndx],
					  &dev_priv->mm.bound_list, vm);
	error->active_bo[ndx] = active_bo;
	error->pinned_bo[ndx] = pinned_bo;
}

static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
				     struct drm_i915_error_state *error)
{
	struct i915_address_space *vm;
	int cnt = 0, i = 0;

	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
		cnt++;

	error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
	error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
	error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
					 GFP_ATOMIC);
	error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count),
					 GFP_ATOMIC);

	if (error->active_bo == NULL ||
	    error->pinned_bo == NULL ||
	    error->active_bo_count == NULL ||
	    error->pinned_bo_count == NULL) {
		kfree(error->active_bo);
		kfree(error->active_bo_count);
		kfree(error->pinned_bo);
		kfree(error->pinned_bo_count);

		error->active_bo = NULL;
		error->active_bo_count = NULL;
		error->pinned_bo = NULL;
		error->pinned_bo_count = NULL;
	} else {
		list_for_each_entry(vm, &dev_priv->vm_list, global_link)
			i915_gem_capture_vm(dev_priv, error, vm, i++);

		error->vm_count = cnt;
	}
}

/* Capture all registers which don't fit into another category. */
static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	struct drm_device *dev = dev_priv->dev;
	int i;

	/* General organization
	 * 1. Registers specific to a single generation
	 * 2. Registers which belong to multiple generations
	 * 3. Feature specific registers.
	 * 4. Everything else
	 * Please try to follow the order.
	 */

	/* 1: Registers specific to a single generation */
	if (IS_VALLEYVIEW(dev)) {
		error->gtier[0] = I915_READ(GTIER);
		error->ier = I915_READ(VLV_IER);
		error->forcewake = I915_READ(FORCEWAKE_VLV);
	}

	if (IS_GEN7(dev))
		error->err_int = I915_READ(GEN7_ERR_INT);

	if (INTEL_INFO(dev)->gen >= 8) {
		error->fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
		error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
	}

	if (IS_GEN6(dev)) {
		error->forcewake = I915_READ(FORCEWAKE);
		error->gab_ctl = I915_READ(GAB_CTL);
		error->gfx_mode = I915_READ(GFX_MODE);
	}

	/* 2: Registers which belong to multiple generations */
	if (INTEL_INFO(dev)->gen >= 7)
		error->forcewake = I915_READ(FORCEWAKE_MT);

	if (INTEL_INFO(dev)->gen >= 6) {
		error->derrmr = I915_READ(DERRMR);
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	/* 3: Feature specific registers */
	if (IS_GEN6(dev) || IS_GEN7(dev)) {
		error->gam_ecochk = I915_READ(GAM_ECOCHK);
		error->gac_eco = I915_READ(GAC_ECO_BITS);
	}

	/* 4: Everything else */
	if (HAS_HW_CONTEXTS(dev))
		error->ccid = I915_READ(CCID);

	if (INTEL_INFO(dev)->gen >= 8) {
		error->ier = I915_READ(GEN8_DE_MISC_IER);
		for (i = 0; i < 4; i++)
			error->gtier[i] = I915_READ(GEN8_GT_IER(i));
	} else if (HAS_PCH_SPLIT(dev)) {
		error->ier = I915_READ(DEIER);
		error->gtier[0] = I915_READ(GTIER);
	} else if (IS_GEN2(dev)) {
		error->ier = I915_READ16(IER);
	} else if (!IS_VALLEYVIEW(dev)) {
		error->ier = I915_READ(IER);
	}
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);

	i915_get_extra_instdone(dev, error->extra_instdone);
}

static void i915_error_capture_msg(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   bool wedged,
				   const char *error_msg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 ecode;
	int ring_id = -1, len;

	ecode = i915_error_generate_code(dev_priv, error, &ring_id);

	len = scnprintf(error->error_msg, sizeof(error->error_msg),
			"GPU HANG: ecode %d:%d:0x%08x",
			INTEL_INFO(dev)->gen, ring_id, ecode);

	if (ring_id != -1 && error->ring[ring_id].pid != -1)
		len += scnprintf(error->error_msg + len,
				 sizeof(error->error_msg) - len,
				 ", in %s [%d]",
				 error->ring[ring_id].comm,
				 error->ring[ring_id].pid);

	scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
		  ", reason: %s, action: %s",
		  error_msg,
		  wedged ? "reset" : "continue");
}

static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	error->reset_count = i915_reset_count(&dev_priv->gpu_error);
	error->suspend_count = dev_priv->suspend_count;
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
void i915_capture_error_state(struct drm_device *dev, bool wedged,
			      const char *error_msg)
{
	static bool warned;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	kref_init(&error->ref);

	i915_capture_gen_state(dev_priv, error);
	i915_capture_reg_state(dev_priv, error);
	i915_gem_capture_buffers(dev_priv, error);
	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	i915_error_capture_msg(dev, error, wedged, error_msg);
	DRM_INFO("%s\n", error->error_msg);

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	if (dev_priv->gpu_error.first_error == NULL) {
		dev_priv->gpu_error.first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error) {
		i915_error_state_free(&error->ref);
		return;
	}

	if (!warned) {
		DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
		DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
		DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
		DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
		DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev->primary->index);
		warned = true;
	}
}
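
/*
 * The dump announced above can then be retrieved from userspace, e.g.
 * (card index 0 assumed; the clear-on-write behaviour is handled by the
 * sysfs/debugfs code outside this file):
 *
 *	# cat /sys/class/drm/card0/error > gpu_hang.txt
 *	# echo > /sys/class/drm/card0/error	// clears the saved state
 */
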
void i915_error_state_get(struct drm_device *dev,
			  struct i915_error_state_file_priv *error_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->gpu_error.lock);
	error_priv->error = dev_priv->gpu_error.first_error;
	if (error_priv->error)
		kref_get(&error_priv->error->ref);
	spin_unlock_irq(&dev_priv->gpu_error.lock);
}

void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
{
	if (error_priv->error)
		kref_put(&error_priv->error->ref, i915_error_state_free);
}

void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;

	spin_lock_irq(&dev_priv->gpu_error.lock);
	error = dev_priv->gpu_error.first_error;
	dev_priv->gpu_error.first_error = NULL;
	spin_unlock_irq(&dev_priv->gpu_error.lock);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}

const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	case I915_CACHE_WT: return " WT";
	default: return "";
	}
}

/* NB: please notice the memset */
void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

	if (IS_GEN2(dev) || IS_GEN3(dev))
		instdone[0] = I915_READ(INSTDONE);
	else if (IS_GEN4(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
		instdone[0] = I915_READ(INSTDONE_I965);
		instdone[1] = I915_READ(INSTDONE1);
	} else if (INTEL_INFO(dev)->gen >= 7) {
		instdone[0] = I915_READ(GEN7_INSTDONE_1);
		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
	}
}