intel_lrc.c

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "../i915_selftest.h"
#include "igt_flush_test.h"

#include "mock_context.h"
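
/*
 * The spinner submits a batch that reports its seqno to a scratch page and
 * then loops back on itself, keeping the engine busy indefinitely until the
 * CPU rewrites the batch to terminate it. It is used below to occupy an
 * engine so that preemption between contexts can be provoked and observed.
 */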
struct spinner {
        struct drm_i915_private *i915;
        struct drm_i915_gem_object *hws;
        struct drm_i915_gem_object *obj;
        u32 *batch;
        void *seqno;
};
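
/*
 * Allocate and map the two single-page objects backing a spinner: "hws",
 * the scratch page used to report per-context seqnos back to the CPU, and
 * "obj", the batch buffer itself. Both stay CPU-mapped so the tests can
 * poll the seqno and later rewrite the batch to end the spin. The batch
 * emitted below uses gen8+ command formats, hence the GEM_BUG_ON.
 */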
static int spinner_init(struct spinner *spin, struct drm_i915_private *i915)
{
        unsigned int mode;
        void *vaddr;
        int err;

        GEM_BUG_ON(INTEL_GEN(i915) < 8);

        memset(spin, 0, sizeof(*spin));
        spin->i915 = i915;

        spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
        if (IS_ERR(spin->hws)) {
                err = PTR_ERR(spin->hws);
                goto err;
        }

        spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
        if (IS_ERR(spin->obj)) {
                err = PTR_ERR(spin->obj);
                goto err_hws;
        }

        i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
        vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
        if (IS_ERR(vaddr)) {
                err = PTR_ERR(vaddr);
                goto err_obj;
        }
        spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);

        mode = HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
        vaddr = i915_gem_object_pin_map(spin->obj, mode);
        if (IS_ERR(vaddr)) {
                err = PTR_ERR(vaddr);
                goto err_unpin_hws;
        }
        spin->batch = vaddr;

        return 0;

err_unpin_hws:
        i915_gem_object_unpin_map(spin->hws);
err_obj:
        i915_gem_object_put(spin->obj);
err_hws:
        i915_gem_object_put(spin->hws);
err:
        return err;
}
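
/*
 * Each fence context is given its own dword within the scratch page;
 * hws_address() returns the GPU address of that dword for a request, and
 * the spinning batch writes the request's seqno there so the CPU can tell
 * when the batch has actually started executing.
 */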
static unsigned int seqno_offset(u64 fence)
{
        return offset_in_page(sizeof(u32) * fence);
}

static u64 hws_address(const struct i915_vma *hws,
                       const struct i915_request *rq)
{
        return hws->node.start + seqno_offset(rq->fence.context);
}
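
/*
 * Bind the batch and scratch pages into the request's ppGTT and write the
 * spinning batch: store the request's seqno into its scratch dword, execute
 * the caller's arbitration command (MI_ARB_CHECK to allow preemption at
 * that point, MI_NOOP to forbid it), then branch back to the start of the
 * batch. The loop only terminates once spinner_end() replaces the first
 * instruction with MI_BATCH_BUFFER_END.
 */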
static int emit_recurse_batch(struct spinner *spin,
                              struct i915_request *rq,
                              u32 arbitration_command)
{
        struct i915_address_space *vm = &rq->ctx->ppgtt->base;
        struct i915_vma *hws, *vma;
        u32 *batch;
        int err;

        vma = i915_vma_instance(spin->obj, vm, NULL);
        if (IS_ERR(vma))
                return PTR_ERR(vma);

        hws = i915_vma_instance(spin->hws, vm, NULL);
        if (IS_ERR(hws))
                return PTR_ERR(hws);

        err = i915_vma_pin(vma, 0, 0, PIN_USER);
        if (err)
                return err;

        err = i915_vma_pin(hws, 0, 0, PIN_USER);
        if (err)
                goto unpin_vma;

        i915_vma_move_to_active(vma, rq, 0);
        if (!i915_gem_object_has_active_reference(vma->obj)) {
                i915_gem_object_get(vma->obj);
                i915_gem_object_set_active_reference(vma->obj);
        }

        i915_vma_move_to_active(hws, rq, 0);
        if (!i915_gem_object_has_active_reference(hws->obj)) {
                i915_gem_object_get(hws->obj);
                i915_gem_object_set_active_reference(hws->obj);
        }

        batch = spin->batch;

        *batch++ = MI_STORE_DWORD_IMM_GEN4;
        *batch++ = lower_32_bits(hws_address(hws, rq));
        *batch++ = upper_32_bits(hws_address(hws, rq));
        *batch++ = rq->fence.seqno;

        *batch++ = arbitration_command;

        *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
        *batch++ = lower_32_bits(vma->node.start);
        *batch++ = upper_32_bits(vma->node.start);

        *batch++ = MI_BATCH_BUFFER_END; /* not reached */

        i915_gem_chipset_flush(spin->i915);

        err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);

        i915_vma_unpin(hws);
unpin_vma:
        i915_vma_unpin(vma);
        return err;
}

static struct i915_request *
spinner_create_request(struct spinner *spin,
                       struct i915_gem_context *ctx,
                       struct intel_engine_cs *engine,
                       u32 arbitration_command)
{
        struct i915_request *rq;
        int err;

        rq = i915_request_alloc(engine, ctx);
        if (IS_ERR(rq))
                return rq;

        err = emit_recurse_batch(spin, rq, arbitration_command);
        if (err) {
                __i915_request_add(rq, false);
                return ERR_PTR(err);
        }

        return rq;
}

static u32 hws_seqno(const struct spinner *spin, const struct i915_request *rq)
{
        u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);

        return READ_ONCE(*seqno);
}
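
/*
 * Break the infinite loop: overwrite the first instruction of the batch
 * with MI_BATCH_BUFFER_END and flush, so the next pass through the loop
 * completes the request.
 */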
static void spinner_end(struct spinner *spin)
{
        *spin->batch = MI_BATCH_BUFFER_END;
        i915_gem_chipset_flush(spin->i915);
}

static void spinner_fini(struct spinner *spin)
{
        spinner_end(spin);

        i915_gem_object_unpin_map(spin->obj);
        i915_gem_object_put(spin->obj);

        i915_gem_object_unpin_map(spin->hws);
        i915_gem_object_put(spin->hws);
}
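
/*
 * Wait up to 10ms for the request to be submitted to hardware, then poll
 * the scratch page (briefly busy-waiting, then sleeping for up to a second)
 * until the spinner reports its seqno, i.e. until the batch has started
 * running on the engine.
 */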
static bool wait_for_spinner(struct spinner *spin, struct i915_request *rq)
{
        if (!wait_event_timeout(rq->execute,
                                READ_ONCE(rq->global_seqno),
                                msecs_to_jiffies(10)))
                return false;

        return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
                                               rq->fence.seqno),
                             10) &&
                 wait_for(i915_seqno_passed(hws_seqno(spin, rq),
                                            rq->fence.seqno),
                          1000));
}
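
/*
 * Sanitycheck: on every engine, submit a single spinner, check that it
 * starts executing, then terminate it and flush. This only exercises basic
 * execlists submission; no preemption is involved.
 */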
static int live_sanitycheck(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine;
        struct i915_gem_context *ctx;
        enum intel_engine_id id;
        struct spinner spin;
        int err = -ENOMEM;

        if (!HAS_LOGICAL_RING_CONTEXTS(i915))
                return 0;

        mutex_lock(&i915->drm.struct_mutex);

        if (spinner_init(&spin, i915))
                goto err_unlock;

        ctx = kernel_context(i915);
        if (!ctx)
                goto err_spin;

        for_each_engine(engine, i915, id) {
                struct i915_request *rq;

                rq = spinner_create_request(&spin, ctx, engine, MI_NOOP);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
                        goto err_ctx;
                }

                i915_request_add(rq);
                if (!wait_for_spinner(&spin, rq)) {
                        GEM_TRACE("spinner failed to start\n");
                        GEM_TRACE_DUMP();
                        i915_gem_set_wedged(i915);
                        err = -EIO;
                        goto err_ctx;
                }

                spinner_end(&spin);
                if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
                        err = -EIO;
                        goto err_ctx;
                }
        }

        err = 0;
err_ctx:
        kernel_context_close(ctx);
err_spin:
        spinner_fini(&spin);
err_unlock:
        igt_flush_test(i915, I915_WAIT_LOCKED);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}
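
/*
 * Preemption test: queue a spinner from a minimum-priority context, then
 * submit a second spinner from a maximum-priority context and require it
 * to start while the first is still spinning, i.e. require the scheduler
 * to preempt the low priority context on submission.
 */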
static int live_preempt(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct i915_gem_context *ctx_hi, *ctx_lo;
        struct spinner spin_hi, spin_lo;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int err = -ENOMEM;

        if (!HAS_LOGICAL_RING_PREEMPTION(i915))
                return 0;

        mutex_lock(&i915->drm.struct_mutex);

        if (spinner_init(&spin_hi, i915))
                goto err_unlock;

        if (spinner_init(&spin_lo, i915))
                goto err_spin_hi;

        ctx_hi = kernel_context(i915);
        if (!ctx_hi)
                goto err_spin_lo;
        ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;

        ctx_lo = kernel_context(i915);
        if (!ctx_lo)
                goto err_ctx_hi;
        ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;

        for_each_engine(engine, i915, id) {
                struct i915_request *rq;

                rq = spinner_create_request(&spin_lo, ctx_lo, engine,
                                            MI_ARB_CHECK);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
                        goto err_ctx_lo;
                }

                i915_request_add(rq);
                if (!wait_for_spinner(&spin_lo, rq)) {
                        GEM_TRACE("lo spinner failed to start\n");
                        GEM_TRACE_DUMP();
                        i915_gem_set_wedged(i915);
                        err = -EIO;
                        goto err_ctx_lo;
                }

                rq = spinner_create_request(&spin_hi, ctx_hi, engine,
                                            MI_ARB_CHECK);
                if (IS_ERR(rq)) {
                        spinner_end(&spin_lo);
                        err = PTR_ERR(rq);
                        goto err_ctx_lo;
                }

                i915_request_add(rq);
                if (!wait_for_spinner(&spin_hi, rq)) {
                        GEM_TRACE("hi spinner failed to start\n");
                        GEM_TRACE_DUMP();
                        i915_gem_set_wedged(i915);
                        err = -EIO;
                        goto err_ctx_lo;
                }

                spinner_end(&spin_hi);
                spinner_end(&spin_lo);
                if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
                        err = -EIO;
                        goto err_ctx_lo;
                }
        }

        err = 0;
err_ctx_lo:
        kernel_context_close(ctx_lo);
err_ctx_hi:
        kernel_context_close(ctx_hi);
err_spin_lo:
        spinner_fini(&spin_lo);
err_spin_hi:
        spinner_fini(&spin_hi);
err_unlock:
        igt_flush_test(i915, I915_WAIT_LOCKED);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}
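
/*
 * Late preemption: both contexts start at default priority, so the second
 * spinner must not overtake the first. Only after its priority is raised
 * via engine->schedule() do we expect the scheduler to preempt the still
 * spinning low priority request.
 */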
static int live_late_preempt(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct i915_gem_context *ctx_hi, *ctx_lo;
        struct spinner spin_hi, spin_lo;
        struct intel_engine_cs *engine;
        struct i915_sched_attr attr = {};
        enum intel_engine_id id;
        int err = -ENOMEM;

        if (!HAS_LOGICAL_RING_PREEMPTION(i915))
                return 0;

        mutex_lock(&i915->drm.struct_mutex);

        if (spinner_init(&spin_hi, i915))
                goto err_unlock;

        if (spinner_init(&spin_lo, i915))
                goto err_spin_hi;

        ctx_hi = kernel_context(i915);
        if (!ctx_hi)
                goto err_spin_lo;

        ctx_lo = kernel_context(i915);
        if (!ctx_lo)
                goto err_ctx_hi;

        for_each_engine(engine, i915, id) {
                struct i915_request *rq;

                rq = spinner_create_request(&spin_lo, ctx_lo, engine,
                                            MI_ARB_CHECK);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
                        goto err_ctx_lo;
                }

                i915_request_add(rq);
                if (!wait_for_spinner(&spin_lo, rq)) {
                        pr_err("First context failed to start\n");
                        goto err_wedged;
                }

                rq = spinner_create_request(&spin_hi, ctx_hi, engine, MI_NOOP);
                if (IS_ERR(rq)) {
                        spinner_end(&spin_lo);
                        err = PTR_ERR(rq);
                        goto err_ctx_lo;
                }

                i915_request_add(rq);
                if (wait_for_spinner(&spin_hi, rq)) {
                        pr_err("Second context overtook first?\n");
                        goto err_wedged;
                }

                attr.priority = I915_PRIORITY_MAX;
                engine->schedule(rq, &attr);

                if (!wait_for_spinner(&spin_hi, rq)) {
                        pr_err("High priority context failed to preempt the low priority context\n");
                        GEM_TRACE_DUMP();
                        goto err_wedged;
                }

                spinner_end(&spin_hi);
                spinner_end(&spin_lo);
                if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
                        err = -EIO;
                        goto err_ctx_lo;
                }
        }

        err = 0;
err_ctx_lo:
        kernel_context_close(ctx_lo);
err_ctx_hi:
        kernel_context_close(ctx_hi);
err_spin_lo:
        spinner_fini(&spin_lo);
err_spin_hi:
        spinner_fini(&spin_hi);
err_unlock:
        igt_flush_test(i915, I915_WAIT_LOCKED);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;

err_wedged:
        spinner_end(&spin_hi);
        spinner_end(&spin_lo);
        i915_gem_set_wedged(i915);
        err = -EIO;
        goto err_ctx_lo;
}

int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(live_sanitycheck),
                SUBTEST(live_preempt),
                SUBTEST(live_late_preempt),
        };

        if (!HAS_EXECLISTS(i915))
                return 0;

        return i915_subtests(tests, i915);
}