/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/prime_numbers.h>

#include "../i915_selftest.h"
#include "i915_random.h"
#include "igt_flush_test.h"

#include "mock_drm.h"
#include "mock_gem_device.h"
#include "huge_gem_object.h"

#define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))
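
/*
 * live_test tracks GPU health across a single subtest: begin_live_test()
 * waits for the device to idle, clears the missed-interrupt mask and samples
 * the reset counter, so that end_live_test() can flag any hangs, resets or
 * missed breadcrumb interrupts that the test provoked in between.
 */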
struct live_test {
	struct drm_i915_private *i915;
	const char *func;
	const char *name;

	unsigned int reset_count;
};

static int begin_live_test(struct live_test *t,
			   struct drm_i915_private *i915,
			   const char *func,
			   const char *name)
{
	int err;

	t->i915 = i915;
	t->func = func;
	t->name = name;

	err = i915_gem_wait_for_idle(i915,
				     I915_WAIT_LOCKED,
				     MAX_SCHEDULE_TIMEOUT);
	if (err) {
		pr_err("%s(%s): failed to idle before, with err=%d!",
		       func, name, err);
		return err;
	}

	i915->gpu_error.missed_irq_rings = 0;
	t->reset_count = i915_reset_count(&i915->gpu_error);

	return 0;
}

static int end_live_test(struct live_test *t)
{
	struct drm_i915_private *i915 = t->i915;

	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		return -EIO;

	if (t->reset_count != i915_reset_count(&i915->gpu_error)) {
		pr_err("%s(%s): GPU was reset %d times!\n",
		       t->func, t->name,
		       i915_reset_count(&i915->gpu_error) - t->reset_count);
		return -EIO;
	}

	if (i915->gpu_error.missed_irq_rings) {
		pr_err("%s(%s): Missed interrupts on engines %lx\n",
		       t->func, t->name, i915->gpu_error.missed_irq_rings);
		return -EIO;
	}

	return 0;
}

static int live_nop_switch(void *arg)
{
	const unsigned int nctx = 1024;
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context **ctx;
	enum intel_engine_id id;
	struct drm_file *file;
	struct live_test t;
	unsigned long n;
	int err = -ENODEV;

	/*
	 * Create as many contexts as we can feasibly get away with
	 * and check we can switch between them rapidly.
	 *
	 * Serves as very simple stress test for submission and HW switching
	 * between contexts.
	 */

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&i915->drm.struct_mutex);
	intel_runtime_pm_get(i915);

	ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto out_unlock;
	}

	for (n = 0; n < nctx; n++) {
		ctx[n] = i915_gem_create_context(i915, file->driver_priv);
		if (IS_ERR(ctx[n])) {
			err = PTR_ERR(ctx[n]);
			goto out_unlock;
		}
	}

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;
		unsigned long end_time, prime;
		ktime_t times[2] = {};

		times[0] = ktime_get_raw();
		for (n = 0; n < nctx; n++) {
			rq = i915_request_alloc(engine, ctx[n]);
			if (IS_ERR(rq)) {
				err = PTR_ERR(rq);
				goto out_unlock;
			}
			i915_request_add(rq);
		}
		if (i915_request_wait(rq,
				      I915_WAIT_LOCKED,
				      HZ / 5) < 0) {
			pr_err("Failed to populate %d contexts\n", nctx);
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto out_unlock;
		}
		times[1] = ktime_get_raw();

		pr_info("Populated %d contexts on %s in %lluns\n",
			nctx, engine->name, ktime_to_ns(times[1] - times[0]));

		err = begin_live_test(&t, i915, __func__, engine->name);
		if (err)
			goto out_unlock;

		end_time = jiffies + i915_selftest.timeout_jiffies;
		for_each_prime_number_from(prime, 2, 8192) {
			times[1] = ktime_get_raw();

			for (n = 0; n < prime; n++) {
				rq = i915_request_alloc(engine, ctx[n % nctx]);
				if (IS_ERR(rq)) {
					err = PTR_ERR(rq);
					goto out_unlock;
				}

				/*
				 * This space is left intentionally blank.
				 *
				 * We do not actually want to perform any
				 * action with this request, we just want
				 * to measure the latency in allocation
				 * and submission of our breadcrumbs -
				 * ensuring that the bare request is sufficient
				 * for the system to work (i.e. proper HEAD
				 * tracking of the rings, interrupt handling,
				 * etc). It also gives us the lowest bounds
				 * for latency.
				 */

				i915_request_add(rq);
			}
			if (i915_request_wait(rq,
					      I915_WAIT_LOCKED,
					      HZ / 5) < 0) {
				pr_err("Switching between %ld contexts timed out\n",
				       prime);
				i915_gem_set_wedged(i915);
				break;
			}

			times[1] = ktime_sub(ktime_get_raw(), times[1]);
			if (prime == 2)
				times[0] = times[1];

			if (__igt_timeout(end_time, NULL))
				break;
		}

		err = end_live_test(&t);
		if (err)
			goto out_unlock;

		pr_info("Switch latencies on %s: 1 = %lluns, %lu = %lluns\n",
			engine->name,
			ktime_to_ns(times[0]),
			prime - 1, div64_u64(ktime_to_ns(times[1]), prime - 1));
	}

out_unlock:
	intel_runtime_pm_put(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	mock_file_free(i915, file);
	return err;
}
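
/*
 * Build and pin a batch that emits one MI_STORE_DWORD_IMM per page of the
 * target vma, each store writing @value at @offset within its page. The
 * command encoding varies with hardware generation (64bit addresses on
 * gen8+, the MI_USE_GGTT flag on gen4/5), so the batch is emitted for the
 * vm it will execute in.
 */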
static struct i915_vma *
gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
{
	struct drm_i915_gem_object *obj;
	const int gen = INTEL_GEN(vma->vm->i915);
	unsigned long n, size;
	u32 *cmd;
	int err;

	size = (4 * count + 1) * sizeof(u32);
	size = round_up(size, PAGE_SIZE);
	obj = i915_gem_object_create_internal(vma->vm->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
	offset += vma->node.start;

	for (n = 0; n < count; n++) {
		if (gen >= 8) {
			*cmd++ = MI_STORE_DWORD_IMM_GEN4;
			*cmd++ = lower_32_bits(offset);
			*cmd++ = upper_32_bits(offset);
			*cmd++ = value;
		} else if (gen >= 4) {
			*cmd++ = MI_STORE_DWORD_IMM_GEN4 |
				(gen < 6 ? MI_USE_GGTT : 0);
			*cmd++ = 0;
			*cmd++ = offset;
			*cmd++ = value;
		} else {
			*cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
			*cmd++ = offset;
			*cmd++ = value;
		}
		offset += PAGE_SIZE;
	}
	*cmd = MI_BATCH_BUFFER_END;
	i915_gem_object_unpin_map(obj);

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	if (err)
		goto err;

	vma = i915_vma_instance(obj, vma->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}
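
/*
 * A huge_gem_object is backed by a small pool of physical pages that are
 * mapped repeatedly to fake a much larger dma address range:
 * real_page_count() is the number of distinct backing pages, while
 * fake_page_count() is the apparent size as seen through the GTT.
 */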
static unsigned long real_page_count(struct drm_i915_gem_object *obj)
{
	return huge_gem_object_phys_size(obj) >> PAGE_SHIFT;
}

static unsigned long fake_page_count(struct drm_i915_gem_object *obj)
{
	return huge_gem_object_dma_size(obj) >> PAGE_SHIFT;
}
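
/*
 * Submit a batch on @engine in @ctx (using the context's ppgtt, or the
 * GGTT as fallback) that writes the value @dw into dword slot @dw of every
 * real page of @obj, going through a different window of the fake mapping
 * for each @dw, so that cpu_check() can later verify that every context
 * wrote through its own view of the object.
 */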
static int gpu_fill(struct drm_i915_gem_object *obj,
		    struct i915_gem_context *ctx,
		    struct intel_engine_cs *engine,
		    unsigned int dw)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_address_space *vm =
		ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
	struct i915_request *rq;
	struct i915_vma *vma;
	struct i915_vma *batch;
	unsigned int flags;
	int err;

	GEM_BUG_ON(obj->base.size > vm->total);
	GEM_BUG_ON(!intel_engine_can_store_dword(engine));

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	if (err)
		return err;

	err = i915_vma_pin(vma, 0, 0, PIN_HIGH | PIN_USER);
	if (err)
		return err;

	/* Within the GTT the huge object maps every page onto
	 * its 1024 real pages (using phys_pfn = dma_pfn % 1024).
	 * We set the nth dword within the page using the nth
	 * mapping via the GTT - this should exercise the GTT mapping
	 * whilst checking that each context provides a unique view
	 * into the object.
	 */
	batch = gpu_fill_dw(vma,
			    (dw * real_page_count(obj)) << PAGE_SHIFT |
			    (dw * sizeof(u32)),
			    real_page_count(obj),
			    dw);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto err_vma;
	}

	rq = i915_request_alloc(engine, ctx);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_batch;
	}

	flags = 0;
	if (INTEL_GEN(vm->i915) <= 5)
		flags |= I915_DISPATCH_SECURE;

	err = engine->emit_bb_start(rq,
				    batch->node.start, batch->node.size,
				    flags);
	if (err)
		goto err_request;

	err = i915_vma_move_to_active(batch, rq, 0);
	if (err)
		goto skip_request;

	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	if (err)
		goto skip_request;

	i915_gem_object_set_active_reference(batch->obj);
	i915_vma_unpin(batch);
	i915_vma_close(batch);

	i915_vma_unpin(vma);

	i915_request_add(rq);

	return 0;

skip_request:
	i915_request_skip(rq, err);
err_request:
	i915_request_add(rq);
err_batch:
	i915_vma_unpin(batch);
	i915_vma_put(batch);
err_vma:
	i915_vma_unpin(vma);
	return err;
}
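
/*
 * Fill every real backing page of @obj with @value from the CPU, flushing
 * the CPU caches by hand on !llc platforms so that the GPU sees the writes.
 */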
static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
{
	const bool has_llc = HAS_LLC(to_i915(obj->base.dev));
	unsigned int n, m, need_flush;
	int err;

	err = i915_gem_obj_prepare_shmem_write(obj, &need_flush);
	if (err)
		return err;

	for (n = 0; n < real_page_count(obj); n++) {
		u32 *map;

		map = kmap_atomic(i915_gem_object_get_page(obj, n));
		for (m = 0; m < DW_PER_PAGE; m++)
			map[m] = value;
		if (!has_llc)
			drm_clflush_virt_range(map, PAGE_SIZE);
		kunmap_atomic(map);
	}

	i915_gem_obj_finish_shmem_access(obj);
	obj->read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
	obj->write_domain = 0;
	return 0;
}
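
/*
 * Verify the object contents from the CPU: the first @max dwords of each
 * real page must hold their own index (as written by gpu_fill()), and every
 * remaining dword must still hold the STACK_MAGIC canary from cpu_fill().
 */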
static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)
{
	unsigned int n, m, needs_flush;
	int err;

	err = i915_gem_obj_prepare_shmem_read(obj, &needs_flush);
	if (err)
		return err;

	for (n = 0; n < real_page_count(obj); n++) {
		u32 *map;

		map = kmap_atomic(i915_gem_object_get_page(obj, n));
		if (needs_flush & CLFLUSH_BEFORE)
			drm_clflush_virt_range(map, PAGE_SIZE);

		for (m = 0; m < max; m++) {
			if (map[m] != m) {
				pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
				       n, m, map[m], m);
				err = -EINVAL;
				goto out_unmap;
			}
		}

		for (; m < DW_PER_PAGE; m++) {
			if (map[m] != STACK_MAGIC) {
				pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
				       n, m, map[m], STACK_MAGIC);
				err = -EINVAL;
				goto out_unmap;
			}
		}

out_unmap:
		kunmap_atomic(map);
		if (err)
			break;
	}

	i915_gem_obj_finish_shmem_access(obj);
	return err;
}

static int file_add_object(struct drm_file *file,
			   struct drm_i915_gem_object *obj)
{
	int err;

	GEM_BUG_ON(obj->base.handle_count);

	/* tie the object to the drm_file for easy reaping */
	err = idr_alloc(&file->object_idr, &obj->base, 1, 0, GFP_KERNEL);
	if (err < 0)
		return err;

	i915_gem_object_get(obj);
	obj->base.handle_count++;
	return 0;
}
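
/*
 * Create a huge_gem_object backed by DW_PER_PAGE real pages but spanning a
 * much larger range of the context's vm, prime every real page with the
 * STACK_MAGIC canary, and tie the object to @file so that it is reaped
 * automatically when the mock file is freed.
 */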
static struct drm_i915_gem_object *
create_test_object(struct i915_gem_context *ctx,
		   struct drm_file *file,
		   struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm =
		ctx->ppgtt ? &ctx->ppgtt->vm : &ctx->i915->ggtt.vm;
	u64 size;
	int err;

	size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
	size = round_down(size, DW_PER_PAGE * PAGE_SIZE);

	obj = huge_gem_object(ctx->i915, DW_PER_PAGE * PAGE_SIZE, size);
	if (IS_ERR(obj))
		return obj;

	err = file_add_object(file, obj);
	i915_gem_object_put(obj);
	if (err)
		return ERR_PTR(err);

	err = cpu_fill(obj, STACK_MAGIC);
	if (err) {
		pr_err("Failed to fill object with cpu, err=%d\n",
		       err);
		return ERR_PTR(err);
	}

	list_add_tail(&obj->st_link, objects);
	return obj;
}

static unsigned long max_dwords(struct drm_i915_gem_object *obj)
{
	unsigned long npages = fake_page_count(obj);

	GEM_BUG_ON(!IS_ALIGNED(npages, DW_PER_PAGE));
	return npages / DW_PER_PAGE;
}

static int igt_ctx_exec(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj = NULL;
	struct drm_file *file;
	IGT_TIMEOUT(end_time);
	LIST_HEAD(objects);
	unsigned long ncontexts, ndwords, dw;
	bool first_shared_gtt = true;
	int err = -ENODEV;

	/*
	 * Create a few different contexts (with different mm) and write
	 * through each ctx/mm using the GPU making sure those writes end
	 * up in the expected pages of our obj.
	 */

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&i915->drm.struct_mutex);

	ncontexts = 0;
	ndwords = 0;
	dw = 0;
	while (!time_after(jiffies, end_time)) {
		struct intel_engine_cs *engine;
		struct i915_gem_context *ctx;
		unsigned int id;

		if (first_shared_gtt) {
			ctx = __create_hw_context(i915, file->driver_priv);
			first_shared_gtt = false;
		} else {
			ctx = i915_gem_create_context(i915, file->driver_priv);
		}
		if (IS_ERR(ctx)) {
			err = PTR_ERR(ctx);
			goto out_unlock;
		}

		for_each_engine(engine, i915, id) {
			if (!engine->context_size)
				continue; /* No logical context support in HW */

			if (!intel_engine_can_store_dword(engine))
				continue;

			if (!obj) {
				obj = create_test_object(ctx, file, &objects);
				if (IS_ERR(obj)) {
					err = PTR_ERR(obj);
					goto out_unlock;
				}
			}

			intel_runtime_pm_get(i915);
			err = gpu_fill(obj, ctx, engine, dw);
			intel_runtime_pm_put(i915);
			if (err) {
				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
				       ndwords, dw, max_dwords(obj),
				       engine->name, ctx->hw_id,
				       yesno(!!ctx->ppgtt), err);
				goto out_unlock;
			}

			if (++dw == max_dwords(obj)) {
				obj = NULL;
				dw = 0;
			}
			ndwords++;
		}
		ncontexts++;
	}
	pr_info("Submitted %lu contexts (across %u engines), filling %lu dwords\n",
		ncontexts, INTEL_INFO(i915)->num_rings, ndwords);

	dw = 0;
	list_for_each_entry(obj, &objects, st_link) {
		unsigned int rem =
			min_t(unsigned int, ndwords - dw, max_dwords(obj));

		err = cpu_check(obj, rem);
		if (err)
			break;

		dw += rem;
	}

out_unlock:
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;
	mutex_unlock(&i915->drm.struct_mutex);

	mock_file_free(i915, file);
	return err;
}

static int igt_ctx_readonly(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj = NULL;
	struct drm_file *file;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	LIST_HEAD(objects);
	struct i915_gem_context *ctx;
	struct i915_hw_ppgtt *ppgtt;
	unsigned long ndwords, dw;
	int err = -ENODEV;

	/*
	 * Create a few read-only objects (with the occasional writable
	 * object) and try to write into these objects, checking that the
	 * GPU discards any write to a read-only object.
	 */

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&i915->drm.struct_mutex);

	ctx = i915_gem_create_context(i915, file->driver_priv);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_unlock;
	}

	ppgtt = ctx->ppgtt ?: i915->mm.aliasing_ppgtt;
	if (!ppgtt || !ppgtt->vm.has_read_only) {
		err = 0;
		goto out_unlock;
	}

	ndwords = 0;
	dw = 0;
	while (!time_after(jiffies, end_time)) {
		struct intel_engine_cs *engine;
		unsigned int id;

		for_each_engine(engine, i915, id) {
			if (!intel_engine_can_store_dword(engine))
				continue;

			if (!obj) {
				obj = create_test_object(ctx, file, &objects);
				if (IS_ERR(obj)) {
					err = PTR_ERR(obj);
					goto out_unlock;
				}

				if (prandom_u32_state(&prng) & 1)
					i915_gem_object_set_readonly(obj);
			}

			intel_runtime_pm_get(i915);
			err = gpu_fill(obj, ctx, engine, dw);
			intel_runtime_pm_put(i915);
			if (err) {
				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
				       ndwords, dw, max_dwords(obj),
				       engine->name, ctx->hw_id,
				       yesno(!!ctx->ppgtt), err);
				goto out_unlock;
			}

			if (++dw == max_dwords(obj)) {
				obj = NULL;
				dw = 0;
			}
			ndwords++;
		}
	}
	pr_info("Submitted %lu dwords (across %u engines)\n",
		ndwords, INTEL_INFO(i915)->num_rings);

	dw = 0;
	list_for_each_entry(obj, &objects, st_link) {
		unsigned int rem =
			min_t(unsigned int, ndwords - dw, max_dwords(obj));
		unsigned int num_writes;

		num_writes = rem;
		if (i915_gem_object_is_readonly(obj))
			num_writes = 0;

		err = cpu_check(obj, num_writes);
		if (err)
			break;

		dw += rem;
	}

out_unlock:
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;
	mutex_unlock(&i915->drm.struct_mutex);

	mock_file_free(i915, file);
	return err;
}

static __maybe_unused const char *
__engine_name(struct drm_i915_private *i915, unsigned int engines)
{
	struct intel_engine_cs *engine;
	unsigned int tmp;

	if (engines == ALL_ENGINES)
		return "all";

	for_each_engine_masked(engine, i915, engines, tmp)
		return engine->name;

	return "none";
}
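
/*
 * Dirty each engine in @engines with a request on @ctx, then check that
 * i915_gem_switch_to_kernel_context() leaves every engine idling in the
 * kernel context, and that a second switch while already idle does not
 * emit any further requests.
 */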
static int __igt_switch_to_kernel_context(struct drm_i915_private *i915,
					  struct i915_gem_context *ctx,
					  unsigned int engines)
{
	struct intel_engine_cs *engine;
	unsigned int tmp;
	int err;

	GEM_TRACE("Testing %s\n", __engine_name(i915, engines));
	for_each_engine_masked(engine, i915, engines, tmp) {
		struct i915_request *rq;

		rq = i915_request_alloc(engine, ctx);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		i915_request_add(rq);
	}

	err = i915_gem_switch_to_kernel_context(i915);
	if (err)
		return err;

	for_each_engine_masked(engine, i915, engines, tmp) {
		if (!engine_has_kernel_context_barrier(engine)) {
			pr_err("kernel context not last on engine %s!\n",
			       engine->name);
			return -EINVAL;
		}
	}

	err = i915_gem_wait_for_idle(i915,
				     I915_WAIT_LOCKED,
				     MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	GEM_BUG_ON(i915->gt.active_requests);
	for_each_engine_masked(engine, i915, engines, tmp) {
		if (engine->last_retired_context->gem_context != i915->kernel_context) {
			pr_err("engine %s not idling in kernel context!\n",
			       engine->name);
			return -EINVAL;
		}
	}

	err = i915_gem_switch_to_kernel_context(i915);
	if (err)
		return err;

	if (i915->gt.active_requests) {
		pr_err("switch-to-kernel-context emitted %d requests even though it should already be idling in the kernel context\n",
		       i915->gt.active_requests);
		return -EINVAL;
	}

	for_each_engine_masked(engine, i915, engines, tmp) {
		if (!intel_engine_has_kernel_context(engine)) {
			pr_err("kernel context not last on engine %s!\n",
			       engine->name);
			return -EINVAL;
		}
	}

	return 0;
}

static int igt_switch_to_kernel_context(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	int err;

	/*
	 * A core premise of switching to the kernel context is that
	 * if an engine is already idling in the kernel context, we
	 * do not emit another request and wake it up. The other being
	 * that we do indeed end up idling in the kernel context.
	 */

	mutex_lock(&i915->drm.struct_mutex);
	intel_runtime_pm_get(i915);

	ctx = kernel_context(i915);
	if (IS_ERR(ctx)) {
		mutex_unlock(&i915->drm.struct_mutex);
		return PTR_ERR(ctx);
	}

	/* First check idling each individual engine */
	for_each_engine(engine, i915, id) {
		err = __igt_switch_to_kernel_context(i915, ctx, BIT(id));
		if (err)
			goto out_unlock;
	}

	/* Now en masse */
	err = __igt_switch_to_kernel_context(i915, ctx, ALL_ENGINES);
	if (err)
		goto out_unlock;

out_unlock:
	GEM_TRACE_DUMP_ON(err);
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;

	intel_runtime_pm_put(i915);
	mutex_unlock(&i915->drm.struct_mutex);

	kernel_context_close(ctx);
	return err;
}
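
/*
 * Install an aliasing ppgtt beneath the GGTT purely for the benefit of the
 * selftests, so the aliasing paths are exercised even on setups that would
 * not otherwise use one. Stale I915_VMA_LOCAL_BIND flags on already bound
 * GGTT vma are cleared, as nothing has yet been inserted into the freshly
 * created aliasing ppgtt.
 */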
static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	int err;

	err = i915_gem_init_aliasing_ppgtt(i915);
	if (err)
		return err;

	list_for_each_entry(obj, &i915->mm.bound_list, mm.link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
		if (IS_ERR(vma))
			continue;

		vma->flags &= ~I915_VMA_LOCAL_BIND;
	}

	return 0;
}

static void fake_aliasing_ppgtt_disable(struct drm_i915_private *i915)
{
	i915_gem_fini_aliasing_ppgtt(i915);
}

int i915_gem_context_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_switch_to_kernel_context),
	};
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	err = i915_subtests(tests, i915);
	drm_dev_put(&i915->drm);
	return err;
}

int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_switch_to_kernel_context),
		SUBTEST(live_nop_switch),
		SUBTEST(igt_ctx_exec),
		SUBTEST(igt_ctx_readonly),
	};
	bool fake_alias = false;
	int err;

	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return 0;

	/* Install a fake aliasing gtt for exercise */
	if (USES_PPGTT(dev_priv) && !dev_priv->mm.aliasing_ppgtt) {
		mutex_lock(&dev_priv->drm.struct_mutex);
		err = fake_aliasing_ppgtt_enable(dev_priv);
		mutex_unlock(&dev_priv->drm.struct_mutex);
		if (err)
			return err;

		GEM_BUG_ON(!dev_priv->mm.aliasing_ppgtt);
		fake_alias = true;
	}

	err = i915_subtests(tests, dev_priv);

	if (fake_alias) {
		mutex_lock(&dev_priv->drm.struct_mutex);
		fake_aliasing_ppgtt_disable(dev_priv);
		mutex_unlock(&dev_priv->drm.struct_mutex);
	}

	return err;
}