/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "../i915_selftest.h"

#include "mock_drm.h"
#include "huge_gem_object.h"

#define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))

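/*
 * Build a batch buffer that writes @value into one dword of each of
 * @count pages of @vma, starting at @offset, using the flavour of
 * MI_STORE_DWORD_IMM appropriate to the hardware generation. The batch
 * is pinned into the same address space and returned ready for execution.
 */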
static struct i915_vma *
gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
{
	struct drm_i915_gem_object *obj;
	const int gen = INTEL_GEN(vma->vm->i915);
	unsigned long n, size;
	u32 *cmd;
	int err;

	size = (4 * count + 1) * sizeof(u32);
	size = round_up(size, PAGE_SIZE);
	obj = i915_gem_object_create_internal(vma->vm->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
	offset += vma->node.start;

	for (n = 0; n < count; n++) {
		if (gen >= 8) {
			*cmd++ = MI_STORE_DWORD_IMM_GEN4;
			*cmd++ = lower_32_bits(offset);
			*cmd++ = upper_32_bits(offset);
			*cmd++ = value;
		} else if (gen >= 4) {
			*cmd++ = MI_STORE_DWORD_IMM_GEN4 |
				(gen < 6 ? 1 << 22 : 0);
			*cmd++ = 0;
			*cmd++ = offset;
			*cmd++ = value;
		} else {
			*cmd++ = MI_STORE_DWORD_IMM | 1 << 22;
			*cmd++ = offset;
			*cmd++ = value;
		}
		offset += PAGE_SIZE;
	}
	*cmd = MI_BATCH_BUFFER_END;
	i915_gem_object_unpin_map(obj);

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	if (err)
		goto err;

	vma = i915_vma_instance(obj, vma->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

static unsigned long real_page_count(struct drm_i915_gem_object *obj)
{
	return huge_gem_object_phys_size(obj) >> PAGE_SHIFT;
}

static unsigned long fake_page_count(struct drm_i915_gem_object *obj)
{
	return huge_gem_object_dma_size(obj) >> PAGE_SHIFT;
}

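/*
 * Using the batch built by gpu_fill_dw(), write the value @dw into the
 * @dw'th dword of every real page of @obj. The target addresses go
 * through the @dw'th run of GTT aliases, so each call exercises a
 * different view of the same physical pages.
 */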
static int gpu_fill(struct drm_i915_gem_object *obj,
		    struct i915_gem_context *ctx,
		    struct intel_engine_cs *engine,
		    unsigned int dw)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_address_space *vm =
		ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
	struct drm_i915_gem_request *rq;
	struct i915_vma *vma;
	struct i915_vma *batch;
	unsigned int flags;
	int err;

	GEM_BUG_ON(obj->base.size > vm->total);
	GEM_BUG_ON(!intel_engine_can_store_dword(engine));

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	if (err)
		return err;

	err = i915_vma_pin(vma, 0, 0, PIN_HIGH | PIN_USER);
	if (err)
		return err;

	/* Within the GTT the huge object maps every page onto
	 * its 1024 real pages (using phys_pfn = dma_pfn % 1024).
	 * We set the nth dword within the page using the nth
	 * mapping via the GTT - this should exercise the GTT mapping
	 * whilst checking that each context provides a unique view
	 * into the object.
	 */
	batch = gpu_fill_dw(vma,
			    (dw * real_page_count(obj)) << PAGE_SHIFT |
			    (dw * sizeof(u32)),
			    real_page_count(obj),
			    dw);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto err_vma;
	}

	rq = i915_gem_request_alloc(engine, ctx);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_batch;
	}

	flags = 0;
	if (INTEL_GEN(vm->i915) <= 5)
		flags |= I915_DISPATCH_SECURE;

	err = engine->emit_bb_start(rq,
				    batch->node.start, batch->node.size,
				    flags);
	if (err)
		goto err_request;

	i915_vma_move_to_active(batch, rq, 0);
	i915_gem_object_set_active_reference(batch->obj);
	i915_vma_unpin(batch);
	i915_vma_close(batch);

	i915_vma_move_to_active(vma, rq, 0);
	i915_vma_unpin(vma);

	reservation_object_lock(obj->resv, NULL);
	reservation_object_add_excl_fence(obj->resv, &rq->fence);
	reservation_object_unlock(obj->resv);

	__i915_add_request(rq, true);

	return 0;

err_request:
	__i915_add_request(rq, false);
err_batch:
	i915_vma_unpin(batch);
err_vma:
	i915_vma_unpin(vma);
	return err;
}

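/* Fill every dword of every real page of @obj with @value from the CPU. */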
static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
{
	const bool has_llc = HAS_LLC(to_i915(obj->base.dev));
	unsigned int n, m, need_flush;
	int err;

	err = i915_gem_obj_prepare_shmem_write(obj, &need_flush);
	if (err)
		return err;

	for (n = 0; n < real_page_count(obj); n++) {
		u32 *map;

		map = kmap_atomic(i915_gem_object_get_page(obj, n));
		for (m = 0; m < DW_PER_PAGE; m++)
			map[m] = value;
		if (!has_llc)
			drm_clflush_virt_range(map, PAGE_SIZE);
		kunmap_atomic(map);
	}

	i915_gem_obj_finish_shmem_access(obj);
	obj->base.read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
	obj->base.write_domain = 0;
	return 0;
}

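/*
 * Check that the first @max dwords of each real page hold their own
 * index (as written by the GPU) and that the remainder of the page
 * still holds the 0xdeadbeef pattern from cpu_fill().
 */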
static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)
{
	unsigned int n, m, needs_flush;
	int err;

	err = i915_gem_obj_prepare_shmem_read(obj, &needs_flush);
	if (err)
		return err;

	for (n = 0; n < real_page_count(obj); n++) {
		u32 *map;

		map = kmap_atomic(i915_gem_object_get_page(obj, n));
		if (needs_flush & CLFLUSH_BEFORE)
			drm_clflush_virt_range(map, PAGE_SIZE);

		for (m = 0; m < max; m++) {
			if (map[m] != m) {
				pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
				       n, m, map[m], m);
				err = -EINVAL;
				goto out_unmap;
			}
		}

		for (; m < DW_PER_PAGE; m++) {
			if (map[m] != 0xdeadbeef) {
				pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
				       n, m, map[m], 0xdeadbeef);
				err = -EINVAL;
				goto out_unmap;
			}
		}

out_unmap:
		kunmap_atomic(map);
		if (err)
			break;
	}

	i915_gem_obj_finish_shmem_access(obj);
	return err;
}

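/*
 * Attach @obj to @file's handle idr so that it is released along with
 * the file by mock_file_free(), just as a handle created by userspace
 * would be.
 */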
static int file_add_object(struct drm_file *file,
			   struct drm_i915_gem_object *obj)
{
	int err;

	GEM_BUG_ON(obj->base.handle_count);

	/* tie the object to the drm_file for easy reaping */
	err = idr_alloc(&file->object_idr, &obj->base, 1, 0, GFP_KERNEL);
	if (err < 0)
		return err;

	i915_gem_object_get(obj);
	obj->base.handle_count++;
	return 0;
}

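/*
 * Create a huge_gem_object backed by only DW_PER_PAGE real pages but
 * presenting up to half the address space, register it with @file for
 * reaping, prime it with 0xdeadbeef and add it to @objects for the
 * final cpu_check().
 */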
static struct drm_i915_gem_object *
create_test_object(struct i915_gem_context *ctx,
		   struct drm_file *file,
		   struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm =
		ctx->ppgtt ? &ctx->ppgtt->base : &ctx->i915->ggtt.base;
	u64 size;
	int err;

	size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
	size = round_down(size, DW_PER_PAGE * PAGE_SIZE);

	obj = huge_gem_object(ctx->i915, DW_PER_PAGE * PAGE_SIZE, size);
	if (IS_ERR(obj))
		return obj;

	err = file_add_object(file, obj);
	i915_gem_object_put(obj);
	if (err)
		return ERR_PTR(err);

	err = cpu_fill(obj, 0xdeadbeef);
	if (err) {
		pr_err("Failed to fill object with cpu, err=%d\n", err);
		return ERR_PTR(err);
	}

	list_add_tail(&obj->st_link, objects);
	return obj;
}

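/*
 * Each dword value consumes one run of real pages worth of fake (GTT)
 * address space, so the object can absorb fake_page_count / DW_PER_PAGE
 * distinct dwords before it is full.
 */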
static unsigned long max_dwords(struct drm_i915_gem_object *obj)
{
	unsigned long npages = fake_page_count(obj);

	GEM_BUG_ON(!IS_ALIGNED(npages, DW_PER_PAGE));
	return npages / DW_PER_PAGE;
}

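/*
 * Note that the very first context is created with __create_hw_context()
 * and so has no ppgtt of its own: it uses the shared GTT, while every
 * subsequent context comes from i915_gem_create_context() and may be
 * given its own full ppgtt.
 */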
static int igt_ctx_exec(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj = NULL;
	struct drm_file *file;
	IGT_TIMEOUT(end_time);
	LIST_HEAD(objects);
	unsigned long ncontexts, ndwords, dw;
	bool first_shared_gtt = true;
	int err = -ENODEV;

	/* Create a few different contexts (with different mm) and write
	 * through each ctx/mm using the GPU making sure those writes end
	 * up in the expected pages of our obj.
	 */

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&i915->drm.struct_mutex);

	ncontexts = 0;
	ndwords = 0;
	dw = 0;
	while (!time_after(jiffies, end_time)) {
		struct intel_engine_cs *engine;
		struct i915_gem_context *ctx;
		unsigned int id;

		if (first_shared_gtt) {
			ctx = __create_hw_context(i915, file->driver_priv);
			first_shared_gtt = false;
		} else {
			ctx = i915_gem_create_context(i915, file->driver_priv);
		}
		if (IS_ERR(ctx)) {
			err = PTR_ERR(ctx);
			goto out_unlock;
		}

		for_each_engine(engine, i915, id) {
			if (!intel_engine_can_store_dword(engine))
				continue;

			if (!obj) {
				obj = create_test_object(ctx, file, &objects);
				if (IS_ERR(obj)) {
					err = PTR_ERR(obj);
					goto out_unlock;
				}
			}

			intel_runtime_pm_get(i915);
			err = gpu_fill(obj, ctx, engine, dw);
			intel_runtime_pm_put(i915);
			if (err) {
				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
				       ndwords, dw, max_dwords(obj),
				       engine->name, ctx->hw_id,
				       yesno(!!ctx->ppgtt), err);
				goto out_unlock;
			}

			if (++dw == max_dwords(obj)) {
				obj = NULL;
				dw = 0;
			}
			ndwords++;
		}
		ncontexts++;
	}
	pr_info("Submitted %lu contexts (across %u engines), filling %lu dwords\n",
		ncontexts, INTEL_INFO(i915)->num_rings, ndwords);

	dw = 0;
	list_for_each_entry(obj, &objects, st_link) {
		unsigned int rem =
			min_t(unsigned int, ndwords - dw, max_dwords(obj));

		err = cpu_check(obj, rem);
		if (err)
			break;

		dw += rem;
	}

out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	mock_file_free(i915, file);
	return err;
}

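/*
 * Install an aliasing ppgtt behind the global GTT and clear the
 * LOCAL_BIND flag on every bound vma, so that subsequent PIN_USER binds
 * are redone through the new aliasing ppgtt.
 */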
static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	int err;

	err = i915_gem_init_aliasing_ppgtt(i915);
	if (err)
		return err;

	list_for_each_entry(obj, &i915->mm.bound_list, mm.link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma))
			continue;

		vma->flags &= ~I915_VMA_LOCAL_BIND;
	}

	return 0;
}

static void fake_aliasing_ppgtt_disable(struct drm_i915_private *i915)
{
	i915_gem_fini_aliasing_ppgtt(i915);
}

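/*
 * Live selftest entry point. If ppgtt is in use but no aliasing ppgtt
 * has been set up, a fake one is temporarily installed so that the
 * aliasing paths are exercised too, and torn down again once the
 * subtests have run.
 */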
int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_ctx_exec),
	};
	bool fake_alias = false;
	int err;

	/* Install a fake aliasing gtt for exercise */
	if (USES_PPGTT(dev_priv) && !dev_priv->mm.aliasing_ppgtt) {
		mutex_lock(&dev_priv->drm.struct_mutex);
		err = fake_aliasing_ppgtt_enable(dev_priv);
		mutex_unlock(&dev_priv->drm.struct_mutex);
		if (err)
			return err;

		GEM_BUG_ON(!dev_priv->mm.aliasing_ppgtt);
		fake_alias = true;
	}

	err = i915_subtests(tests, dev_priv);

	if (fake_alias) {
		mutex_lock(&dev_priv->drm.struct_mutex);
		fake_aliasing_ppgtt_disable(dev_priv);
		mutex_unlock(&dev_priv->drm.struct_mutex);
	}

	return err;
}