/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include "../i915_selftest.h"

#include "mock_gem_device.h"
#include "huge_gem_object.h"

static int igt_gem_object(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct drm_i915_gem_object *obj;
        int err = -ENOMEM;

        /* Basic test to ensure we can create an object */

        obj = i915_gem_object_create(i915, PAGE_SIZE);
        if (IS_ERR(obj)) {
                err = PTR_ERR(obj);
                pr_err("i915_gem_object_create failed, err=%d\n", err);
                goto out;
        }

        err = 0;
        i915_gem_object_put(obj);
out:
        return err;
}

static int igt_phys_object(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct drm_i915_gem_object *obj;
        int err;

        /* Create an object and bind it to a contiguous set of physical pages,
         * i.e. exercise the i915_gem_object_phys API.
         */

        obj = i915_gem_object_create(i915, PAGE_SIZE);
        if (IS_ERR(obj)) {
                err = PTR_ERR(obj);
                pr_err("i915_gem_object_create failed, err=%d\n", err);
                goto out;
        }

        mutex_lock(&i915->drm.struct_mutex);
        err = i915_gem_object_attach_phys(obj, PAGE_SIZE);
        mutex_unlock(&i915->drm.struct_mutex);
        if (err) {
                pr_err("i915_gem_object_attach_phys failed, err=%d\n", err);
                goto out_obj;
        }

        if (obj->ops != &i915_gem_phys_ops) {
                pr_err("i915_gem_object_attach_phys did not create a phys object\n");
                err = -EINVAL;
                goto out_obj;
        }

        if (!atomic_read(&obj->mm.pages_pin_count)) {
                pr_err("i915_gem_object_attach_phys did not pin its phys pages\n");
                err = -EINVAL;
                goto out_obj;
        }

        /* Make the object dirty so that put_pages must copy back the data */
        mutex_lock(&i915->drm.struct_mutex);
        err = i915_gem_object_set_to_gtt_domain(obj, true);
        mutex_unlock(&i915->drm.struct_mutex);
        if (err) {
                pr_err("i915_gem_object_set_to_gtt_domain failed with err=%d\n",
                       err);
                goto out_obj;
        }

out_obj:
        i915_gem_object_put(obj);
out:
        return err;
}

static int igt_gem_huge(void *arg)
{
        const unsigned int nreal = 509; /* just to be awkward */
        struct drm_i915_private *i915 = arg;
        struct drm_i915_gem_object *obj;
        unsigned int n;
        int err;

        /* Basic sanity check of our huge fake object allocation */

        obj = huge_gem_object(i915,
                              nreal * PAGE_SIZE,
                              i915->ggtt.vm.total + PAGE_SIZE);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        err = i915_gem_object_pin_pages(obj);
        if (err) {
                pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
                       nreal, obj->base.size / PAGE_SIZE, err);
                goto out;
        }

        for (n = 0; n < obj->base.size / PAGE_SIZE; n++) {
                if (i915_gem_object_get_page(obj, n) !=
                    i915_gem_object_get_page(obj, n % nreal)) {
                        pr_err("Page lookup mismatch at index %u [%u]\n",
                               n, n % nreal);
                        err = -EINVAL;
                        goto out_unpin;
                }
        }

out_unpin:
        i915_gem_object_unpin_pages(obj);
out:
        i915_gem_object_put(obj);
        return err;
}

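/*
 * One tiling layout under test: the tile geometry (width in bytes, height
 * in rows, log2 of the tile size in bytes), the object stride, and the
 * bit-6 swizzle mode applied to that layout.
 */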
struct tile {
        unsigned int width;
        unsigned int height;
        unsigned int stride;
        unsigned int size;
        unsigned int tiling;
        unsigned int swizzle;
};

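/*
 * Extract the given address bit and shift it down to bit 6, ready to be
 * XORed into an offset to emulate the hardware's bit-6 swizzling.
 */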
static u64 swizzle_bit(unsigned int bit, u64 offset)
{
        return (offset & BIT_ULL(bit)) >> (bit - 6);
}

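/*
 * Convert a linear offset into the object into the offset at which the
 * same byte lands in the backing store once the X/Y tiling layout and
 * bit-6 swizzling are applied, mirroring what the fenced GTT view does.
 */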
static u64 tiled_offset(const struct tile *tile, u64 v)
{
        u64 x, y;

        if (tile->tiling == I915_TILING_NONE)
                return v;

        y = div64_u64_rem(v, tile->stride, &x);
        v = div64_u64_rem(y, tile->height, &y) * tile->stride * tile->height;

        if (tile->tiling == I915_TILING_X) {
                v += y * tile->width;
                v += div64_u64_rem(x, tile->width, &x) << tile->size;
                v += x;
        } else if (tile->width == 128) {
                const unsigned int ytile_span = 16;
                const unsigned int ytile_height = 512;

                v += y * ytile_span;
                v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
                v += x;
        } else {
                const unsigned int ytile_span = 32;
                const unsigned int ytile_height = 256;

                v += y * ytile_span;
                v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
                v += x;
        }

        switch (tile->swizzle) {
        case I915_BIT_6_SWIZZLE_9:
                v ^= swizzle_bit(9, v);
                break;
        case I915_BIT_6_SWIZZLE_9_10:
                v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
                break;
        case I915_BIT_6_SWIZZLE_9_11:
                v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
                break;
        case I915_BIT_6_SWIZZLE_9_10_11:
                v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
                break;
        }

        return v;
}

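/*
 * For each (prime-numbered) page of the object, write the page index
 * through a partial GGTT mapping of the given tiling, then verify via a
 * manually detiled CPU read that the write landed in the expected page
 * of the backing store.
 */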
static int check_partial_mapping(struct drm_i915_gem_object *obj,
                                 const struct tile *tile,
                                 unsigned long end_time)
{
        const unsigned int nreal = obj->scratch / PAGE_SIZE;
        const unsigned long npages = obj->base.size / PAGE_SIZE;
        struct i915_vma *vma;
        unsigned long page;
        int err;

        if (igt_timeout(end_time,
                        "%s: timed out before tiling=%d stride=%d\n",
                        __func__, tile->tiling, tile->stride))
                return -EINTR;

        err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
        if (err) {
                pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
                       tile->tiling, tile->stride, err);
                return err;
        }

        GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
        GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);

        for_each_prime_number_from(page, 1, npages) {
                struct i915_ggtt_view view =
                        compute_partial_view(obj, page, MIN_CHUNK_PAGES);
                u32 __iomem *io;
                struct page *p;
                unsigned int n;
                u64 offset;
                u32 *cpu;

                GEM_BUG_ON(view.partial.size > nreal);

                err = i915_gem_object_set_to_gtt_domain(obj, true);
                if (err) {
                        pr_err("Failed to flush to GTT write domain; err=%d\n",
                               err);
                        return err;
                }

                vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
                if (IS_ERR(vma)) {
                        pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
                               page, (int)PTR_ERR(vma));
                        return PTR_ERR(vma);
                }

                n = page - view.partial.offset;
                GEM_BUG_ON(n >= view.partial.size);

                io = i915_vma_pin_iomap(vma);
                i915_vma_unpin(vma);
                if (IS_ERR(io)) {
                        pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
                               page, (int)PTR_ERR(io));
                        return PTR_ERR(io);
                }

                iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
                i915_vma_unpin_iomap(vma);

                offset = tiled_offset(tile, page << PAGE_SHIFT);
                if (offset >= obj->base.size)
                        continue;

                flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);

                p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
                cpu = kmap(p) + offset_in_page(offset);
                drm_clflush_virt_range(cpu, sizeof(*cpu));
                if (*cpu != (u32)page) {
                        pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
                               page, n,
                               view.partial.offset,
                               view.partial.size,
                               vma->size >> PAGE_SHIFT,
                               tile->tiling ? tile_row_pages(obj) : 0,
                               vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
                               offset >> PAGE_SHIFT,
                               (unsigned int)offset_in_page(offset),
                               offset,
                               (u32)page, *cpu);
                        err = -EINVAL;
                }
                *cpu = 0;
                drm_clflush_virt_range(cpu, sizeof(*cpu));
                kunmap(p);
                if (err)
                        return err;

                i915_vma_destroy(vma);
        }

        return 0;
}

static int igt_partial_tiling(void *arg)
{
        const unsigned int nreal = 1 << 12; /* largest tile row x2 */
        struct drm_i915_private *i915 = arg;
        struct drm_i915_gem_object *obj;
        int tiling;
        int err;

        /* We want to check the page mapping and fencing of a large object
         * mmapped through the GTT. The object we create is larger than can
         * possibly be mmaped as a whole, and so we must use partial GGTT vma.
         * We then check that a write through each partial GGTT vma ends up
         * in the right set of pages within the object, and with the expected
         * tiling, which we verify by manual swizzling.
         */

        obj = huge_gem_object(i915,
                              nreal << PAGE_SHIFT,
                              (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        err = i915_gem_object_pin_pages(obj);
        if (err) {
                pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
                       nreal, obj->base.size / PAGE_SIZE, err);
                goto out;
        }

        mutex_lock(&i915->drm.struct_mutex);
        intel_runtime_pm_get(i915);

        if (1) {
                IGT_TIMEOUT(end);
                struct tile tile;

                tile.height = 1;
                tile.width = 1;
                tile.size = 0;
                tile.stride = 0;
                tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
                tile.tiling = I915_TILING_NONE;

                err = check_partial_mapping(obj, &tile, end);
                if (err && err != -EINTR)
                        goto out_unlock;
        }

        for (tiling = I915_TILING_X; tiling <= I915_TILING_Y; tiling++) {
                IGT_TIMEOUT(end);
                unsigned int max_pitch;
                unsigned int pitch;
                struct tile tile;

                if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
                        /*
                         * The swizzling pattern is actually unknown as it
                         * varies based on physical address of each page.
                         * See i915_gem_detect_bit_6_swizzle().
                         */
                        break;

                tile.tiling = tiling;
                switch (tiling) {
                case I915_TILING_X:
                        tile.swizzle = i915->mm.bit_6_swizzle_x;
                        break;
                case I915_TILING_Y:
                        tile.swizzle = i915->mm.bit_6_swizzle_y;
                        break;
                }

                GEM_BUG_ON(tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN);
                if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
                    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
                        continue;

                if (INTEL_GEN(i915) <= 2) {
                        tile.height = 16;
                        tile.width = 128;
                        tile.size = 11;
                } else if (tile.tiling == I915_TILING_Y &&
                           HAS_128_BYTE_Y_TILING(i915)) {
                        tile.height = 32;
                        tile.width = 128;
                        tile.size = 12;
                } else {
                        tile.height = 8;
                        tile.width = 512;
                        tile.size = 12;
                }

                if (INTEL_GEN(i915) < 4)
                        max_pitch = 8192 / tile.width;
                else if (INTEL_GEN(i915) < 7)
                        max_pitch = 128 * I965_FENCE_MAX_PITCH_VAL / tile.width;
                else
                        max_pitch = 128 * GEN7_FENCE_MAX_PITCH_VAL / tile.width;

                for (pitch = max_pitch; pitch; pitch >>= 1) {
                        tile.stride = tile.width * pitch;
                        err = check_partial_mapping(obj, &tile, end);
                        if (err == -EINTR)
                                goto next_tiling;
                        if (err)
                                goto out_unlock;

                        if (pitch > 2 && INTEL_GEN(i915) >= 4) {
                                tile.stride = tile.width * (pitch - 1);
                                err = check_partial_mapping(obj, &tile, end);
                                if (err == -EINTR)
                                        goto next_tiling;
                                if (err)
                                        goto out_unlock;
                        }

                        if (pitch < max_pitch && INTEL_GEN(i915) >= 4) {
                                tile.stride = tile.width * (pitch + 1);
                                err = check_partial_mapping(obj, &tile, end);
                                if (err == -EINTR)
                                        goto next_tiling;
                                if (err)
                                        goto out_unlock;
                        }
                }

                if (INTEL_GEN(i915) >= 4) {
                        for_each_prime_number(pitch, max_pitch) {
                                tile.stride = tile.width * pitch;
                                err = check_partial_mapping(obj, &tile, end);
                                if (err == -EINTR)
                                        goto next_tiling;
                                if (err)
                                        goto out_unlock;
                        }
                }

next_tiling: ;
        }

out_unlock:
        intel_runtime_pm_put(i915);
        mutex_unlock(&i915->drm.struct_mutex);
        i915_gem_object_unpin_pages(obj);
out:
        i915_gem_object_put(obj);
        return err;
}

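/*
 * Keep the object busy on the GPU by submitting a request that writes to
 * it, then drop our reference so that only the active reference keeps the
 * object alive until the request is retired.
 */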
static int make_obj_busy(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_request *rq;
        struct i915_vma *vma;
        int err;

        vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
        if (IS_ERR(vma))
                return PTR_ERR(vma);

        err = i915_vma_pin(vma, 0, 0, PIN_USER);
        if (err)
                return err;

        rq = i915_request_alloc(i915->engine[RCS], i915->kernel_context);
        if (IS_ERR(rq)) {
                i915_vma_unpin(vma);
                return PTR_ERR(rq);
        }

        err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);

        i915_request_add(rq);

        __i915_gem_object_release_unless_active(obj);
        i915_vma_unpin(vma);

        return err;
}

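/*
 * Check that creating a mmap offset for a freshly allocated object of the
 * given size fails or succeeds exactly as expected.
 */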
static bool assert_mmap_offset(struct drm_i915_private *i915,
                               unsigned long size,
                               int expected)
{
        struct drm_i915_gem_object *obj;
        int err;

        obj = i915_gem_object_create_internal(i915, size);
        if (IS_ERR(obj))
                return false; /* report allocation failure as a mismatch */

        err = i915_gem_object_create_mmap_offset(obj);
        i915_gem_object_put(obj);

        return err == expected;
}

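/*
 * Prevent anything from reaping our objects behind our back: unregister
 * the shrinker, take a fake active request so the GT stays unparked, and
 * flush any pending retire/idle work.
 */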
static void disable_retire_worker(struct drm_i915_private *i915)
{
        i915_gem_shrinker_unregister(i915);

        mutex_lock(&i915->drm.struct_mutex);
        if (!i915->gt.active_requests++) {
                intel_runtime_pm_get(i915);
                i915_gem_unpark(i915);
                intel_runtime_pm_put(i915);
        }
        mutex_unlock(&i915->drm.struct_mutex);

        cancel_delayed_work_sync(&i915->gt.retire_work);
        cancel_delayed_work_sync(&i915->gt.idle_work);
}

static int igt_mmap_offset_exhaustion(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
        struct drm_i915_gem_object *obj;
        struct drm_mm_node resv, *hole;
        u64 hole_start, hole_end;
        int loop, err;

        /* Disable background reaper */
        disable_retire_worker(i915);
        GEM_BUG_ON(!i915->gt.awake);

        /* Trim the device mmap space to only a page */
        memset(&resv, 0, sizeof(resv));
        drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
                resv.start = hole_start;
                resv.size = hole_end - hole_start - 1; /* PAGE_SIZE units */
                err = drm_mm_reserve_node(mm, &resv);
                if (err) {
                        pr_err("Failed to trim VMA manager, err=%d\n", err);
                        goto out_park;
                }
                break;
        }

        /* Just fits! */
        if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
                pr_err("Unable to insert object into single page hole\n");
                err = -EINVAL;
                goto out;
        }

        /* Too large */
        if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, -ENOSPC)) {
                pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
                err = -EINVAL;
                goto out;
        }

        /* Fill the hole, further allocation attempts should then fail */
        obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
        if (IS_ERR(obj)) {
                err = PTR_ERR(obj);
                goto out;
        }

        err = i915_gem_object_create_mmap_offset(obj);
        if (err) {
                pr_err("Unable to insert object into reclaimed hole\n");
                goto err_obj;
        }

        if (!assert_mmap_offset(i915, PAGE_SIZE, -ENOSPC)) {
                pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
                err = -EINVAL;
                goto err_obj;
        }

        i915_gem_object_put(obj);

        /* Now fill with busy dead objects that we expect to reap */
        for (loop = 0; loop < 3; loop++) {
                if (i915_terminally_wedged(&i915->gpu_error))
                        break;

                obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
                if (IS_ERR(obj)) {
                        err = PTR_ERR(obj);
                        goto out;
                }

                mutex_lock(&i915->drm.struct_mutex);
                intel_runtime_pm_get(i915);
                err = make_obj_busy(obj);
                intel_runtime_pm_put(i915);
                mutex_unlock(&i915->drm.struct_mutex);
                if (err) {
                        pr_err("[loop %d] Failed to busy the object\n", loop);
                        goto err_obj;
                }

                /* NB we rely on the _active_ reference to access obj now */
                GEM_BUG_ON(!i915_gem_object_is_active(obj));
                err = i915_gem_object_create_mmap_offset(obj);
                if (err) {
                        pr_err("[loop %d] i915_gem_object_create_mmap_offset failed with err=%d\n",
                               loop, err);
                        goto out;
                }
        }

out:
        drm_mm_remove_node(&resv);
out_park:
        mutex_lock(&i915->drm.struct_mutex);
        if (--i915->gt.active_requests)
                queue_delayed_work(i915->wq, &i915->gt.retire_work, 0);
        else
                queue_delayed_work(i915->wq, &i915->gt.idle_work, 0);
        mutex_unlock(&i915->drm.struct_mutex);
        i915_gem_shrinker_register(i915);
        return err;

err_obj:
        i915_gem_object_put(obj);
        goto out;
}

int i915_gem_object_mock_selftests(void)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(igt_gem_object),
                SUBTEST(igt_phys_object),
        };
        struct drm_i915_private *i915;
        int err;

        i915 = mock_gem_device();
        if (!i915)
                return -ENOMEM;

        err = i915_subtests(tests, i915);

        drm_dev_put(&i915->drm);
        return err;
}

int i915_gem_object_live_selftests(struct drm_i915_private *i915)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(igt_gem_huge),
                SUBTEST(igt_partial_tiling),
                SUBTEST(igt_mmap_offset_exhaustion),
        };

        return i915_subtests(tests, i915);
}