i915_vma.c 19 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740
  1. /*
  2. * Copyright © 2016 Intel Corporation
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice (including the next
  12. * paragraph) shall be included in all copies or substantial portions of the
  13. * Software.
  14. *
  15. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  18. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21. * IN THE SOFTWARE.
  22. *
  23. */
  24. #include <linux/prime_numbers.h>
  25. #include "../i915_selftest.h"
  26. #include "mock_gem_device.h"
  27. #include "mock_context.h"
  28. static bool assert_vma(struct i915_vma *vma,
  29. struct drm_i915_gem_object *obj,
  30. struct i915_gem_context *ctx)
  31. {
  32. bool ok = true;
  33. if (vma->vm != &ctx->ppgtt->base) {
  34. pr_err("VMA created with wrong VM\n");
  35. ok = false;
  36. }
  37. if (vma->size != obj->base.size) {
  38. pr_err("VMA created with wrong size, found %llu, expected %zu\n",
  39. vma->size, obj->base.size);
  40. ok = false;
  41. }
  42. if (vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) {
  43. pr_err("VMA created with wrong type [%d]\n",
  44. vma->ggtt_view.type);
  45. ok = false;
  46. }
  47. return ok;
  48. }
  49. static struct i915_vma *
  50. checked_vma_instance(struct drm_i915_gem_object *obj,
  51. struct i915_address_space *vm,
  52. struct i915_ggtt_view *view)
  53. {
  54. struct i915_vma *vma;
  55. bool ok = true;
  56. vma = i915_vma_instance(obj, vm, view);
  57. if (IS_ERR(vma))
  58. return vma;
  59. /* Manual checks, will be reinforced by i915_vma_compare! */
  60. if (vma->vm != vm) {
  61. pr_err("VMA's vm [%p] does not match request [%p]\n",
  62. vma->vm, vm);
  63. ok = false;
  64. }
  65. if (i915_is_ggtt(vm) != i915_vma_is_ggtt(vma)) {
  66. pr_err("VMA ggtt status [%d] does not match parent [%d]\n",
  67. i915_vma_is_ggtt(vma), i915_is_ggtt(vm));
  68. ok = false;
  69. }
  70. if (i915_vma_compare(vma, vm, view)) {
  71. pr_err("i915_vma_compare failed with create parameters!\n");
  72. return ERR_PTR(-EINVAL);
  73. }
  74. if (i915_vma_compare(vma, vma->vm,
  75. i915_vma_is_ggtt(vma) ? &vma->ggtt_view : NULL)) {
  76. pr_err("i915_vma_compare failed with itself\n");
  77. return ERR_PTR(-EINVAL);
  78. }
  79. if (!ok) {
  80. pr_err("i915_vma_compare failed to detect the difference!\n");
  81. return ERR_PTR(-EINVAL);
  82. }
  83. return vma;
  84. }
  85. static int create_vmas(struct drm_i915_private *i915,
  86. struct list_head *objects,
  87. struct list_head *contexts)
  88. {
  89. struct drm_i915_gem_object *obj;
  90. struct i915_gem_context *ctx;
  91. int pinned;
  92. list_for_each_entry(obj, objects, st_link) {
  93. for (pinned = 0; pinned <= 1; pinned++) {
  94. list_for_each_entry(ctx, contexts, link) {
  95. struct i915_address_space *vm =
  96. &ctx->ppgtt->base;
  97. struct i915_vma *vma;
  98. int err;
  99. vma = checked_vma_instance(obj, vm, NULL);
  100. if (IS_ERR(vma))
  101. return PTR_ERR(vma);
  102. if (!assert_vma(vma, obj, ctx)) {
  103. pr_err("VMA lookup/create failed\n");
  104. return -EINVAL;
  105. }
  106. if (!pinned) {
  107. err = i915_vma_pin(vma, 0, 0, PIN_USER);
  108. if (err) {
  109. pr_err("Failed to pin VMA\n");
  110. return err;
  111. }
  112. } else {
  113. i915_vma_unpin(vma);
  114. }
  115. }
  116. }
  117. }
  118. return 0;
  119. }
  120. static int igt_vma_create(void *arg)
  121. {
  122. struct drm_i915_private *i915 = arg;
  123. struct drm_i915_gem_object *obj, *on;
  124. struct i915_gem_context *ctx, *cn;
  125. unsigned long num_obj, num_ctx;
  126. unsigned long no, nc;
  127. IGT_TIMEOUT(end_time);
  128. LIST_HEAD(contexts);
  129. LIST_HEAD(objects);
  130. int err = -ENOMEM;
  131. /* Exercise creating many vma amonst many objections, checking the
  132. * vma creation and lookup routines.
  133. */
  134. no = 0;
  135. for_each_prime_number(num_obj, ULONG_MAX - 1) {
  136. for (; no < num_obj; no++) {
  137. obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
  138. if (IS_ERR(obj))
  139. goto out;
  140. list_add(&obj->st_link, &objects);
  141. }
  142. nc = 0;
  143. for_each_prime_number(num_ctx, MAX_CONTEXT_HW_ID) {
  144. for (; nc < num_ctx; nc++) {
  145. ctx = mock_context(i915, "mock");
  146. if (!ctx)
  147. goto out;
  148. list_move(&ctx->link, &contexts);
  149. }
  150. err = create_vmas(i915, &objects, &contexts);
  151. if (err)
  152. goto out;
  153. if (igt_timeout(end_time,
  154. "%s timed out: after %lu objects in %lu contexts\n",
  155. __func__, no, nc))
  156. goto end;
  157. }
  158. list_for_each_entry_safe(ctx, cn, &contexts, link) {
  159. list_del_init(&ctx->link);
  160. mock_context_close(ctx);
  161. }
  162. }
  163. end:
  164. /* Final pass to lookup all created contexts */
  165. err = create_vmas(i915, &objects, &contexts);
  166. out:
  167. list_for_each_entry_safe(ctx, cn, &contexts, link) {
  168. list_del_init(&ctx->link);
  169. mock_context_close(ctx);
  170. }
  171. list_for_each_entry_safe(obj, on, &objects, st_link)
  172. i915_gem_object_put(obj);
  173. return err;
  174. }
/* One i915_vma_pin() test case together with its expected outcome. */
struct pin_mode {
	u64 size;	/* size argument passed to i915_vma_pin() */
	u64 flags;	/* PIN_* flags passed to i915_vma_pin() */
	/* Check the pin result; returns true if it matched expectations. */
	bool (*assert)(const struct i915_vma *,
		       const struct pin_mode *mode,
		       int result);
	const char *string;	/* human-readable description for error logs */
};
  183. static bool assert_pin_valid(const struct i915_vma *vma,
  184. const struct pin_mode *mode,
  185. int result)
  186. {
  187. if (result)
  188. return false;
  189. if (i915_vma_misplaced(vma, mode->size, 0, mode->flags))
  190. return false;
  191. return true;
  192. }
  193. __maybe_unused
  194. static bool assert_pin_enospc(const struct i915_vma *vma,
  195. const struct pin_mode *mode,
  196. int result)
  197. {
  198. return result == -ENOSPC;
  199. }
  200. __maybe_unused
  201. static bool assert_pin_einval(const struct i915_vma *vma,
  202. const struct pin_mode *mode,
  203. int result)
  204. {
  205. return result == -EINVAL;
  206. }
/*
 * igt_vma_pin1 - exercise i915_vma_pin() boundary conditions
 *
 * Pins a single page into the GGTT with a table of size/flags
 * combinations, checking that each request succeeds or fails exactly
 * as expected (valid, -EINVAL or -ENOSPC).
 */
static int igt_vma_pin1(void *arg)
{
	struct drm_i915_private *i915 = arg;
	const struct pin_mode modes[] = {
	/* Table entries: size, flags, expected-result checker, description */
#define VALID(sz, fl) { .size = (sz), .flags = (fl), .assert = assert_pin_valid, .string = #sz ", " #fl ", (valid) " }
#define __INVALID(sz, fl, check, eval) { .size = (sz), .flags = (fl), .assert = (check), .string = #sz ", " #fl ", (invalid " #eval ")" }
#define INVALID(sz, fl) __INVALID(sz, fl, assert_pin_einval, EINVAL)
#define NOSPACE(sz, fl) __INVALID(sz, fl, assert_pin_enospc, ENOSPC)
		VALID(0, PIN_GLOBAL),
		VALID(0, PIN_GLOBAL | PIN_MAPPABLE),

		/* Offset bias within the mappable and total GTT ranges */
		VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | 4096),
		VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | 8192),
		VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
		VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
		VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.base.total - 4096)),

		/* Fixed offsets: valid just inside a limit, invalid at/past it */
		VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)),
		INVALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | i915->ggtt.mappable_end),
		VALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.base.total - 4096)),
		INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | i915->ggtt.base.total),
		INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | round_down(U64_MAX, PAGE_SIZE)),

		/* Explicit sizes up to, at and beyond the GTT limits */
		VALID(4096, PIN_GLOBAL),
		VALID(8192, PIN_GLOBAL),
		VALID(i915->ggtt.mappable_end - 4096, PIN_GLOBAL | PIN_MAPPABLE),
		VALID(i915->ggtt.mappable_end, PIN_GLOBAL | PIN_MAPPABLE),
		NOSPACE(i915->ggtt.mappable_end + 4096, PIN_GLOBAL | PIN_MAPPABLE),
		VALID(i915->ggtt.base.total - 4096, PIN_GLOBAL),
		VALID(i915->ggtt.base.total, PIN_GLOBAL),
		NOSPACE(i915->ggtt.base.total + 4096, PIN_GLOBAL),
		NOSPACE(round_down(U64_MAX, PAGE_SIZE), PIN_GLOBAL),
		INVALID(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)),
		INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.base.total - 4096)),
		INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (round_down(U64_MAX, PAGE_SIZE) - 4096)),

		VALID(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),

#if !IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
		/* Misusing BIAS is a programming error (it is not controllable
		 * from userspace) so when debugging is enabled, it explodes.
		 * However, the tests are still quite interesting for checking
		 * variable start, end and size.
		 */
		NOSPACE(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | i915->ggtt.mappable_end),
		NOSPACE(0, PIN_GLOBAL | PIN_OFFSET_BIAS | i915->ggtt.base.total),
		NOSPACE(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
		NOSPACE(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.base.total - 4096)),
#endif
		{ },	/* sentinel: .assert == NULL terminates the loop */
#undef NOSPACE
#undef INVALID
#undef __INVALID
#undef VALID
	}, *m;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err = -EINVAL;

	/* Exercise all the weird and wonderful i915_vma_pin requests,
	 * focusing on error handling of boundary conditions.
	 */

	/* The test assumes an empty GGTT to start from. */
	GEM_BUG_ON(!drm_mm_clean(&i915->ggtt.base.mm));

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = checked_vma_instance(obj, &i915->ggtt.base, NULL);
	if (IS_ERR(vma))
		/* NOTE(review): returns the initial -EINVAL here rather
		 * than PTR_ERR(vma) — confirm this is intentional.
		 */
		goto out;

	for (m = modes; m->assert; m++) {
		err = i915_vma_pin(vma, m->size, 0, m->flags);
		if (!m->assert(vma, m, err)) {
			pr_err("%s to pin single page into GGTT with mode[%d:%s]: size=%llx flags=%llx, err=%d\n",
			       m->assert == assert_pin_valid ? "Failed" : "Unexpectedly succeeded",
			       (int)(m - modes), m->string, m->size, m->flags,
			       err);
			if (!err)
				i915_vma_unpin(vma);
			err = -EINVAL;
			goto out;
		}

		if (!err) {
			/* Pin succeeded: unpin and unbind so the next mode
			 * starts from a clean GGTT.
			 */
			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			if (err) {
				pr_err("Failed to unbind single page from GGTT, err=%d\n", err);
				goto out;
			}
		}
	}

	err = 0;
out:
	i915_gem_object_put(obj);
	return err;
}
  296. static unsigned long rotated_index(const struct intel_rotation_info *r,
  297. unsigned int n,
  298. unsigned int x,
  299. unsigned int y)
  300. {
  301. return (r->plane[n].stride * (r->plane[n].height - y - 1) +
  302. r->plane[n].offset + x);
  303. }
  304. static struct scatterlist *
  305. assert_rotated(struct drm_i915_gem_object *obj,
  306. const struct intel_rotation_info *r, unsigned int n,
  307. struct scatterlist *sg)
  308. {
  309. unsigned int x, y;
  310. for (x = 0; x < r->plane[n].width; x++) {
  311. for (y = 0; y < r->plane[n].height; y++) {
  312. unsigned long src_idx;
  313. dma_addr_t src;
  314. if (!sg) {
  315. pr_err("Invalid sg table: too short at plane %d, (%d, %d)!\n",
  316. n, x, y);
  317. return ERR_PTR(-EINVAL);
  318. }
  319. src_idx = rotated_index(r, n, x, y);
  320. src = i915_gem_object_get_dma_address(obj, src_idx);
  321. if (sg_dma_len(sg) != PAGE_SIZE) {
  322. pr_err("Invalid sg.length, found %d, expected %lu for rotated page (%d, %d) [src index %lu]\n",
  323. sg_dma_len(sg), PAGE_SIZE,
  324. x, y, src_idx);
  325. return ERR_PTR(-EINVAL);
  326. }
  327. if (sg_dma_address(sg) != src) {
  328. pr_err("Invalid address for rotated page (%d, %d) [src index %lu]\n",
  329. x, y, src_idx);
  330. return ERR_PTR(-EINVAL);
  331. }
  332. sg = sg_next(sg);
  333. }
  334. }
  335. return sg;
  336. }
  337. static unsigned int rotated_size(const struct intel_rotation_plane_info *a,
  338. const struct intel_rotation_plane_info *b)
  339. {
  340. return a->width * a->height + b->width * b->height;
  341. }
  342. static int igt_vma_rotate(void *arg)
  343. {
  344. struct drm_i915_private *i915 = arg;
  345. struct i915_address_space *vm = &i915->ggtt.base;
  346. struct drm_i915_gem_object *obj;
  347. const struct intel_rotation_plane_info planes[] = {
  348. { .width = 1, .height = 1, .stride = 1 },
  349. { .width = 2, .height = 2, .stride = 2 },
  350. { .width = 4, .height = 4, .stride = 4 },
  351. { .width = 8, .height = 8, .stride = 8 },
  352. { .width = 3, .height = 5, .stride = 3 },
  353. { .width = 3, .height = 5, .stride = 4 },
  354. { .width = 3, .height = 5, .stride = 5 },
  355. { .width = 5, .height = 3, .stride = 5 },
  356. { .width = 5, .height = 3, .stride = 7 },
  357. { .width = 5, .height = 3, .stride = 9 },
  358. { .width = 4, .height = 6, .stride = 6 },
  359. { .width = 6, .height = 4, .stride = 6 },
  360. { }
  361. }, *a, *b;
  362. const unsigned int max_pages = 64;
  363. int err = -ENOMEM;
  364. /* Create VMA for many different combinations of planes and check
  365. * that the page layout within the rotated VMA match our expectations.
  366. */
  367. obj = i915_gem_object_create_internal(i915, max_pages * PAGE_SIZE);
  368. if (IS_ERR(obj))
  369. goto out;
  370. for (a = planes; a->width; a++) {
  371. for (b = planes + ARRAY_SIZE(planes); b-- != planes; ) {
  372. struct i915_ggtt_view view;
  373. unsigned int n, max_offset;
  374. max_offset = max(a->stride * a->height,
  375. b->stride * b->height);
  376. GEM_BUG_ON(max_offset > max_pages);
  377. max_offset = max_pages - max_offset;
  378. view.type = I915_GGTT_VIEW_ROTATED;
  379. view.rotated.plane[0] = *a;
  380. view.rotated.plane[1] = *b;
  381. for_each_prime_number_from(view.rotated.plane[0].offset, 0, max_offset) {
  382. for_each_prime_number_from(view.rotated.plane[1].offset, 0, max_offset) {
  383. struct scatterlist *sg;
  384. struct i915_vma *vma;
  385. vma = checked_vma_instance(obj, vm, &view);
  386. if (IS_ERR(vma)) {
  387. err = PTR_ERR(vma);
  388. goto out_object;
  389. }
  390. err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
  391. if (err) {
  392. pr_err("Failed to pin VMA, err=%d\n", err);
  393. goto out_object;
  394. }
  395. if (vma->size != rotated_size(a, b) * PAGE_SIZE) {
  396. pr_err("VMA is wrong size, expected %lu, found %llu\n",
  397. PAGE_SIZE * rotated_size(a, b), vma->size);
  398. err = -EINVAL;
  399. goto out_object;
  400. }
  401. if (vma->pages->nents != rotated_size(a, b)) {
  402. pr_err("sg table is wrong sizeo, expected %u, found %u nents\n",
  403. rotated_size(a, b), vma->pages->nents);
  404. err = -EINVAL;
  405. goto out_object;
  406. }
  407. if (vma->node.size < vma->size) {
  408. pr_err("VMA binding too small, expected %llu, found %llu\n",
  409. vma->size, vma->node.size);
  410. err = -EINVAL;
  411. goto out_object;
  412. }
  413. if (vma->pages == obj->mm.pages) {
  414. pr_err("VMA using unrotated object pages!\n");
  415. err = -EINVAL;
  416. goto out_object;
  417. }
  418. sg = vma->pages->sgl;
  419. for (n = 0; n < ARRAY_SIZE(view.rotated.plane); n++) {
  420. sg = assert_rotated(obj, &view.rotated, n, sg);
  421. if (IS_ERR(sg)) {
  422. pr_err("Inconsistent VMA pages for plane %d: [(%d, %d, %d, %d), (%d, %d, %d, %d)]\n", n,
  423. view.rotated.plane[0].width,
  424. view.rotated.plane[0].height,
  425. view.rotated.plane[0].stride,
  426. view.rotated.plane[0].offset,
  427. view.rotated.plane[1].width,
  428. view.rotated.plane[1].height,
  429. view.rotated.plane[1].stride,
  430. view.rotated.plane[1].offset);
  431. err = -EINVAL;
  432. goto out_object;
  433. }
  434. }
  435. i915_vma_unpin(vma);
  436. }
  437. }
  438. }
  439. }
  440. out_object:
  441. i915_gem_object_put(obj);
  442. out:
  443. return err;
  444. }
  445. static bool assert_partial(struct drm_i915_gem_object *obj,
  446. struct i915_vma *vma,
  447. unsigned long offset,
  448. unsigned long size)
  449. {
  450. struct sgt_iter sgt;
  451. dma_addr_t dma;
  452. for_each_sgt_dma(dma, sgt, vma->pages) {
  453. dma_addr_t src;
  454. if (!size) {
  455. pr_err("Partial scattergather list too long\n");
  456. return false;
  457. }
  458. src = i915_gem_object_get_dma_address(obj, offset);
  459. if (src != dma) {
  460. pr_err("DMA mismatch for partial page offset %lu\n",
  461. offset);
  462. return false;
  463. }
  464. offset++;
  465. size--;
  466. }
  467. return true;
  468. }
  469. static bool assert_pin(struct i915_vma *vma,
  470. struct i915_ggtt_view *view,
  471. u64 size,
  472. const char *name)
  473. {
  474. bool ok = true;
  475. if (vma->size != size) {
  476. pr_err("(%s) VMA is wrong size, expected %llu, found %llu\n",
  477. name, size, vma->size);
  478. ok = false;
  479. }
  480. if (vma->node.size < vma->size) {
  481. pr_err("(%s) VMA binding too small, expected %llu, found %llu\n",
  482. name, vma->size, vma->node.size);
  483. ok = false;
  484. }
  485. if (view && view->type != I915_GGTT_VIEW_NORMAL) {
  486. if (memcmp(&vma->ggtt_view, view, sizeof(*view))) {
  487. pr_err("(%s) VMA mismatch upon creation!\n",
  488. name);
  489. ok = false;
  490. }
  491. if (vma->pages == vma->obj->mm.pages) {
  492. pr_err("(%s) VMA using original object pages!\n",
  493. name);
  494. ok = false;
  495. }
  496. } else {
  497. if (vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) {
  498. pr_err("Not the normal ggtt view! Found %d\n",
  499. vma->ggtt_view.type);
  500. ok = false;
  501. }
  502. if (vma->pages != vma->obj->mm.pages) {
  503. pr_err("VMA not using object pages!\n");
  504. ok = false;
  505. }
  506. }
  507. return ok;
  508. }
/*
 * igt_vma_partial - exercise partial vma creation and lookup
 *
 * Create lots of different partial views of one object, covering all
 * prime (offset, size) combinations, and check that a second pass
 * looking up the same ranges returns the already-created vma rather
 * than making duplicates.
 */
static int igt_vma_partial(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_address_space *vm = &i915->ggtt.base;
	const unsigned int npages = 1021; /* prime! */
	struct drm_i915_gem_object *obj;
	const struct phase {
		const char *name;
	} phases[] = {
		{ "create" },
		{ "lookup" },
		{ },	/* sentinel: .name == NULL terminates the loop */
	}, *p;
	unsigned int sz, offset;
	struct i915_vma *vma;
	int err = -ENOMEM;

	/* Create lots of different VMA for the object and check that
	 * we are returned the same VMA when we later request the same range.
	 */

	obj = i915_gem_object_create_internal(i915, npages*PAGE_SIZE);
	if (IS_ERR(obj))
		goto out;

	for (p = phases; p->name; p++) { /* exercise both create/lookup */
		unsigned int count, nvma;

		/* nvma counts the views requested this phase; after the
		 * second (lookup) phase the object's vma list must not
		 * have grown.
		 */
		nvma = 0;
		for_each_prime_number_from(sz, 1, npages) {
			for_each_prime_number_from(offset, 0, npages - sz) {
				struct i915_ggtt_view view;

				view.type = I915_GGTT_VIEW_PARTIAL;
				view.partial.offset = offset;
				view.partial.size = sz;

				/* A partial view covering the whole object
				 * collapses to the normal view.
				 */
				if (sz == npages)
					view.type = I915_GGTT_VIEW_NORMAL;

				vma = checked_vma_instance(obj, vm, &view);
				if (IS_ERR(vma)) {
					err = PTR_ERR(vma);
					goto out_object;
				}

				err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
				if (err)
					goto out_object;

				if (!assert_pin(vma, &view, sz*PAGE_SIZE, p->name)) {
					pr_err("(%s) Inconsistent partial pinning for (offset=%d, size=%d)\n",
					       p->name, offset, sz);
					err = -EINVAL;
					goto out_object;
				}

				if (!assert_partial(obj, vma, offset, sz)) {
					pr_err("(%s) Inconsistent partial pages for (offset=%d, size=%d)\n",
					       p->name, offset, sz);
					err = -EINVAL;
					goto out_object;
				}

				i915_vma_unpin(vma);
				nvma++;
			}
		}

		/* Every view requested must appear exactly once on the
		 * object's vma list — no duplicates from the lookup phase.
		 */
		count = 0;
		list_for_each_entry(vma, &obj->vma_list, obj_link)
			count++;
		if (count != nvma) {
			pr_err("(%s) All partial vma were not recorded on the obj->vma_list: found %u, expected %u\n",
			       p->name, count, nvma);
			err = -EINVAL;
			goto out_object;
		}

		/* Check that we did create the whole object mapping */
		vma = checked_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_object;
		}

		err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
		if (err)
			goto out_object;

		if (!assert_pin(vma, NULL, obj->base.size, p->name)) {
			pr_err("(%s) inconsistent full pin\n", p->name);
			err = -EINVAL;
			goto out_object;
		}

		i915_vma_unpin(vma);

		/* The full mapping was already created above (sz == npages
		 * collapses to the normal view), so the count must not grow.
		 */
		count = 0;
		list_for_each_entry(vma, &obj->vma_list, obj_link)
			count++;
		if (count != nvma) {
			pr_err("(%s) allocated an extra full vma!\n", p->name);
			err = -EINVAL;
			goto out_object;
		}
	}

out_object:
	i915_gem_object_put(obj);
out:
	return err;
}
  604. int i915_vma_mock_selftests(void)
  605. {
  606. static const struct i915_subtest tests[] = {
  607. SUBTEST(igt_vma_create),
  608. SUBTEST(igt_vma_pin1),
  609. SUBTEST(igt_vma_rotate),
  610. SUBTEST(igt_vma_partial),
  611. };
  612. struct drm_i915_private *i915;
  613. int err;
  614. i915 = mock_gem_device();
  615. if (!i915)
  616. return -ENOMEM;
  617. mutex_lock(&i915->drm.struct_mutex);
  618. err = i915_subtests(tests, i915);
  619. mutex_unlock(&i915->drm.struct_mutex);
  620. drm_dev_unref(&i915->drm);
  621. return err;
  622. }