i915_gem_gtt.c

/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/list_sort.h>
#include <linux/prime_numbers.h>

#include "../i915_selftest.h"
#include "i915_random.h"

#include "mock_context.h"
#include "mock_drm.h"
#include "mock_gem_device.h"

static void fake_free_pages(struct drm_i915_gem_object *obj,
			    struct sg_table *pages)
{
	sg_free_table(pages);
	kfree(pages);
}
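
/*
 * Fake "backing storage" for huge objects: every scatterlist chunk points at
 * the same biased pfn, with a fake dma address taken from page_to_phys(), so
 * no real memory is allocated. Only the GTT/page-table side of the object is
 * exercised; the pages themselves must never be touched.
 */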
static int fake_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
#define PFN_BIAS 0x1000
	struct sg_table *pages;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	typeof(obj->base.size) rem;

	pages = kmalloc(sizeof(*pages), GFP);
	if (!pages)
		return -ENOMEM;

	rem = round_up(obj->base.size, BIT(31)) >> 31;
	if (sg_alloc_table(pages, rem, GFP)) {
		kfree(pages);
		return -ENOMEM;
	}

	sg_page_sizes = 0;
	rem = obj->base.size;
	for (sg = pages->sgl; sg; sg = sg_next(sg)) {
		unsigned long len = min_t(typeof(rem), rem, BIT(31));

		GEM_BUG_ON(!len);

		sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
		sg_dma_address(sg) = page_to_phys(sg_page(sg));
		sg_dma_len(sg) = len;
		sg_page_sizes |= len;

		rem -= len;
	}
	GEM_BUG_ON(rem);

	obj->mm.madv = I915_MADV_DONTNEED;
	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);

	return 0;
#undef GFP
}

static void fake_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	fake_free_pages(obj, pages);
	obj->mm.dirty = false;
	obj->mm.madv = I915_MADV_WILLNEED;
}

static const struct drm_i915_gem_object_ops fake_ops = {
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_pages,
	.put_pages = fake_put_pages,
};
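
/*
 * Allocate a GEM object of the requested size backed by fake_ops; the pages
 * are pinned once up front to preallocate the fake "backing storage" before
 * the object is handed to a test.
 */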
static struct drm_i915_gem_object *
fake_dma_object(struct drm_i915_private *i915, u64 size)
{
	struct drm_i915_gem_object *obj;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc(i915);
	if (!obj)
		goto err;

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &fake_ops);

	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->cache_level = I915_CACHE_NONE;

	/* Preallocate the "backing storage" */
	if (i915_gem_object_pin_pages(obj))
		goto err_obj;

	i915_gem_object_unpin_pages(obj);
	return obj;

err_obj:
	i915_gem_object_put(obj);
err:
	return ERR_PTR(-ENOMEM);
}

static int igt_ppgtt_alloc(void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	struct i915_hw_ppgtt *ppgtt;
	u64 size, last;
	int err;

	/* Allocate a ppgtt and try to fill the entire range */

	if (!USES_PPGTT(dev_priv))
		return 0;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return -ENOMEM;

	mutex_lock(&dev_priv->drm.struct_mutex);
	err = __hw_ppgtt_init(ppgtt, dev_priv);
	if (err)
		goto err_ppgtt;

	if (!ppgtt->base.allocate_va_range)
		goto err_ppgtt_cleanup;

	/* Check we can allocate the entire range */
	for (size = 4096;
	     size <= ppgtt->base.total;
	     size <<= 2) {
		err = ppgtt->base.allocate_va_range(&ppgtt->base, 0, size);
		if (err) {
			if (err == -ENOMEM) {
				pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n",
					size, ilog2(size));
				err = 0; /* virtual space too large! */
			}
			goto err_ppgtt_cleanup;
		}

		ppgtt->base.clear_range(&ppgtt->base, 0, size);
	}

	/* Check we can incrementally allocate the entire range */
	for (last = 0, size = 4096;
	     size <= ppgtt->base.total;
	     last = size, size <<= 2) {
		err = ppgtt->base.allocate_va_range(&ppgtt->base,
						    last, size - last);
		if (err) {
			if (err == -ENOMEM) {
				pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n",
					last, size - last, ilog2(size));
				err = 0; /* virtual space too large! */
			}
			goto err_ppgtt_cleanup;
		}
	}

err_ppgtt_cleanup:
	ppgtt->base.cleanup(&ppgtt->base);
err_ppgtt:
	mutex_unlock(&dev_priv->drm.struct_mutex);
	kfree(ppgtt);
	return err;
}
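
/*
 * Drive the raw vm hooks (allocate_va_range, insert_entries, clear_range)
 * through a zeroed mock vma instead of the full vma machinery, binding
 * power-of-two sized objects at randomised offsets across the hole.
 */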
static int lowlevel_hole(struct drm_i915_private *i915,
			 struct i915_address_space *vm,
			 u64 hole_start, u64 hole_end,
			 unsigned long end_time)
{
	I915_RND_STATE(seed_prng);
	unsigned int size;
	struct i915_vma mock_vma;

	memset(&mock_vma, 0, sizeof(struct i915_vma));

	/* Keep creating larger objects until one cannot fit into the hole */
	for (size = 12; (hole_end - hole_start) >> size; size++) {
		I915_RND_SUBSTATE(prng, seed_prng);
		struct drm_i915_gem_object *obj;
		unsigned int *order, count, n;
		u64 hole_size;

		hole_size = (hole_end - hole_start) >> size;
		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
		count = hole_size >> 1;
		if (!count) {
			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
				 __func__, hole_start, hole_end, size, hole_size);
			break;
		}

		do {
			order = i915_random_order(count, &prng);
			if (order)
				break;
		} while (count >>= 1);
		if (!count)
			return -ENOMEM;
		GEM_BUG_ON(!order);

		GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
		GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end);

		/* Ignore allocation failures (i.e. don't report them as
		 * a test failure) as we are purposefully allocating very
		 * large objects without checking that we have sufficient
		 * memory. We expect to hit -ENOMEM.
		 */
		obj = fake_dma_object(i915, BIT_ULL(size));
		if (IS_ERR(obj)) {
			kfree(order);
			break;
		}

		GEM_BUG_ON(obj->base.size != BIT_ULL(size));

		if (i915_gem_object_pin_pages(obj)) {
			i915_gem_object_put(obj);
			kfree(order);
			break;
		}

		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);

			if (igt_timeout(end_time,
					"%s timed out before %d/%d\n",
					__func__, n, count)) {
				hole_end = hole_start; /* quit */
				break;
			}

			if (vm->allocate_va_range &&
			    vm->allocate_va_range(vm, addr, BIT_ULL(size)))
				break;

			mock_vma.pages = obj->mm.pages;
			mock_vma.node.size = BIT_ULL(size);
			mock_vma.node.start = addr;

			intel_runtime_pm_get(i915);
			vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0);
			intel_runtime_pm_put(i915);
		}
		count = n;

		i915_random_reorder(order, count, &prng);
		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
			vm->clear_range(vm, addr, BIT_ULL(size));
		}

		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);

		kfree(order);
	}

	return 0;
}

static void close_object_list(struct list_head *objects,
			      struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj, *on;
	int ignored;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, vm, NULL);
		if (!IS_ERR(vma))
			ignored = i915_vma_unbind(vma);
		/* Only ppgtt vma may be closed before the object is freed */
		if (!IS_ERR(vma) && !i915_vma_is_ggtt(vma))
			i915_vma_close(vma);

		list_del(&obj->st_link);
		i915_gem_object_put(obj);
	}
}
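
/*
 * Pack the hole with objects whose page counts step through the powers of
 * each prime, binding them top-down and bottom-up against the hole edges.
 * Forward and reverse walks over the object list pin, verify placement and
 * unbind, checking nothing is misplaced or silently evicted in between.
 */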
static int fill_hole(struct drm_i915_private *i915,
		     struct i915_address_space *vm,
		     u64 hole_start, u64 hole_end,
		     unsigned long end_time)
{
	const u64 hole_size = hole_end - hole_start;
	struct drm_i915_gem_object *obj;
	const unsigned long max_pages =
		min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
	const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
	unsigned long npages, prime, flags;
	struct i915_vma *vma;
	LIST_HEAD(objects);
	int err;

	/* Try binding many VMA working inwards from either edge */

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	for_each_prime_number_from(prime, 2, max_step) {
		for (npages = 1; npages <= max_pages; npages *= prime) {
			const u64 full_size = npages << PAGE_SHIFT;
			const struct {
				const char *name;
				u64 offset;
				int step;
			} phases[] = {
				{ "top-down", hole_end, -1, },
				{ "bottom-up", hole_start, 1, },
				{ }
			}, *p;

			obj = fake_dma_object(i915, full_size);
			if (IS_ERR(obj))
				break;

			list_add(&obj->st_link, &objects);

			/* Align differing sized objects against the edges, and
			 * check we don't walk off into the void when binding
			 * them into the GTT.
			 */
			for (p = phases; p->name; p++) {
				u64 offset;

				offset = p->offset;
				list_for_each_entry(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					err = i915_vma_pin(vma, 0, 0, offset | flags);
					if (err) {
						pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
						       __func__, p->name, err, npages, prime, offset);
						goto err;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						err = -EINVAL;
						goto err;
					}

					i915_vma_unpin(vma);

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}

				offset = p->offset;
				list_for_each_entry(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       offset);
						err = -EINVAL;
						goto err;
					}

					err = i915_vma_unbind(vma);
					if (err) {
						pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       err);
						goto err;
					}

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}

				offset = p->offset;
				list_for_each_entry_reverse(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					err = i915_vma_pin(vma, 0, 0, offset | flags);
					if (err) {
						pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
						       __func__, p->name, err, npages, prime, offset);
						goto err;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						err = -EINVAL;
						goto err;
					}

					i915_vma_unpin(vma);

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}

				offset = p->offset;
				list_for_each_entry_reverse(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						err = -EINVAL;
						goto err;
					}

					err = i915_vma_unbind(vma);
					if (err) {
						pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       err);
						goto err;
					}

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}
			}

			if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
					__func__, npages, prime)) {
				err = -EINTR;
				goto err;
			}
		}

		close_object_list(&objects, vm);
	}

	return 0;

err:
	close_object_list(&objects, vm);
	return err;
}
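
/*
 * Walk a single object of every prime page count back-to-back through the
 * hole, verifying after each pin that the node landed exactly at the
 * requested offset before unbinding and stepping forwards.
 */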
static int walk_hole(struct drm_i915_private *i915,
		     struct i915_address_space *vm,
		     u64 hole_start, u64 hole_end,
		     unsigned long end_time)
{
	const u64 hole_size = hole_end - hole_start;
	const unsigned long max_pages =
		min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
	unsigned long flags;
	u64 size;

	/* Try binding a single VMA in different positions within the hole */

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	for_each_prime_number_from(size, 1, max_pages) {
		struct drm_i915_gem_object *obj;
		struct i915_vma *vma;
		u64 addr;
		int err = 0;

		obj = fake_dma_object(i915, size << PAGE_SHIFT);
		if (IS_ERR(obj))
			break;

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_put;
		}

		for (addr = hole_start;
		     addr + obj->base.size < hole_end;
		     addr += obj->base.size) {
			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s bind failed at %llx + %llx [hole %llx - %llx] with err=%d\n",
				       __func__, addr, vma->size,
				       hole_start, hole_end, err);
				goto err_close;
			}
			i915_vma_unpin(vma);

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, vma->size);
				err = -EINVAL;
				goto err_close;
			}

			err = i915_vma_unbind(vma);
			if (err) {
				pr_err("%s unbind failed at %llx + %llx with err=%d\n",
				       __func__, addr, vma->size, err);
				goto err_close;
			}

			GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

			if (igt_timeout(end_time,
					"%s timed out at %llx\n",
					__func__, addr)) {
				err = -EINTR;
				goto err_close;
			}
		}

err_close:
		if (!i915_vma_is_ggtt(vma))
			i915_vma_close(vma);
err_put:
		i915_gem_object_put(obj);
		if (err)
			return err;
	}

	return 0;
}
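
/*
 * Straddle every power-of-two boundary inside the hole with a two-page
 * object, one page on either side of the boundary. For example, with 4K
 * pages and a 1M step, the first placement from hole_start 0 is at 0xff000
 * so the object spans the 0x100000 mark. This exercises insertion across
 * each alignment boundary within the address space.
 */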
static int pot_hole(struct drm_i915_private *i915,
		    struct i915_address_space *vm,
		    u64 hole_start, u64 hole_end,
		    unsigned long end_time)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	unsigned long flags;
	unsigned int pot;
	int err = 0;

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	obj = i915_gem_object_create_internal(i915, 2 * I915_GTT_PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	/* Insert a pair of pages across every pot boundary within the hole */
	for (pot = fls64(hole_end - 1) - 1;
	     pot > ilog2(2 * I915_GTT_PAGE_SIZE);
	     pot--) {
		u64 step = BIT_ULL(pot);
		u64 addr;

		for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
		     addr <= round_down(hole_end - 2*I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
		     addr += step) {
			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
				       __func__,
				       addr,
				       hole_start, hole_end,
				       err);
				goto err;
			}

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, vma->size);
				i915_vma_unpin(vma);
				err = i915_vma_unbind(vma);
				err = -EINVAL;
				goto err;
			}

			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			GEM_BUG_ON(err);
		}

		if (igt_timeout(end_time,
				"%s timed out after %d/%d\n",
				__func__, pot, fls64(hole_end - 1) - 1)) {
			err = -EINTR;
			goto err;
		}
	}

err:
	if (!i915_vma_is_ggtt(vma))
		i915_vma_close(vma);
err_obj:
	i915_gem_object_put(obj);
	return err;
}
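
/*
 * Bind a single power-of-two sized object at pseudo-random offsets scattered
 * through the hole, doubling the size each pass, to check that out-of-order
 * placement and removal leave the address space consistent.
 */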
static int drunk_hole(struct drm_i915_private *i915,
		      struct i915_address_space *vm,
		      u64 hole_start, u64 hole_end,
		      unsigned long end_time)
{
	I915_RND_STATE(prng);
	unsigned int size;
	unsigned long flags;

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	/* Keep creating larger objects until one cannot fit into the hole */
	for (size = 12; (hole_end - hole_start) >> size; size++) {
		struct drm_i915_gem_object *obj;
		unsigned int *order, count, n;
		struct i915_vma *vma;
		u64 hole_size;
		int err = -ENODEV;

		hole_size = (hole_end - hole_start) >> size;
		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
		count = hole_size >> 1;
		if (!count) {
			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
				 __func__, hole_start, hole_end, size, hole_size);
			break;
		}

		do {
			order = i915_random_order(count, &prng);
			if (order)
				break;
		} while (count >>= 1);
		if (!count)
			return -ENOMEM;
		GEM_BUG_ON(!order);

		/* Ignore allocation failures (i.e. don't report them as
		 * a test failure) as we are purposefully allocating very
		 * large objects without checking that we have sufficient
		 * memory. We expect to hit -ENOMEM.
		 */
		obj = fake_dma_object(i915, BIT_ULL(size));
		if (IS_ERR(obj)) {
			kfree(order);
			break;
		}

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_obj;
		}

		GEM_BUG_ON(vma->size != BIT_ULL(size));

		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
				       __func__,
				       addr, BIT_ULL(size),
				       hole_start, hole_end,
				       err);
				goto err;
			}

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, BIT_ULL(size));
				i915_vma_unpin(vma);
				err = i915_vma_unbind(vma);
				err = -EINVAL;
				goto err;
			}

			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			GEM_BUG_ON(err);

			if (igt_timeout(end_time,
					"%s timed out after %d/%d\n",
					__func__, n, count)) {
				err = -EINTR;
				goto err;
			}
		}

err:
		if (!i915_vma_is_ggtt(vma))
			i915_vma_close(vma);
err_obj:
		i915_gem_object_put(obj);
		kfree(order);
		if (err)
			return err;
	}

	return 0;
}
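
/*
 * Fill the hole end-to-end with progressively larger objects while the vm's
 * fault injection makes page-table allocations fail, forcing the shrinker to
 * run in the middle of an allocation. shrink_hole() below sweeps the
 * injection interval through the primes.
 */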
static int __shrink_hole(struct drm_i915_private *i915,
			 struct i915_address_space *vm,
			 u64 hole_start, u64 hole_end,
			 unsigned long end_time)
{
	struct drm_i915_gem_object *obj;
	unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
	unsigned int order = 12;
	LIST_HEAD(objects);
	int err = 0;
	u64 addr;

	/* Keep creating larger objects until one cannot fit into the hole */
	for (addr = hole_start; addr < hole_end; ) {
		struct i915_vma *vma;
		u64 size = BIT_ULL(order++);

		size = min(size, hole_end - addr);
		obj = fake_dma_object(i915, size);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			break;
		}

		GEM_BUG_ON(vma->size != size);

		err = i915_vma_pin(vma, 0, 0, addr | flags);
		if (err) {
			pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
			       __func__, addr, size, hole_start, hole_end, err);
			break;
		}

		if (!drm_mm_node_allocated(&vma->node) ||
		    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
			pr_err("%s incorrect at %llx + %llx\n",
			       __func__, addr, size);
			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			err = -EINVAL;
			break;
		}

		i915_vma_unpin(vma);
		addr += size;

		if (igt_timeout(end_time,
				"%s timed out at offset %llx [%llx - %llx]\n",
				__func__, addr, hole_start, hole_end)) {
			err = -EINTR;
			break;
		}
	}

	close_object_list(&objects, vm);
	return err;
}

static int shrink_hole(struct drm_i915_private *i915,
		       struct i915_address_space *vm,
		       u64 hole_start, u64 hole_end,
		       unsigned long end_time)
{
	unsigned long prime;
	int err;

	vm->fault_attr.probability = 999;
	atomic_set(&vm->fault_attr.times, -1);

	for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
		vm->fault_attr.interval = prime;
		err = __shrink_hole(i915, vm, hole_start, hole_end, end_time);
		if (err)
			break;
	}

	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));

	return err;
}

static int shrink_boom(struct drm_i915_private *i915,
		       struct i915_address_space *vm,
		       u64 hole_start, u64 hole_end,
		       unsigned long end_time)
{
	unsigned int sizes[] = { SZ_2M, SZ_1G };
	struct drm_i915_gem_object *purge;
	struct drm_i915_gem_object *explode;
	int err;
	int i;

	/*
	 * Catch the case which shrink_hole seems to miss. The setup here
	 * requires invoking the shrinker as we do the alloc_pt/alloc_pd, while
	 * ensuring that all vma associated with the respective pd/pdp are
	 * unpinned at the time.
	 */

	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
		unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
		unsigned int size = sizes[i];
		struct i915_vma *vma;

		purge = fake_dma_object(i915, size);
		if (IS_ERR(purge))
			return PTR_ERR(purge);

		vma = i915_vma_instance(purge, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_purge;
		}

		err = i915_vma_pin(vma, 0, 0, flags);
		if (err)
			goto err_purge;

		/* Should now be ripe for purging */
		i915_vma_unpin(vma);

		explode = fake_dma_object(i915, size);
		if (IS_ERR(explode)) {
			err = PTR_ERR(explode);
			goto err_purge;
		}

		vm->fault_attr.probability = 100;
		vm->fault_attr.interval = 1;
		atomic_set(&vm->fault_attr.times, -1);

		vma = i915_vma_instance(explode, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_explode;
		}

		err = i915_vma_pin(vma, 0, 0, flags | size);
		if (err)
			goto err_explode;

		i915_vma_unpin(vma);

		i915_gem_object_put(purge);
		i915_gem_object_put(explode);

		memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
	}

	return 0;

err_explode:
	i915_gem_object_put(explode);
err_purge:
	i915_gem_object_put(purge);
	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
	return err;
}
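
/*
 * Create a fresh full ppgtt under struct_mutex (with a mock drm_file as
 * owner), run @func over its entire range, then tear it down again.
 */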
static int exercise_ppgtt(struct drm_i915_private *dev_priv,
			  int (*func)(struct drm_i915_private *i915,
				      struct i915_address_space *vm,
				      u64 hole_start, u64 hole_end,
				      unsigned long end_time))
{
	struct drm_file *file;
	struct i915_hw_ppgtt *ppgtt;
	IGT_TIMEOUT(end_time);
	int err;

	if (!USES_FULL_PPGTT(dev_priv))
		return 0;

	file = mock_file(dev_priv);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&dev_priv->drm.struct_mutex);
	ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv, "mock");
	if (IS_ERR(ppgtt)) {
		err = PTR_ERR(ppgtt);
		goto out_unlock;
	}
	GEM_BUG_ON(offset_in_page(ppgtt->base.total));
	GEM_BUG_ON(ppgtt->base.closed);

	err = func(dev_priv, &ppgtt->base, 0, ppgtt->base.total, end_time);

	i915_ppgtt_close(&ppgtt->base);
	i915_ppgtt_put(ppgtt);
out_unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);

	mock_file_free(dev_priv, file);
	return err;
}

static int igt_ppgtt_fill(void *arg)
{
	return exercise_ppgtt(arg, fill_hole);
}

static int igt_ppgtt_walk(void *arg)
{
	return exercise_ppgtt(arg, walk_hole);
}

static int igt_ppgtt_pot(void *arg)
{
	return exercise_ppgtt(arg, pot_hole);
}

static int igt_ppgtt_drunk(void *arg)
{
	return exercise_ppgtt(arg, drunk_hole);
}

static int igt_ppgtt_lowlevel(void *arg)
{
	return exercise_ppgtt(arg, lowlevel_hole);
}

static int igt_ppgtt_shrink(void *arg)
{
	return exercise_ppgtt(arg, shrink_hole);
}

static int igt_ppgtt_shrink_boom(void *arg)
{
	return exercise_ppgtt(arg, shrink_boom);
}
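
/* Order the GGTT hole_stack by ascending hole address for a stable walk */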
static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
{
	struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
	struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);

	if (a->start < b->start)
		return -1;
	else
		return 1;
}
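
/*
 * Apply @func to every hole currently in the global GTT. Each pass may
 * reshape the drm_mm, so the hole list is re-sorted and the walk restarted
 * from just above the last hole processed.
 */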
static int exercise_ggtt(struct drm_i915_private *i915,
			 int (*func)(struct drm_i915_private *i915,
				     struct i915_address_space *vm,
				     u64 hole_start, u64 hole_end,
				     unsigned long end_time))
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	u64 hole_start, hole_end, last = 0;
	struct drm_mm_node *node;
	IGT_TIMEOUT(end_time);
	int err = 0;

	mutex_lock(&i915->drm.struct_mutex);
restart:
	list_sort(NULL, &ggtt->base.mm.hole_stack, sort_holes);
	drm_mm_for_each_hole(node, &ggtt->base.mm, hole_start, hole_end) {
		if (hole_start < last)
			continue;

		if (ggtt->base.mm.color_adjust)
			ggtt->base.mm.color_adjust(node, 0,
						   &hole_start, &hole_end);
		if (hole_start >= hole_end)
			continue;

		err = func(i915, &ggtt->base, hole_start, hole_end, end_time);
		if (err)
			break;

		/* As we have manipulated the drm_mm, the list may be corrupt */
		last = hole_end;
		goto restart;
	}
	mutex_unlock(&i915->drm.struct_mutex);

	return err;
}

static int igt_ggtt_fill(void *arg)
{
	return exercise_ggtt(arg, fill_hole);
}

static int igt_ggtt_walk(void *arg)
{
	return exercise_ggtt(arg, walk_hole);
}

static int igt_ggtt_pot(void *arg)
{
	return exercise_ggtt(arg, pot_hole);
}

static int igt_ggtt_drunk(void *arg)
{
	return exercise_ggtt(arg, drunk_hole);
}

static int igt_ggtt_lowlevel(void *arg)
{
	return exercise_ggtt(arg, lowlevel_hole);
}
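
/*
 * Map the object's single page at many consecutive GGTT offsets with
 * insert_page, then write a distinct value through each aliased mapping in
 * random order and, after flushing GGTT writes, read them back to confirm
 * every PTE really points at that page.
 */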
static int igt_ggtt_page(void *arg)
{
	const unsigned int count = PAGE_SIZE/sizeof(u32);
	I915_RND_STATE(prng);
	struct drm_i915_private *i915 = arg;
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node tmp;
	unsigned int *order, n;
	int err;

	mutex_lock(&i915->drm.struct_mutex);

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_unlock;
	}

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out_free;

	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_in_range(&ggtt->base.mm, &tmp,
					  count * PAGE_SIZE, 0,
					  I915_COLOR_UNEVICTABLE,
					  0, ggtt->mappable_end,
					  DRM_MM_INSERT_LOW);
	if (err)
		goto out_unpin;

	intel_runtime_pm_get(i915);

	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + n * PAGE_SIZE;

		ggtt->base.insert_page(&ggtt->base,
				       i915_gem_object_get_dma_address(obj, 0),
				       offset, I915_CACHE_NONE, 0);
	}

	order = i915_random_order(count, &prng);
	if (!order) {
		err = -ENOMEM;
		goto out_remove;
	}

	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + order[n] * PAGE_SIZE;
		u32 __iomem *vaddr;

		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
		iowrite32(n, vaddr + n);
		io_mapping_unmap_atomic(vaddr);
	}
	i915_gem_flush_ggtt_writes(i915);

	i915_random_reorder(order, count, &prng);
	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + order[n] * PAGE_SIZE;
		u32 __iomem *vaddr;
		u32 val;

		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
		val = ioread32(vaddr + n);
		io_mapping_unmap_atomic(vaddr);

		if (val != n) {
			pr_err("insert page failed: found %d, expected %d\n",
			       val, n);
			err = -EINVAL;
			break;
		}
	}

	kfree(order);
out_remove:
	ggtt->base.clear_range(&ggtt->base, tmp.start, tmp.size);
	intel_runtime_pm_put(i915);
	drm_mm_remove_node(&tmp);
out_unpin:
	i915_gem_object_unpin_pages(obj);
out_free:
	i915_gem_object_put(obj);
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
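
/*
 * Mimic a real binding for a node placed by hand: bump the object's bind
 * count, pin its pages and move the vma onto the vm's inactive list so that
 * later eviction treats it like any other bound vma.
 */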
static void track_vma_bind(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	obj->bind_count++; /* track for eviction later */
	__i915_gem_object_pin_pages(obj);

	vma->pages = obj->mm.pages;
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
}

static int exercise_mock(struct drm_i915_private *i915,
			 int (*func)(struct drm_i915_private *i915,
				     struct i915_address_space *vm,
				     u64 hole_start, u64 hole_end,
				     unsigned long end_time))
{
	struct i915_gem_context *ctx;
	struct i915_hw_ppgtt *ppgtt;
	IGT_TIMEOUT(end_time);
	int err;

	ctx = mock_context(i915, "mock");
	if (!ctx)
		return -ENOMEM;

	ppgtt = ctx->ppgtt;
	GEM_BUG_ON(!ppgtt);

	err = func(i915, &ppgtt->base, 0, ppgtt->base.total, end_time);

	mock_context_close(ctx);
	return err;
}

static int igt_mock_fill(void *arg)
{
	return exercise_mock(arg, fill_hole);
}

static int igt_mock_walk(void *arg)
{
	return exercise_mock(arg, walk_hole);
}

static int igt_mock_pot(void *arg)
{
	return exercise_mock(arg, pot_hole);
}

static int igt_mock_drunk(void *arg)
{
	return exercise_mock(arg, drunk_hole);
}

static int igt_gtt_reserve(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj, *on;
	LIST_HEAD(objects);
	u64 total;
	int err = -ENODEV;

	/* i915_gem_gtt_reserve() tries to reserve the precise range
	 * for the node, and evicts if it has to. So our test checks that
	 * it can give us the requested space and prevent overlaps.
	 */

	/* Start by filling the GGTT */
	for (total = 0;
	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
	     total += 2*I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
					   obj->base.size,
					   total,
					   obj->cache_level,
					   0);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != total ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
			       vma->node.start, vma->node.size,
			       total, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

	/* Now we start forcing evictions */
	for (total = I915_GTT_PAGE_SIZE;
	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
	     total += 2*I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
					   obj->base.size,
					   total,
					   obj->cache_level,
					   0);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != total ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
			       vma->node.start, vma->node.size,
			       total, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

	/* And then try at random */
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		struct i915_vma *vma;
		u64 offset;

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_vma_unbind(vma);
		if (err) {
			pr_err("i915_vma_unbind failed with err=%d!\n", err);
			goto out;
		}

		offset = random_offset(0, i915->ggtt.base.total,
				       2*I915_GTT_PAGE_SIZE,
				       I915_GTT_MIN_ALIGNMENT);

		err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
					   obj->base.size,
					   offset,
					   obj->cache_level,
					   0);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != offset ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
			       vma->node.start, vma->node.size,
			       offset, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

out:
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);
	}
	return err;
}

static int igt_gtt_insert(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj, *on;
	struct drm_mm_node tmp = {};
	const struct invalid_insert {
		u64 size;
		u64 alignment;
		u64 start, end;
	} invalid_insert[] = {
		{
			i915->ggtt.base.total + I915_GTT_PAGE_SIZE, 0,
			0, i915->ggtt.base.total,
		},
		{
			2*I915_GTT_PAGE_SIZE, 0,
			0, I915_GTT_PAGE_SIZE,
		},
		{
			-(u64)I915_GTT_PAGE_SIZE, 0,
			0, 4*I915_GTT_PAGE_SIZE,
		},
		{
			-(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
			0, 4*I915_GTT_PAGE_SIZE,
		},
		{
			I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
			I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
		},
		{}
	}, *ii;
	LIST_HEAD(objects);
	u64 total;
	int err = -ENODEV;

	/* i915_gem_gtt_insert() tries to allocate some free space in the GTT
	 * to the node, evicting if required.
	 */

	/* Check a couple of obviously invalid requests */
	for (ii = invalid_insert; ii->size; ii++) {
		err = i915_gem_gtt_insert(&i915->ggtt.base, &tmp,
					  ii->size, ii->alignment,
					  I915_COLOR_UNEVICTABLE,
					  ii->start, ii->end,
					  0);
		if (err != -ENOSPC) {
			pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
			       ii->size, ii->alignment, ii->start, ii->end,
			       err);
			return -EINVAL;
		}
	}

	/* Start by filling the GGTT */
	for (total = 0;
	     total + I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
	     total += I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, i915->ggtt.base.total,
					  0);
		if (err == -ENOSPC) {
			/* maxed out the GGTT space */
			i915_gem_object_put(obj);
			break;
		}
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);
		__i915_vma_pin(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	}

	list_for_each_entry(obj, &objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		if (!drm_mm_node_allocated(&vma->node)) {
			pr_err("VMA was unexpectedly evicted!\n");
			err = -EINVAL;
			goto out;
		}

		__i915_vma_unpin(vma);
	}

	/* If we then reinsert, we should find the same hole */
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		struct i915_vma *vma;
		u64 offset;

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		offset = vma->node.start;

		err = i915_vma_unbind(vma);
		if (err) {
			pr_err("i915_vma_unbind failed with err=%d!\n", err);
			goto out;
		}

		err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, i915->ggtt.base.total,
					  0);
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != offset) {
			pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
			       offset, vma->node.start);
			err = -EINVAL;
			goto out;
		}
	}

	/* And then force evictions */
	for (total = 0;
	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
	     total += 2*I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, 2*I915_GTT_PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, i915->ggtt.base.total,
					  0);
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	}

out:
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);
	}
	return err;
}

int i915_gem_gtt_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_mock_drunk),
		SUBTEST(igt_mock_walk),
		SUBTEST(igt_mock_pot),
		SUBTEST(igt_mock_fill),
		SUBTEST(igt_gtt_reserve),
		SUBTEST(igt_gtt_insert),
	};
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	mutex_lock(&i915->drm.struct_mutex);
	err = i915_subtests(tests, i915);
	mutex_unlock(&i915->drm.struct_mutex);

	drm_dev_unref(&i915->drm);
	return err;
}

int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_ppgtt_alloc),
		SUBTEST(igt_ppgtt_lowlevel),
		SUBTEST(igt_ppgtt_drunk),
		SUBTEST(igt_ppgtt_walk),
		SUBTEST(igt_ppgtt_pot),
		SUBTEST(igt_ppgtt_fill),
		SUBTEST(igt_ppgtt_shrink),
		SUBTEST(igt_ppgtt_shrink_boom),
		SUBTEST(igt_ggtt_lowlevel),
		SUBTEST(igt_ggtt_drunk),
		SUBTEST(igt_ggtt_walk),
		SUBTEST(igt_ggtt_pot),
		SUBTEST(igt_ggtt_fill),
		SUBTEST(igt_ggtt_page),
	};

	GEM_BUG_ON(offset_in_page(i915->ggtt.base.total));

	return i915_subtests(tests, i915);
}