
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/list_sort.h>
#include <linux/prime_numbers.h>

#include "../i915_selftest.h"
#include "i915_random.h"

#include "mock_context.h"
#include "mock_drm.h"
#include "mock_gem_device.h"

static void fake_free_pages(struct drm_i915_gem_object *obj,
			    struct sg_table *pages)
{
	sg_free_table(pages);
	kfree(pages);
}

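/* Provide fake backing storage: every scatterlist entry points at the
 * same dummy pfn (PFN_BIAS), so arbitrarily large "allocations" consume
 * no real memory beyond the sg_table itself. Only the GTT bookkeeping
 * is exercised, never the page contents.
 */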
static int fake_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
#define PFN_BIAS 0x1000
	struct sg_table *pages;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	typeof(obj->base.size) rem;

	pages = kmalloc(sizeof(*pages), GFP);
	if (!pages)
		return -ENOMEM;

	rem = round_up(obj->base.size, BIT(31)) >> 31;
	if (sg_alloc_table(pages, rem, GFP)) {
		kfree(pages);
		return -ENOMEM;
	}

	sg_page_sizes = 0;
	rem = obj->base.size;
	for (sg = pages->sgl; sg; sg = sg_next(sg)) {
		unsigned long len = min_t(typeof(rem), rem, BIT(31));

		GEM_BUG_ON(!len);
		sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
		sg_dma_address(sg) = page_to_phys(sg_page(sg));
		sg_dma_len(sg) = len;
		sg_page_sizes |= len;

		rem -= len;
	}
	GEM_BUG_ON(rem);

	obj->mm.madv = I915_MADV_DONTNEED;
	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);

	return 0;
#undef GFP
}

static void fake_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	fake_free_pages(obj, pages);
	obj->mm.dirty = false;
	obj->mm.madv = I915_MADV_WILLNEED;
}

static const struct drm_i915_gem_object_ops fake_ops = {
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_pages,
	.put_pages = fake_put_pages,
};

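/* Create a GEM object whose backing store is the fake scatterlist
 * above, letting the tests bind objects of almost any size without
 * allocating real pages.
 */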
static struct drm_i915_gem_object *
fake_dma_object(struct drm_i915_private *i915, u64 size)
{
	struct drm_i915_gem_object *obj;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc(i915);
	if (!obj)
		goto err;

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &fake_ops);

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
	obj->cache_level = I915_CACHE_NONE;

	/* Preallocate the "backing storage" */
	if (i915_gem_object_pin_pages(obj))
		goto err_obj;

	i915_gem_object_unpin_pages(obj);
	return obj;

err_obj:
	i915_gem_object_put(obj);
err:
	return ERR_PTR(-ENOMEM);
}

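/* Try to allocate page directories for ever larger ranges of a fresh
 * ppgtt, first from offset 0 in sizes growing by powers of four, then
 * incrementally extending one range. Running out of kernel memory is
 * expected for huge address spaces and is not treated as a failure.
 */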
static int igt_ppgtt_alloc(void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	struct i915_hw_ppgtt *ppgtt;
	u64 size, last;
	int err;

	/* Allocate a ppgtt and try to fill the entire range */

	if (!USES_PPGTT(dev_priv))
		return 0;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return -ENOMEM;

	mutex_lock(&dev_priv->drm.struct_mutex);
	err = __hw_ppgtt_init(ppgtt, dev_priv);
	if (err)
		goto err_ppgtt;

	if (!ppgtt->base.allocate_va_range)
		goto err_ppgtt_cleanup;

	/* Check we can allocate the entire range */
	for (size = 4096;
	     size <= ppgtt->base.total;
	     size <<= 2) {
		err = ppgtt->base.allocate_va_range(&ppgtt->base, 0, size);
		if (err) {
			if (err == -ENOMEM) {
				pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n",
					size, ilog2(size));
				err = 0; /* virtual space too large! */
			}
			goto err_ppgtt_cleanup;
		}

		ppgtt->base.clear_range(&ppgtt->base, 0, size);
	}

	/* Check we can incrementally allocate the entire range */
	for (last = 0, size = 4096;
	     size <= ppgtt->base.total;
	     last = size, size <<= 2) {
		err = ppgtt->base.allocate_va_range(&ppgtt->base,
						    last, size - last);
		if (err) {
			if (err == -ENOMEM) {
				pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n",
					last, size - last, ilog2(size));
				err = 0; /* virtual space too large! */
			}
			goto err_ppgtt_cleanup;
		}
	}

err_ppgtt_cleanup:
	ppgtt->base.cleanup(&ppgtt->base);
err_ppgtt:
	mutex_unlock(&dev_priv->drm.struct_mutex);
	kfree(ppgtt);
	return err;
}

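/* Bypass the vma layer and drive allocate_va_range(), insert_entries()
 * and clear_range() directly with a mock vma: bind equal-sized objects
 * at randomised offsets throughout the hole, then clear them in a
 * second random order.
 */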
static int lowlevel_hole(struct drm_i915_private *i915,
			 struct i915_address_space *vm,
			 u64 hole_start, u64 hole_end,
			 unsigned long end_time)
{
	I915_RND_STATE(seed_prng);
	unsigned int size;
	struct i915_vma mock_vma;

	memset(&mock_vma, 0, sizeof(struct i915_vma));

	/* Keep creating larger objects until one cannot fit into the hole */
	for (size = 12; (hole_end - hole_start) >> size; size++) {
		I915_RND_SUBSTATE(prng, seed_prng);
		struct drm_i915_gem_object *obj;
		unsigned int *order, count, n;
		u64 hole_size;

		hole_size = (hole_end - hole_start) >> size;
		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
		count = hole_size >> 1;
		if (!count) {
			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
				 __func__, hole_start, hole_end, size, hole_size);
			break;
		}

		do {
			order = i915_random_order(count, &prng);
			if (order)
				break;
		} while (count >>= 1);
		if (!count)
			return -ENOMEM;
		GEM_BUG_ON(!order);

		GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
		GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end);

		/* Ignore allocation failures (i.e. don't report them as
		 * a test failure) as we are purposefully allocating very
		 * large objects without checking that we have sufficient
		 * memory. We expect to hit -ENOMEM.
		 */
		obj = fake_dma_object(i915, BIT_ULL(size));
		if (IS_ERR(obj)) {
			kfree(order);
			break;
		}

		GEM_BUG_ON(obj->base.size != BIT_ULL(size));

		if (i915_gem_object_pin_pages(obj)) {
			i915_gem_object_put(obj);
			kfree(order);
			break;
		}

		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);

			if (igt_timeout(end_time,
					"%s timed out before %d/%d\n",
					__func__, n, count)) {
				hole_end = hole_start; /* quit */
				break;
			}

			if (vm->allocate_va_range &&
			    vm->allocate_va_range(vm, addr, BIT_ULL(size)))
				break;

			mock_vma.pages = obj->mm.pages;
			mock_vma.node.size = BIT_ULL(size);
			mock_vma.node.start = addr;

			intel_runtime_pm_get(i915);
			vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0);
			intel_runtime_pm_put(i915);
		}
		count = n;

		i915_random_reorder(order, count, &prng);
		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
			vm->clear_range(vm, addr, BIT_ULL(size));
		}

		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);

		kfree(order);
	}

	return 0;
}

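/* Unbind and release every object accumulated on a test list, closing
 * any ppgtt vma; errors during this cleanup are deliberately ignored.
 */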
static void close_object_list(struct list_head *objects,
			      struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj, *on;
	int ignored;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, vm, NULL);
		if (!IS_ERR(vma))
			ignored = i915_vma_unbind(vma);
		/* Only ppgtt vma may be closed before the object is freed */
		if (!IS_ERR(vma) && !i915_vma_is_ggtt(vma))
			i915_vma_close(vma);

		list_del(&obj->st_link);
		i915_gem_object_put(obj);
	}
}

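/* Pack objects of many different sizes against both edges of the hole,
 * walking the list in both directions, and verify each vma is bound
 * exactly where requested and stays put until explicitly unbound.
 */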
static int fill_hole(struct drm_i915_private *i915,
		     struct i915_address_space *vm,
		     u64 hole_start, u64 hole_end,
		     unsigned long end_time)
{
	const u64 hole_size = hole_end - hole_start;
	struct drm_i915_gem_object *obj;
	const unsigned long max_pages =
		min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
	const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
	unsigned long npages, prime, flags;
	struct i915_vma *vma;
	LIST_HEAD(objects);
	int err;

	/* Try binding many VMA working inwards from either edge */

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	for_each_prime_number_from(prime, 2, max_step) {
		for (npages = 1; npages <= max_pages; npages *= prime) {
			const u64 full_size = npages << PAGE_SHIFT;
			const struct {
				const char *name;
				u64 offset;
				int step;
			} phases[] = {
				{ "top-down", hole_end, -1, },
				{ "bottom-up", hole_start, 1, },
				{ }
			}, *p;

			obj = fake_dma_object(i915, full_size);
			if (IS_ERR(obj))
				break;

			list_add(&obj->st_link, &objects);

			/* Align differing sized objects against the edges, and
			 * check we don't walk off into the void when binding
			 * them into the GTT.
			 */
			for (p = phases; p->name; p++) {
				u64 offset;

				offset = p->offset;
				list_for_each_entry(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					err = i915_vma_pin(vma, 0, 0, offset | flags);
					if (err) {
						pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
						       __func__, p->name, err, npages, prime, offset);
						goto err;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						err = -EINVAL;
						goto err;
					}

					i915_vma_unpin(vma);

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}

				offset = p->offset;
				list_for_each_entry(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       offset);
						err = -EINVAL;
						goto err;
					}

					err = i915_vma_unbind(vma);
					if (err) {
						pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       err);
						goto err;
					}

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}

				offset = p->offset;
				list_for_each_entry_reverse(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					err = i915_vma_pin(vma, 0, 0, offset | flags);
					if (err) {
						pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
						       __func__, p->name, err, npages, prime, offset);
						goto err;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						err = -EINVAL;
						goto err;
					}

					i915_vma_unpin(vma);

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}

				offset = p->offset;
				list_for_each_entry_reverse(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						err = -EINVAL;
						goto err;
					}

					err = i915_vma_unbind(vma);
					if (err) {
						pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       err);
						goto err;
					}

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}
			}

			if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
					__func__, npages, prime)) {
				err = -EINTR;
				goto err;
			}
		}

		close_object_list(&objects, vm);
	}

	return 0;

err:
	close_object_list(&objects, vm);
	return err;
}

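/* Bind a single vma of every prime page count at each successive offset
 * across the hole, checking placement and unbind at every step.
 */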
static int walk_hole(struct drm_i915_private *i915,
		     struct i915_address_space *vm,
		     u64 hole_start, u64 hole_end,
		     unsigned long end_time)
{
	const u64 hole_size = hole_end - hole_start;
	const unsigned long max_pages =
		min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
	unsigned long flags;
	u64 size;

	/* Try binding a single VMA in different positions within the hole */

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	for_each_prime_number_from(size, 1, max_pages) {
		struct drm_i915_gem_object *obj;
		struct i915_vma *vma;
		u64 addr;
		int err = 0;

		obj = fake_dma_object(i915, size << PAGE_SHIFT);
		if (IS_ERR(obj))
			break;

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_put;
		}

		for (addr = hole_start;
		     addr + obj->base.size < hole_end;
		     addr += obj->base.size) {
			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s bind failed at %llx + %llx [hole %llx - %llx] with err=%d\n",
				       __func__, addr, vma->size,
				       hole_start, hole_end, err);
				goto err_close;
			}
			i915_vma_unpin(vma);

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, vma->size);
				err = -EINVAL;
				goto err_close;
			}

			err = i915_vma_unbind(vma);
			if (err) {
				pr_err("%s unbind failed at %llx + %llx with err=%d\n",
				       __func__, addr, vma->size, err);
				goto err_close;
			}

			GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

			if (igt_timeout(end_time,
					"%s timed out at %llx\n",
					__func__, addr)) {
				err = -EINTR;
				goto err_close;
			}
		}

err_close:
		if (!i915_vma_is_ggtt(vma))
			i915_vma_close(vma);
err_put:
		i915_gem_object_put(obj);
		if (err)
			return err;
	}

	return 0;
}

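/* Bind a two-page object straddling every power-of-two boundary inside
 * the hole, verifying PIN_OFFSET_FIXED placement at each alignment.
 */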
static int pot_hole(struct drm_i915_private *i915,
		    struct i915_address_space *vm,
		    u64 hole_start, u64 hole_end,
		    unsigned long end_time)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	unsigned long flags;
	unsigned int pot;
	int err = 0;

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	obj = i915_gem_object_create_internal(i915, 2 * I915_GTT_PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	/* Insert a pair of pages across every pot boundary within the hole */
	for (pot = fls64(hole_end - 1) - 1;
	     pot > ilog2(2 * I915_GTT_PAGE_SIZE);
	     pot--) {
		u64 step = BIT_ULL(pot);
		u64 addr;

		for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
		     addr <= round_down(hole_end - 2*I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
		     addr += step) {
			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
				       __func__,
				       addr,
				       hole_start, hole_end,
				       err);
				goto err;
			}

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, vma->size);
				i915_vma_unpin(vma);
				err = i915_vma_unbind(vma);
				err = -EINVAL;
				goto err;
			}

			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			GEM_BUG_ON(err);
		}

		if (igt_timeout(end_time,
				"%s timed out after %d/%d\n",
				__func__, pot, fls64(hole_end - 1) - 1)) {
			err = -EINTR;
			goto err;
		}
	}

err:
	if (!i915_vma_is_ggtt(vma))
		i915_vma_close(vma);
err_obj:
	i915_gem_object_put(obj);
	return err;
}

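/* Like lowlevel_hole, but through the full vma API: pin one object of
 * each power-of-two size at randomised offsets throughout the hole,
 * verifying placement before unbinding again.
 */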
static int drunk_hole(struct drm_i915_private *i915,
		      struct i915_address_space *vm,
		      u64 hole_start, u64 hole_end,
		      unsigned long end_time)
{
	I915_RND_STATE(prng);
	unsigned int size;
	unsigned long flags;

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	/* Keep creating larger objects until one cannot fit into the hole */
	for (size = 12; (hole_end - hole_start) >> size; size++) {
		struct drm_i915_gem_object *obj;
		unsigned int *order, count, n;
		struct i915_vma *vma;
		u64 hole_size;
		int err = -ENODEV;

		hole_size = (hole_end - hole_start) >> size;
		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
		count = hole_size >> 1;
		if (!count) {
			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
				 __func__, hole_start, hole_end, size, hole_size);
			break;
		}

		do {
			order = i915_random_order(count, &prng);
			if (order)
				break;
		} while (count >>= 1);
		if (!count)
			return -ENOMEM;
		GEM_BUG_ON(!order);

		/* Ignore allocation failures (i.e. don't report them as
		 * a test failure) as we are purposefully allocating very
		 * large objects without checking that we have sufficient
		 * memory. We expect to hit -ENOMEM.
		 */
		obj = fake_dma_object(i915, BIT_ULL(size));
		if (IS_ERR(obj)) {
			kfree(order);
			break;
		}

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_obj;
		}

		GEM_BUG_ON(vma->size != BIT_ULL(size));

		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
				       __func__,
				       addr, BIT_ULL(size),
				       hole_start, hole_end,
				       err);
				goto err;
			}

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, BIT_ULL(size));
				i915_vma_unpin(vma);
				err = i915_vma_unbind(vma);
				err = -EINVAL;
				goto err;
			}

			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			GEM_BUG_ON(err);

			if (igt_timeout(end_time,
					"%s timed out after %d/%d\n",
					__func__, n, count)) {
				err = -EINTR;
				goto err;
			}
		}

err:
		if (!i915_vma_is_ggtt(vma))
			i915_vma_close(vma);
err_obj:
		i915_gem_object_put(obj);
		kfree(order);
		if (err)
			return err;
	}

	return 0;
}

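/* Fill the hole back-to-back with objects of doubling size; used by
 * shrink_hole below with fault injection enabled so that the error
 * paths of the page-table allocators are exercised as well.
 */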
static int __shrink_hole(struct drm_i915_private *i915,
			 struct i915_address_space *vm,
			 u64 hole_start, u64 hole_end,
			 unsigned long end_time)
{
	struct drm_i915_gem_object *obj;
	unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
	unsigned int order = 12;
	LIST_HEAD(objects);
	int err = 0;
	u64 addr;

	/* Keep creating larger objects until one cannot fit into the hole */
	for (addr = hole_start; addr < hole_end; ) {
		struct i915_vma *vma;
		u64 size = BIT_ULL(order++);

		size = min(size, hole_end - addr);
		obj = fake_dma_object(i915, size);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			break;
		}

		GEM_BUG_ON(vma->size != size);

		err = i915_vma_pin(vma, 0, 0, addr | flags);
		if (err) {
			pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
			       __func__, addr, size, hole_start, hole_end, err);
			break;
		}

		if (!drm_mm_node_allocated(&vma->node) ||
		    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
			pr_err("%s incorrect at %llx + %llx\n",
			       __func__, addr, size);
			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			err = -EINVAL;
			break;
		}

		i915_vma_unpin(vma);
		addr += size;

		if (igt_timeout(end_time,
				"%s timed out at offset %llx [%llx - %llx]\n",
				__func__, addr, hole_start, hole_end)) {
			err = -EINTR;
			break;
		}
	}

	close_object_list(&objects, vm);
	return err;
}

static int shrink_hole(struct drm_i915_private *i915,
		       struct i915_address_space *vm,
		       u64 hole_start, u64 hole_end,
		       unsigned long end_time)
{
	unsigned long prime;
	int err;

	vm->fault_attr.probability = 999;
	atomic_set(&vm->fault_attr.times, -1);

	for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
		vm->fault_attr.interval = prime;
		err = __shrink_hole(i915, vm, hole_start, hole_end, end_time);
		if (err)
			break;
	}

	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
	return err;
}

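/* Construct a private full ppgtt and run one of the hole exercisers
 * over its entire address range.
 */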
static int exercise_ppgtt(struct drm_i915_private *dev_priv,
			  int (*func)(struct drm_i915_private *i915,
				      struct i915_address_space *vm,
				      u64 hole_start, u64 hole_end,
				      unsigned long end_time))
{
	struct drm_file *file;
	struct i915_hw_ppgtt *ppgtt;
	IGT_TIMEOUT(end_time);
	int err;

	if (!USES_FULL_PPGTT(dev_priv))
		return 0;

	file = mock_file(dev_priv);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&dev_priv->drm.struct_mutex);
	ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv, "mock");
	if (IS_ERR(ppgtt)) {
		err = PTR_ERR(ppgtt);
		goto out_unlock;
	}
	GEM_BUG_ON(offset_in_page(ppgtt->base.total));
	GEM_BUG_ON(ppgtt->base.closed);

	err = func(dev_priv, &ppgtt->base, 0, ppgtt->base.total, end_time);

	i915_ppgtt_close(&ppgtt->base);
	i915_ppgtt_put(ppgtt);
out_unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);

	mock_file_free(dev_priv, file);
	return err;
}

static int igt_ppgtt_fill(void *arg)
{
	return exercise_ppgtt(arg, fill_hole);
}

static int igt_ppgtt_walk(void *arg)
{
	return exercise_ppgtt(arg, walk_hole);
}

static int igt_ppgtt_pot(void *arg)
{
	return exercise_ppgtt(arg, pot_hole);
}

static int igt_ppgtt_drunk(void *arg)
{
	return exercise_ppgtt(arg, drunk_hole);
}

static int igt_ppgtt_lowlevel(void *arg)
{
	return exercise_ppgtt(arg, lowlevel_hole);
}

static int igt_ppgtt_shrink(void *arg)
{
	return exercise_ppgtt(arg, shrink_hole);
}

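/* Order the GGTT holes by address so that exercise_ggtt() can walk them
 * deterministically; since each exercise may reshape the drm_mm, the
 * walk restarts from the last completed hole after every pass.
 */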
static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
{
	struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
	struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);

	if (a->start < b->start)
		return -1;
	else
		return 1;
}

static int exercise_ggtt(struct drm_i915_private *i915,
			 int (*func)(struct drm_i915_private *i915,
				     struct i915_address_space *vm,
				     u64 hole_start, u64 hole_end,
				     unsigned long end_time))
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	u64 hole_start, hole_end, last = 0;
	struct drm_mm_node *node;
	IGT_TIMEOUT(end_time);
	int err = 0;

	mutex_lock(&i915->drm.struct_mutex);
restart:
	list_sort(NULL, &ggtt->base.mm.hole_stack, sort_holes);
	drm_mm_for_each_hole(node, &ggtt->base.mm, hole_start, hole_end) {
		if (hole_start < last)
			continue;

		if (ggtt->base.mm.color_adjust)
			ggtt->base.mm.color_adjust(node, 0,
						   &hole_start, &hole_end);
		if (hole_start >= hole_end)
			continue;

		err = func(i915, &ggtt->base, hole_start, hole_end, end_time);
		if (err)
			break;

		/* As we have manipulated the drm_mm, the list may be corrupt */
		last = hole_end;
		goto restart;
	}
	mutex_unlock(&i915->drm.struct_mutex);

	return err;
}

static int igt_ggtt_fill(void *arg)
{
	return exercise_ggtt(arg, fill_hole);
}

static int igt_ggtt_walk(void *arg)
{
	return exercise_ggtt(arg, walk_hole);
}

static int igt_ggtt_pot(void *arg)
{
	return exercise_ggtt(arg, pot_hole);
}

static int igt_ggtt_drunk(void *arg)
{
	return exercise_ggtt(arg, drunk_hole);
}

static int igt_ggtt_lowlevel(void *arg)
{
	return exercise_ggtt(arg, lowlevel_hole);
}

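/* Exercise insert_page() directly: map a single backing page at
 * randomised offsets within a reserved GGTT node, write a distinct
 * dword through each mapping, then remap in a new random order and
 * read every value back.
 */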
static int igt_ggtt_page(void *arg)
{
	const unsigned int count = PAGE_SIZE/sizeof(u32);
	I915_RND_STATE(prng);
	struct drm_i915_private *i915 = arg;
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node tmp;
	unsigned int *order, n;
	int err;

	mutex_lock(&i915->drm.struct_mutex);

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_unlock;
	}

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out_free;

	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_in_range(&ggtt->base.mm, &tmp,
					  1024 * PAGE_SIZE, 0,
					  I915_COLOR_UNEVICTABLE,
					  0, ggtt->mappable_end,
					  DRM_MM_INSERT_LOW);
	if (err)
		goto out_unpin;

	order = i915_random_order(count, &prng);
	if (!order) {
		err = -ENOMEM;
		goto out_remove;
	}

	intel_runtime_pm_get(i915);
	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + order[n] * PAGE_SIZE;
		u32 __iomem *vaddr;

		ggtt->base.insert_page(&ggtt->base,
				       i915_gem_object_get_dma_address(obj, 0),
				       offset, I915_CACHE_NONE, 0);

		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
		iowrite32(n, vaddr + n);
		io_mapping_unmap_atomic(vaddr);

		wmb();
		ggtt->base.clear_range(&ggtt->base, offset, PAGE_SIZE);
	}

	i915_random_reorder(order, count, &prng);
	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + order[n] * PAGE_SIZE;
		u32 __iomem *vaddr;
		u32 val;

		ggtt->base.insert_page(&ggtt->base,
				       i915_gem_object_get_dma_address(obj, 0),
				       offset, I915_CACHE_NONE, 0);

		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
		val = ioread32(vaddr + n);
		io_mapping_unmap_atomic(vaddr);

		ggtt->base.clear_range(&ggtt->base, offset, PAGE_SIZE);

		if (val != n) {
			pr_err("insert page failed: found %d, expected %d\n",
			       val, n);
			err = -EINVAL;
			break;
		}
	}
	intel_runtime_pm_put(i915);

	kfree(order);
out_remove:
	drm_mm_remove_node(&tmp);
out_unpin:
	i915_gem_object_unpin_pages(obj);
out_free:
	i915_gem_object_put(obj);
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

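/* Mimic the side effects of a real bind for the mock device: take the
 * pages reference, bump the bind count and move the vma to the
 * inactive list so eviction can find it.
 */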
static void track_vma_bind(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	obj->bind_count++; /* track for eviction later */
	__i915_gem_object_pin_pages(obj);

	vma->pages = obj->mm.pages;
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
}

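/* Run one of the hole exercisers over the ppgtt of a mock context. */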
static int exercise_mock(struct drm_i915_private *i915,
			 int (*func)(struct drm_i915_private *i915,
				     struct i915_address_space *vm,
				     u64 hole_start, u64 hole_end,
				     unsigned long end_time))
{
	struct i915_gem_context *ctx;
	struct i915_hw_ppgtt *ppgtt;
	IGT_TIMEOUT(end_time);
	int err;

	ctx = mock_context(i915, "mock");
	if (!ctx)
		return -ENOMEM;

	ppgtt = ctx->ppgtt;
	GEM_BUG_ON(!ppgtt);

	err = func(i915, &ppgtt->base, 0, ppgtt->base.total, end_time);

	mock_context_close(ctx);
	return err;
}

static int igt_mock_fill(void *arg)
{
	return exercise_mock(arg, fill_hole);
}

static int igt_mock_walk(void *arg)
{
	return exercise_mock(arg, walk_hole);
}

static int igt_mock_pot(void *arg)
{
	return exercise_mock(arg, pot_hole);
}

static int igt_mock_drunk(void *arg)
{
	return exercise_mock(arg, drunk_hole);
}

static int igt_gtt_reserve(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj, *on;
	LIST_HEAD(objects);
	u64 total;
	int err = -ENODEV;

	/* i915_gem_gtt_reserve() tries to reserve the precise range
	 * for the node, and evicts if it has to. So our test checks that
	 * it can give us the requested space and prevent overlaps.
	 */

	/* Start by filling the GGTT */
	for (total = 0;
	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
	     total += 2*I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
					   obj->base.size,
					   total,
					   obj->cache_level,
					   0);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != total ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
			       vma->node.start, vma->node.size,
			       total, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

	/* Now we start forcing evictions */
	for (total = I915_GTT_PAGE_SIZE;
	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
	     total += 2*I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
					   obj->base.size,
					   total,
					   obj->cache_level,
					   0);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != total ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
			       vma->node.start, vma->node.size,
			       total, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

	/* And then try at random */
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		struct i915_vma *vma;
		u64 offset;

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_vma_unbind(vma);
		if (err) {
			pr_err("i915_vma_unbind failed with err=%d!\n", err);
			goto out;
		}

		offset = random_offset(0, i915->ggtt.base.total,
				       2*I915_GTT_PAGE_SIZE,
				       I915_GTT_MIN_ALIGNMENT);

		err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
					   obj->base.size,
					   offset,
					   obj->cache_level,
					   0);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != offset ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
			       vma->node.start, vma->node.size,
			       offset, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

out:
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);
	}
	return err;
}

static int igt_gtt_insert(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj, *on;
	struct drm_mm_node tmp = {};
	const struct invalid_insert {
		u64 size;
		u64 alignment;
		u64 start, end;
	} invalid_insert[] = {
		{
			i915->ggtt.base.total + I915_GTT_PAGE_SIZE, 0,
			0, i915->ggtt.base.total,
		},
		{
			2*I915_GTT_PAGE_SIZE, 0,
			0, I915_GTT_PAGE_SIZE,
		},
		{
			-(u64)I915_GTT_PAGE_SIZE, 0,
			0, 4*I915_GTT_PAGE_SIZE,
		},
		{
			-(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
			0, 4*I915_GTT_PAGE_SIZE,
		},
		{
			I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
			I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
		},
		{}
	}, *ii;
	LIST_HEAD(objects);
	u64 total;
	int err = -ENODEV;

	/* i915_gem_gtt_insert() tries to allocate some free space in the GTT
	 * to the node, evicting if required.
	 */

	/* Check a couple of obviously invalid requests */
	for (ii = invalid_insert; ii->size; ii++) {
		err = i915_gem_gtt_insert(&i915->ggtt.base, &tmp,
					  ii->size, ii->alignment,
					  I915_COLOR_UNEVICTABLE,
					  ii->start, ii->end,
					  0);
		if (err != -ENOSPC) {
			pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
			       ii->size, ii->alignment, ii->start, ii->end,
			       err);
			return -EINVAL;
		}
	}

	/* Start by filling the GGTT */
	for (total = 0;
	     total + I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
	     total += I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, i915->ggtt.base.total,
					  0);
		if (err == -ENOSPC) {
			/* maxed out the GGTT space */
			i915_gem_object_put(obj);
			break;
		}
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);
		__i915_vma_pin(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	}

	list_for_each_entry(obj, &objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		if (!drm_mm_node_allocated(&vma->node)) {
			pr_err("VMA was unexpectedly evicted!\n");
			err = -EINVAL;
			goto out;
		}

		__i915_vma_unpin(vma);
	}

	/* If we then reinsert, we should find the same hole */
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		struct i915_vma *vma;
		u64 offset;

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		offset = vma->node.start;

		err = i915_vma_unbind(vma);
		if (err) {
			pr_err("i915_vma_unbind failed with err=%d!\n", err);
			goto out;
		}

		err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, i915->ggtt.base.total,
					  0);
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != offset) {
			pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
			       offset, vma->node.start);
			err = -EINVAL;
			goto out;
		}
	}

	/* And then force evictions */
	for (total = 0;
	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
	     total += 2*I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, 2*I915_GTT_PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, i915->ggtt.base.total,
					  0);
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	}

out:
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);
	}
	return err;
}

int i915_gem_gtt_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_mock_drunk),
		SUBTEST(igt_mock_walk),
		SUBTEST(igt_mock_pot),
		SUBTEST(igt_mock_fill),
		SUBTEST(igt_gtt_reserve),
		SUBTEST(igt_gtt_insert),
	};
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	mutex_lock(&i915->drm.struct_mutex);
	err = i915_subtests(tests, i915);
	mutex_unlock(&i915->drm.struct_mutex);

	drm_dev_unref(&i915->drm);
	return err;
}

int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_ppgtt_alloc),
		SUBTEST(igt_ppgtt_lowlevel),
		SUBTEST(igt_ppgtt_drunk),
		SUBTEST(igt_ppgtt_walk),
		SUBTEST(igt_ppgtt_pot),
		SUBTEST(igt_ppgtt_fill),
		SUBTEST(igt_ppgtt_shrink),
		SUBTEST(igt_ggtt_lowlevel),
		SUBTEST(igt_ggtt_drunk),
		SUBTEST(igt_ggtt_walk),
		SUBTEST(igt_ggtt_pot),
		SUBTEST(igt_ggtt_fill),
		SUBTEST(igt_ggtt_page),
	};

	GEM_BUG_ON(offset_in_page(i915->ggtt.base.total));

	return i915_subtests(tests, i915);
}