/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/list_sort.h>
#include <linux/prime_numbers.h>

#include "../i915_selftest.h"
#include "i915_random.h"

#include "mock_context.h"
#include "mock_drm.h"
#include "mock_gem_device.h"
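
/*
 * Fake backing-store helpers: these let the tests create objects of
 * (almost) arbitrary size without ever allocating real system pages
 * for them.
 */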
static void fake_free_pages(struct drm_i915_gem_object *obj,
			    struct sg_table *pages)
{
	sg_free_table(pages);
	kfree(pages);
}
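
/*
 * Build a scatterlist for the object without touching real memory: each
 * segment is capped at 2GiB (BIT(31)) and every one points at the same
 * bias page (PFN_BIAS). Marking the object I915_MADV_DONTNEED tells the
 * shrinker it may be reaped at any time.
 */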
static struct sg_table *
fake_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
#define PFN_BIAS 0x1000
	struct sg_table *pages;
	struct scatterlist *sg;
	typeof(obj->base.size) rem;

	pages = kmalloc(sizeof(*pages), GFP);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	rem = round_up(obj->base.size, BIT(31)) >> 31;
	if (sg_alloc_table(pages, rem, GFP)) {
		kfree(pages);
		return ERR_PTR(-ENOMEM);
	}

	rem = obj->base.size;
	for (sg = pages->sgl; sg; sg = sg_next(sg)) {
		unsigned long len = min_t(typeof(rem), rem, BIT(31));

		GEM_BUG_ON(!len);

		sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
		sg_dma_address(sg) = page_to_phys(sg_page(sg));
		sg_dma_len(sg) = len;

		rem -= len;
	}
	GEM_BUG_ON(rem);

	obj->mm.madv = I915_MADV_DONTNEED;
	return pages;
#undef GFP
}

static void fake_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	fake_free_pages(obj, pages);
	obj->mm.dirty = false;
	obj->mm.madv = I915_MADV_WILLNEED;
}

static const struct drm_i915_gem_object_ops fake_ops = {
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_pages,
	.put_pages = fake_put_pages,
};
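
/*
 * Create a fake object of the requested size and prime its "backing
 * storage" by pinning the pages once, so that later pin/bind cycles in
 * the tests cannot fail on first-time allocation.
 */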
static struct drm_i915_gem_object *
fake_dma_object(struct drm_i915_private *i915, u64 size)
{
	struct drm_i915_gem_object *obj;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc(i915);
	if (!obj)
		goto err;

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &fake_ops);

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
	obj->cache_level = I915_CACHE_NONE;

	/* Preallocate the "backing storage" */
	if (i915_gem_object_pin_pages(obj))
		goto err_obj;

	i915_gem_object_unpin_pages(obj);
	return obj;

err_obj:
	i915_gem_object_put(obj);
err:
	return ERR_PTR(-ENOMEM);
}
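
/*
 * Sanity check that a freshly initialised ppGTT can have its entire
 * virtual range allocated, both in one shot and incrementally; running
 * out of memory for the page directories is reported but not treated
 * as a test failure.
 */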
static int igt_ppgtt_alloc(void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	struct i915_hw_ppgtt *ppgtt;
	u64 size, last;
	int err;

	/* Allocate a ppgtt and try to fill the entire range */

	if (!USES_PPGTT(dev_priv))
		return 0;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return -ENOMEM;

	mutex_lock(&dev_priv->drm.struct_mutex);
	err = __hw_ppgtt_init(ppgtt, dev_priv);
	if (err)
		goto err_ppgtt;

	if (!ppgtt->base.allocate_va_range)
		goto err_ppgtt_cleanup;

	/* Check we can allocate the entire range */
	for (size = 4096;
	     size <= ppgtt->base.total;
	     size <<= 2) {
		err = ppgtt->base.allocate_va_range(&ppgtt->base, 0, size);
		if (err) {
			if (err == -ENOMEM) {
				pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n",
					size, ilog2(size));
				err = 0; /* virtual space too large! */
			}
			goto err_ppgtt_cleanup;
		}

		ppgtt->base.clear_range(&ppgtt->base, 0, size);
	}

	/* Check we can incrementally allocate the entire range */
	for (last = 0, size = 4096;
	     size <= ppgtt->base.total;
	     last = size, size <<= 2) {
		err = ppgtt->base.allocate_va_range(&ppgtt->base,
						    last, size - last);
		if (err) {
			if (err == -ENOMEM) {
				pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n",
					last, size - last, ilog2(size));
				err = 0; /* virtual space too large! */
			}
			goto err_ppgtt_cleanup;
		}
	}

err_ppgtt_cleanup:
	ppgtt->base.cleanup(&ppgtt->base);
err_ppgtt:
	mutex_unlock(&dev_priv->drm.struct_mutex);
	kfree(ppgtt);
	return err;
}
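
/*
 * Drive the low-level allocate_va_range/insert_entries/clear_range
 * hooks directly, bypassing the VMA layer, binding ever larger fake
 * objects at randomised offsets throughout the hole.
 */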
static int lowlevel_hole(struct drm_i915_private *i915,
			 struct i915_address_space *vm,
			 u64 hole_start, u64 hole_end,
			 unsigned long end_time)
{
	I915_RND_STATE(seed_prng);
	unsigned int size;

	/* Keep creating larger objects until one cannot fit into the hole */
	for (size = 12; (hole_end - hole_start) >> size; size++) {
		I915_RND_SUBSTATE(prng, seed_prng);
		struct drm_i915_gem_object *obj;
		unsigned int *order, count, n;
		u64 hole_size;

		hole_size = (hole_end - hole_start) >> size;
		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
		count = hole_size;
		do {
			count >>= 1;
			order = i915_random_order(count, &prng);
		} while (!order && count);
		if (!order)
			break;

		GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
		GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end);

		/* Ignore allocation failures (i.e. don't report them as
		 * a test failure) as we are purposefully allocating very
		 * large objects without checking that we have sufficient
		 * memory. We expect to hit -ENOMEM.
		 */
		obj = fake_dma_object(i915, BIT_ULL(size));
		if (IS_ERR(obj)) {
			kfree(order);
			break;
		}

		GEM_BUG_ON(obj->base.size != BIT_ULL(size));

		if (i915_gem_object_pin_pages(obj)) {
			i915_gem_object_put(obj);
			kfree(order);
			break;
		}

		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);

			if (igt_timeout(end_time,
					"%s timed out before %d/%d\n",
					__func__, n, count)) {
				hole_end = hole_start; /* quit */
				break;
			}

			if (vm->allocate_va_range &&
			    vm->allocate_va_range(vm, addr, BIT_ULL(size)))
				break;

			vm->insert_entries(vm, obj->mm.pages, addr,
					   I915_CACHE_NONE, 0);
		}
		count = n;

		i915_random_reorder(order, count, &prng);
		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
			vm->clear_range(vm, addr, BIT_ULL(size));
		}

		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);

		kfree(order);
	}

	return 0;
}

static void close_object_list(struct list_head *objects,
			      struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj, *on;
	int ignored;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, vm, NULL);
		if (!IS_ERR(vma))
			ignored = i915_vma_unbind(vma);
		/* Only ppgtt vma may be closed before the object is freed */
		if (!IS_ERR(vma) && !i915_vma_is_ggtt(vma))
			i915_vma_close(vma);

		list_del(&obj->st_link);
		i915_gem_object_put(obj);
	}
}
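
/*
 * Pack the hole from both ends ("top-down" and "bottom-up") with runs
 * of prime-stepped object sizes, verifying after each pass that every
 * vma still sits exactly where it was placed before unbinding it all.
 */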
static int fill_hole(struct drm_i915_private *i915,
		     struct i915_address_space *vm,
		     u64 hole_start, u64 hole_end,
		     unsigned long end_time)
{
	const u64 hole_size = hole_end - hole_start;
	struct drm_i915_gem_object *obj;
	const unsigned long max_pages =
		min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
	const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
	unsigned long npages, prime, flags;
	struct i915_vma *vma;
	LIST_HEAD(objects);
	int err;

	/* Try binding many VMA working inwards from either edge */

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	for_each_prime_number_from(prime, 2, max_step) {
		for (npages = 1; npages <= max_pages; npages *= prime) {
			const u64 full_size = npages << PAGE_SHIFT;
			const struct {
				const char *name;
				u64 offset;
				int step;
			} phases[] = {
				{ "top-down", hole_end, -1, },
				{ "bottom-up", hole_start, 1, },
				{ }
			}, *p;

			obj = fake_dma_object(i915, full_size);
			if (IS_ERR(obj))
				break;

			list_add(&obj->st_link, &objects);

			/* Align differing sized objects against the edges, and
			 * check we don't walk off into the void when binding
			 * them into the GTT.
			 */
			for (p = phases; p->name; p++) {
				u64 offset;

				offset = p->offset;
				list_for_each_entry(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					err = i915_vma_pin(vma, 0, 0, offset | flags);
					if (err) {
						pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
						       __func__, p->name, err, npages, prime, offset);
						goto err;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						err = -EINVAL;
						goto err;
					}

					i915_vma_unpin(vma);

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}

				offset = p->offset;
				list_for_each_entry(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       offset);
						err = -EINVAL;
						goto err;
					}

					err = i915_vma_unbind(vma);
					if (err) {
						pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       err);
						goto err;
					}

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}

				offset = p->offset;
				list_for_each_entry_reverse(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					err = i915_vma_pin(vma, 0, 0, offset | flags);
					if (err) {
						pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
						       __func__, p->name, err, npages, prime, offset);
						goto err;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						err = -EINVAL;
						goto err;
					}

					i915_vma_unpin(vma);

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}

				offset = p->offset;
				list_for_each_entry_reverse(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						err = -EINVAL;
						goto err;
					}

					err = i915_vma_unbind(vma);
					if (err) {
						pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       err);
						goto err;
					}

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}
			}

			if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
					__func__, npages, prime)) {
				err = -EINTR;
				goto err;
			}
		}

		close_object_list(&objects, vm);
	}

	return 0;

err:
	close_object_list(&objects, vm);
	return err;
}
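
/*
 * March a single VMA through every offset in the hole, rebinding it at
 * each step and checking its placement every time.
 */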
static int walk_hole(struct drm_i915_private *i915,
		     struct i915_address_space *vm,
		     u64 hole_start, u64 hole_end,
		     unsigned long end_time)
{
	const u64 hole_size = hole_end - hole_start;
	const unsigned long max_pages =
		min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
	unsigned long flags;
	u64 size;

	/* Try binding a single VMA in different positions within the hole */

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	for_each_prime_number_from(size, 1, max_pages) {
		struct drm_i915_gem_object *obj;
		struct i915_vma *vma;
		u64 addr;
		int err = 0;

		obj = fake_dma_object(i915, size << PAGE_SHIFT);
		if (IS_ERR(obj))
			break;

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_put;
		}

		for (addr = hole_start;
		     addr + obj->base.size < hole_end;
		     addr += obj->base.size) {
			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s bind failed at %llx + %llx [hole %llx - %llx] with err=%d\n",
				       __func__, addr, vma->size,
				       hole_start, hole_end, err);
				goto err_close;
			}
			i915_vma_unpin(vma);

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, vma->size);
				err = -EINVAL;
				goto err_close;
			}

			err = i915_vma_unbind(vma);
			if (err) {
				pr_err("%s unbind failed at %llx + %llx with err=%d\n",
				       __func__, addr, vma->size, err);
				goto err_close;
			}

			GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

			if (igt_timeout(end_time,
					"%s timed out at %llx\n",
					__func__, addr)) {
				err = -EINTR;
				goto err_close;
			}
		}

err_close:
		if (!i915_vma_is_ggtt(vma))
			i915_vma_close(vma);
err_put:
		i915_gem_object_put(obj);
		if (err)
			return err;
	}

	return 0;
}
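
/*
 * Walk every power-of-two step size, pinning a two-page object so that
 * it straddles each pot boundary within the hole, and check the
 * resulting placement.
 */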
static int pot_hole(struct drm_i915_private *i915,
		    struct i915_address_space *vm,
		    u64 hole_start, u64 hole_end,
		    unsigned long end_time)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	unsigned long flags;
	unsigned int pot;
	int err = 0;

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	obj = i915_gem_object_create_internal(i915, 2 * I915_GTT_PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	/* Insert a pair of pages across every pot boundary within the hole */
	for (pot = fls64(hole_end - 1) - 1;
	     pot > ilog2(2 * I915_GTT_PAGE_SIZE);
	     pot--) {
		u64 step = BIT_ULL(pot);
		u64 addr;

		for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
		     addr <= round_down(hole_end - 2*I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
		     addr += step) {
			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
				       __func__,
				       addr,
				       hole_start, hole_end,
				       err);
				goto err;
			}

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, vma->size);
				i915_vma_unpin(vma);
				err = i915_vma_unbind(vma);
				err = -EINVAL;
				goto err;
			}

			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			GEM_BUG_ON(err);
		}

		if (igt_timeout(end_time,
				"%s timed out after %d/%d\n",
				__func__, pot, fls64(hole_end - 1) - 1)) {
			err = -EINTR;
			goto err;
		}
	}

err:
	if (!i915_vma_is_ggtt(vma))
		i915_vma_close(vma);
err_obj:
	i915_gem_object_put(obj);
	return err;
}
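
/*
 * Bind a single object of each power-of-two size at pseudo-random
 * offsets throughout the hole, rebinding and unbinding at every step.
 */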
static int drunk_hole(struct drm_i915_private *i915,
		      struct i915_address_space *vm,
		      u64 hole_start, u64 hole_end,
		      unsigned long end_time)
{
	I915_RND_STATE(prng);
	unsigned int size;
	unsigned long flags;

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	/* Keep creating larger objects until one cannot fit into the hole */
	for (size = 12; (hole_end - hole_start) >> size; size++) {
		struct drm_i915_gem_object *obj;
		unsigned int *order, count, n;
		struct i915_vma *vma;
		u64 hole_size;
		int err;

		hole_size = (hole_end - hole_start) >> size;
		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
		count = hole_size;
		do {
			count >>= 1;
			order = i915_random_order(count, &prng);
		} while (!order && count);
		if (!order)
			break;

		/* Ignore allocation failures (i.e. don't report them as
		 * a test failure) as we are purposefully allocating very
		 * large objects without checking that we have sufficient
		 * memory. We expect to hit -ENOMEM.
		 */
		obj = fake_dma_object(i915, BIT_ULL(size));
		if (IS_ERR(obj)) {
			kfree(order);
			break;
		}

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_obj;
		}

		GEM_BUG_ON(vma->size != BIT_ULL(size));

		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
				       __func__,
				       addr, BIT_ULL(size),
				       hole_start, hole_end,
				       err);
				goto err;
			}

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, BIT_ULL(size));
				i915_vma_unpin(vma);
				err = i915_vma_unbind(vma);
				err = -EINVAL;
				goto err;
			}

			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			GEM_BUG_ON(err);

			if (igt_timeout(end_time,
					"%s timed out after %d/%d\n",
					__func__, n, count)) {
				err = -EINTR;
				goto err;
			}
		}

err:
		if (!i915_vma_is_ggtt(vma))
			i915_vma_close(vma);
err_obj:
		i915_gem_object_put(obj);
		kfree(order);
		if (err)
			return err;
	}

	return 0;
}
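
/*
 * Fill the hole front to back with objects that double in size at each
 * step (clamped to the space remaining), pinning each one in place.
 */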
static int __shrink_hole(struct drm_i915_private *i915,
			 struct i915_address_space *vm,
			 u64 hole_start, u64 hole_end,
			 unsigned long end_time)
{
	struct drm_i915_gem_object *obj;
	unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
	unsigned int order = 12;
	LIST_HEAD(objects);
	int err = 0;
	u64 addr;

	/* Keep creating larger objects until one cannot fit into the hole */
	for (addr = hole_start; addr < hole_end; ) {
		struct i915_vma *vma;
		u64 size = BIT_ULL(order++);

		size = min(size, hole_end - addr);
		obj = fake_dma_object(i915, size);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			break;
		}

		GEM_BUG_ON(vma->size != size);

		err = i915_vma_pin(vma, 0, 0, addr | flags);
		if (err) {
			pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
			       __func__, addr, size, hole_start, hole_end, err);
			break;
		}

		if (!drm_mm_node_allocated(&vma->node) ||
		    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
			pr_err("%s incorrect at %llx + %llx\n",
			       __func__, addr, size);
			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			err = -EINVAL;
			break;
		}

		i915_vma_unpin(vma);
		addr += size;

		if (igt_timeout(end_time,
				"%s timed out at offset %llx [%llx - %llx]\n",
				__func__, addr, hole_start, hole_end)) {
			err = -EINTR;
			break;
		}
	}

	close_object_list(&objects, vm);
	return err;
}
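
/*
 * Re-run __shrink_hole() under fault injection: with the probability
 * maxed out and times unlimited, vm->fault_attr fails allocations at
 * each fault-injection interval, so stepping the interval through the
 * primes exercises the error-unwind paths at many different depths.
 */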
static int shrink_hole(struct drm_i915_private *i915,
		       struct i915_address_space *vm,
		       u64 hole_start, u64 hole_end,
		       unsigned long end_time)
{
	unsigned long prime;
	int err;

	vm->fault_attr.probability = 999;
	atomic_set(&vm->fault_attr.times, -1);

	for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
		vm->fault_attr.interval = prime;
		err = __shrink_hole(i915, vm, hole_start, hole_end, end_time);
		if (err)
			break;
	}

	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));

	return err;
}

static int exercise_ppgtt(struct drm_i915_private *dev_priv,
			  int (*func)(struct drm_i915_private *i915,
				      struct i915_address_space *vm,
				      u64 hole_start, u64 hole_end,
				      unsigned long end_time))
{
	struct drm_file *file;
	struct i915_hw_ppgtt *ppgtt;
	IGT_TIMEOUT(end_time);
	int err;

	if (!USES_FULL_PPGTT(dev_priv))
		return 0;

	file = mock_file(dev_priv);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&dev_priv->drm.struct_mutex);
	ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv, "mock");
	if (IS_ERR(ppgtt)) {
		err = PTR_ERR(ppgtt);
		goto out_unlock;
	}
	GEM_BUG_ON(offset_in_page(ppgtt->base.total));
	GEM_BUG_ON(ppgtt->base.closed);

	err = func(dev_priv, &ppgtt->base, 0, ppgtt->base.total, end_time);

	i915_ppgtt_close(&ppgtt->base);
	i915_ppgtt_put(ppgtt);
out_unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);

	mock_file_free(dev_priv, file);
	return err;
}

static int igt_ppgtt_fill(void *arg)
{
	return exercise_ppgtt(arg, fill_hole);
}

static int igt_ppgtt_walk(void *arg)
{
	return exercise_ppgtt(arg, walk_hole);
}

static int igt_ppgtt_pot(void *arg)
{
	return exercise_ppgtt(arg, pot_hole);
}

static int igt_ppgtt_drunk(void *arg)
{
	return exercise_ppgtt(arg, drunk_hole);
}

static int igt_ppgtt_lowlevel(void *arg)
{
	return exercise_ppgtt(arg, lowlevel_hole);
}

static int igt_ppgtt_shrink(void *arg)
{
	return exercise_ppgtt(arg, shrink_hole);
}
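
/*
 * The GGTT is shared with the rest of the driver, so rather than being
 * handed a pristine address space the exercises are run over each
 * remaining hole, visited in address order. As an exercise may
 * rearrange the drm_mm, the hole walk is restarted after every one,
 * skipping the holes already covered.
 */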
static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
{
	struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
	struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);

	if (a->start < b->start)
		return -1;
	else
		return 1;
}

static int exercise_ggtt(struct drm_i915_private *i915,
			 int (*func)(struct drm_i915_private *i915,
				     struct i915_address_space *vm,
				     u64 hole_start, u64 hole_end,
				     unsigned long end_time))
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	u64 hole_start, hole_end, last = 0;
	struct drm_mm_node *node;
	IGT_TIMEOUT(end_time);
	int err = 0;

	mutex_lock(&i915->drm.struct_mutex);
restart:
	list_sort(NULL, &ggtt->base.mm.hole_stack, sort_holes);
	drm_mm_for_each_hole(node, &ggtt->base.mm, hole_start, hole_end) {
		if (hole_start < last)
			continue;

		if (ggtt->base.mm.color_adjust)
			ggtt->base.mm.color_adjust(node, 0,
						   &hole_start, &hole_end);
		if (hole_start >= hole_end)
			continue;

		err = func(i915, &ggtt->base, hole_start, hole_end, end_time);
		if (err)
			break;

		/* As we have manipulated the drm_mm, the list may be corrupt */
		last = hole_end;
		goto restart;
	}
	mutex_unlock(&i915->drm.struct_mutex);

	return err;
}

static int igt_ggtt_fill(void *arg)
{
	return exercise_ggtt(arg, fill_hole);
}

static int igt_ggtt_walk(void *arg)
{
	return exercise_ggtt(arg, walk_hole);
}

static int igt_ggtt_pot(void *arg)
{
	return exercise_ggtt(arg, pot_hole);
}

static int igt_ggtt_drunk(void *arg)
{
	return exercise_ggtt(arg, drunk_hole);
}

static int igt_ggtt_lowlevel(void *arg)
{
	return exercise_ggtt(arg, lowlevel_hole);
}
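
/*
 * Exercise insert_page() directly: map the same physical page at 1024
 * GGTT offsets in random order, writing a distinct dword through each
 * mapping, then remap in a fresh random order and read the values
 * back to check that every mapping landed where it was asked to.
 */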
static int igt_ggtt_page(void *arg)
{
	const unsigned int count = PAGE_SIZE/sizeof(u32);
	I915_RND_STATE(prng);
	struct drm_i915_private *i915 = arg;
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node tmp;
	unsigned int *order, n;
	int err;

	mutex_lock(&i915->drm.struct_mutex);

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_unlock;
	}

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out_free;

	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_in_range(&ggtt->base.mm, &tmp,
					  1024 * PAGE_SIZE, 0,
					  I915_COLOR_UNEVICTABLE,
					  0, ggtt->mappable_end,
					  DRM_MM_INSERT_LOW);
	if (err)
		goto out_unpin;

	order = i915_random_order(count, &prng);
	if (!order) {
		err = -ENOMEM;
		goto out_remove;
	}

	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + order[n] * PAGE_SIZE;
		u32 __iomem *vaddr;

		ggtt->base.insert_page(&ggtt->base,
				       i915_gem_object_get_dma_address(obj, 0),
				       offset, I915_CACHE_NONE, 0);

		vaddr = io_mapping_map_atomic_wc(&ggtt->mappable, offset);
		iowrite32(n, vaddr + n);
		io_mapping_unmap_atomic(vaddr);

		wmb();
		ggtt->base.clear_range(&ggtt->base, offset, PAGE_SIZE);
	}

	i915_random_reorder(order, count, &prng);
	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + order[n] * PAGE_SIZE;
		u32 __iomem *vaddr;
		u32 val;

		ggtt->base.insert_page(&ggtt->base,
				       i915_gem_object_get_dma_address(obj, 0),
				       offset, I915_CACHE_NONE, 0);

		vaddr = io_mapping_map_atomic_wc(&ggtt->mappable, offset);
		val = ioread32(vaddr + n);
		io_mapping_unmap_atomic(vaddr);

		ggtt->base.clear_range(&ggtt->base, offset, PAGE_SIZE);

		if (val != n) {
			pr_err("insert page failed: found %d, expected %d\n",
			       val, n);
			err = -EINVAL;
			break;
		}
	}

	kfree(order);
out_remove:
	drm_mm_remove_node(&tmp);
out_unpin:
	i915_gem_object_unpin_pages(obj);
out_free:
	i915_gem_object_put(obj);
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
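
/*
 * The mock device has no real GTT hardware, so fake the side effects
 * of binding: bump the bind count, pin the backing pages and move the
 * vma onto the inactive list so that eviction can find it later.
 */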
static void track_vma_bind(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	obj->bind_count++; /* track for eviction later */
	__i915_gem_object_pin_pages(obj);

	vma->pages = obj->mm.pages;
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
}

static int exercise_mock(struct drm_i915_private *i915,
			 int (*func)(struct drm_i915_private *i915,
				     struct i915_address_space *vm,
				     u64 hole_start, u64 hole_end,
				     unsigned long end_time))
{
	struct i915_gem_context *ctx;
	struct i915_hw_ppgtt *ppgtt;
	IGT_TIMEOUT(end_time);
	int err;

	ctx = mock_context(i915, "mock");
	if (!ctx)
		return -ENOMEM;

	ppgtt = ctx->ppgtt;
	GEM_BUG_ON(!ppgtt);

	err = func(i915, &ppgtt->base, 0, ppgtt->base.total, end_time);

	mock_context_close(ctx);
	return err;
}

static int igt_mock_fill(void *arg)
{
	return exercise_mock(arg, fill_hole);
}

static int igt_mock_walk(void *arg)
{
	return exercise_mock(arg, walk_hole);
}

static int igt_mock_pot(void *arg)
{
	return exercise_mock(arg, pot_hole);
}

static int igt_mock_drunk(void *arg)
{
	return exercise_mock(arg, drunk_hole);
}

static int igt_gtt_reserve(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj, *on;
	LIST_HEAD(objects);
	u64 total;
	int err;

	/* i915_gem_gtt_reserve() tries to reserve the precise range
	 * for the node, and evicts if it has to. So our test checks that
	 * it can give us the requested space and prevent overlaps.
	 */

	/* Start by filling the GGTT */
	for (total = 0;
	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
	     total += 2*I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
					   obj->base.size,
					   total,
					   obj->cache_level,
					   0);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != total ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
			       vma->node.start, vma->node.size,
			       total, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

	/* Now we start forcing evictions */
	for (total = I915_GTT_PAGE_SIZE;
	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
	     total += 2*I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
					   obj->base.size,
					   total,
					   obj->cache_level,
					   0);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != total ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
			       vma->node.start, vma->node.size,
			       total, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

	/* And then try at random */
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		struct i915_vma *vma;
		u64 offset;

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_vma_unbind(vma);
		if (err) {
			pr_err("i915_vma_unbind failed with err=%d!\n", err);
			goto out;
		}

		offset = random_offset(0, i915->ggtt.base.total,
				       2*I915_GTT_PAGE_SIZE,
				       I915_GTT_MIN_ALIGNMENT);

		err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
					   obj->base.size,
					   offset,
					   obj->cache_level,
					   0);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != offset ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
			       vma->node.start, vma->node.size,
			       offset, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

out:
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);
	}
	return err;
}

static int igt_gtt_insert(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj, *on;
	struct drm_mm_node tmp = {};
	const struct invalid_insert {
		u64 size;
		u64 alignment;
		u64 start, end;
	} invalid_insert[] = {
		{
			i915->ggtt.base.total + I915_GTT_PAGE_SIZE, 0,
			0, i915->ggtt.base.total,
		},
		{
			2*I915_GTT_PAGE_SIZE, 0,
			0, I915_GTT_PAGE_SIZE,
		},
		{
			-(u64)I915_GTT_PAGE_SIZE, 0,
			0, 4*I915_GTT_PAGE_SIZE,
		},
		{
			-(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
			0, 4*I915_GTT_PAGE_SIZE,
		},
		{
			I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
			I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
		},
		{}
	}, *ii;
	LIST_HEAD(objects);
	u64 total;
	int err;

	/* i915_gem_gtt_insert() tries to allocate some free space in the GTT
	 * to the node, evicting if required.
	 */

	/* Check a couple of obviously invalid requests */
	for (ii = invalid_insert; ii->size; ii++) {
		err = i915_gem_gtt_insert(&i915->ggtt.base, &tmp,
					  ii->size, ii->alignment,
					  I915_COLOR_UNEVICTABLE,
					  ii->start, ii->end,
					  0);
		if (err != -ENOSPC) {
			pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
			       ii->size, ii->alignment, ii->start, ii->end,
			       err);
			return -EINVAL;
		}
	}

	/* Start by filling the GGTT */
	for (total = 0;
	     total + I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
	     total += I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, i915->ggtt.base.total,
					  0);
		if (err == -ENOSPC) {
			/* maxed out the GGTT space */
			i915_gem_object_put(obj);
			break;
		}
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);
		__i915_vma_pin(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	}

	list_for_each_entry(obj, &objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		if (!drm_mm_node_allocated(&vma->node)) {
			pr_err("VMA was unexpectedly evicted!\n");
			err = -EINVAL;
			goto out;
		}

		__i915_vma_unpin(vma);
	}

	/* If we then reinsert, we should find the same hole */
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		struct i915_vma *vma;
		u64 offset;

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		offset = vma->node.start;

		err = i915_vma_unbind(vma);
		if (err) {
			pr_err("i915_vma_unbind failed with err=%d!\n", err);
			goto out;
		}

		err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, i915->ggtt.base.total,
					  0);
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != offset) {
			pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
			       offset, vma->node.start);
			err = -EINVAL;
			goto out;
		}
	}

	/* And then force evictions */
	for (total = 0;
	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
	     total += 2*I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, 2*I915_GTT_PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, i915->ggtt.base.total,
					  0);
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	}

out:
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);
	}
	return err;
}
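
/*
 * Two entry points: the mock selftests run entirely in software
 * against a fake device, while the live selftests exercise the real
 * GTT and so require actual hardware.
 */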
int i915_gem_gtt_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_mock_drunk),
		SUBTEST(igt_mock_walk),
		SUBTEST(igt_mock_pot),
		SUBTEST(igt_mock_fill),
		SUBTEST(igt_gtt_reserve),
		SUBTEST(igt_gtt_insert),
	};
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	mutex_lock(&i915->drm.struct_mutex);
	err = i915_subtests(tests, i915);
	mutex_unlock(&i915->drm.struct_mutex);

	drm_dev_unref(&i915->drm);
	return err;
}

int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_ppgtt_alloc),
		SUBTEST(igt_ppgtt_lowlevel),
		SUBTEST(igt_ppgtt_drunk),
		SUBTEST(igt_ppgtt_walk),
		SUBTEST(igt_ppgtt_pot),
		SUBTEST(igt_ppgtt_fill),
		SUBTEST(igt_ppgtt_shrink),
		SUBTEST(igt_ggtt_lowlevel),
		SUBTEST(igt_ggtt_drunk),
		SUBTEST(igt_ggtt_walk),
		SUBTEST(igt_ggtt_pot),
		SUBTEST(igt_ggtt_fill),
		SUBTEST(igt_ggtt_page),
	};

	GEM_BUG_ON(offset_in_page(i915->ggtt.base.total));

	return i915_subtests(tests, i915);
}