/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_vma.h"

#include "i915_drv.h"
#include "intel_ringbuffer.h"
#include "intel_frontbuffer.h"

#include <drm/drm_gem.h>
static void
i915_vma_retire(struct i915_gem_active *active,
		struct drm_i915_gem_request *rq)
{
	const unsigned int idx = rq->engine->id;
	struct i915_vma *vma =
		container_of(active, struct i915_vma, last_read[idx]);
	struct drm_i915_gem_object *obj = vma->obj;

	GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));

	i915_vma_clear_active(vma, idx);
	if (i915_vma_is_active(vma))
		return;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
		WARN_ON(i915_vma_unbind(vma));

	GEM_BUG_ON(!i915_gem_object_is_active(obj));
	if (--obj->active_count)
		return;

	/* Bump our place on the bound list to keep it roughly in LRU order
	 * so that we don't steal from recently used but inactive objects
	 * (unless we are forced to ofc!)
	 */
	if (obj->bind_count)
		list_move_tail(&obj->global_link, &rq->i915->mm.bound_list);

	obj->mm.dirty = true; /* be paranoid */

	if (i915_gem_object_has_active_reference(obj)) {
		i915_gem_object_clear_active_reference(obj);
		i915_gem_object_put(obj);
	}
}
static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	struct rb_node *rb, **p;
	int i;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);

	vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
		init_request_active(&vma->last_read[i], i915_vma_retire);
	init_request_active(&vma->last_fence, NULL);
	vma->vm = vm;
	vma->obj = obj;
	vma->resv = obj->resv;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size >= obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (unlikely(vma->size > vm->total))
		goto err_vma;

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	if (i915_is_ggtt(vm)) {
		if (unlikely(overflows_type(vma->size, u32)))
			goto err_vma;

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size || /* overflow */
			     vma->fence_size > vm->total))
			goto err_vma;

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		vma->flags |= I915_VMA_GGTT;
		list_add(&vma->obj_link, &obj->vma_list);
	} else {
		i915_ppgtt_get(i915_vm_to_ppgtt(vm));
		list_add_tail(&vma->obj_link, &obj->vma_list);
	}

	rb = NULL;
	p = &obj->vma_tree.rb_node;
	while (*p) {
		struct i915_vma *pos;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);
		if (i915_vma_compare(pos, vm, view) < 0)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma_tree);
	list_add(&vma->vm_link, &vm->unbound_list);

	return vma;

err_vma:
	kmem_cache_free(vm->i915->vmas, vma);
	return ERR_PTR(-E2BIG);
}
static struct i915_vma *
vma_lookup(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct rb_node *rb;

	rb = obj->vma_tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}
/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Must be called with struct_mutex held.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);
	GEM_BUG_ON(view && !i915_is_ggtt(vm));
	GEM_BUG_ON(vm->closed);

	vma = vma_lookup(obj, vm, view);
	if (!vma)
		vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_is_closed(vma));
	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
	GEM_BUG_ON(!IS_ERR(vma) && vma_lookup(obj, vm, view) != vma);
	return vma;
}
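
/*
 * Illustrative sketch (not part of the driver): a typical caller pairs
 * i915_vma_instance() with i915_vma_pin()/i915_vma_unpin() from i915_vma.h,
 * all under struct_mutex. The helper name example_pin_into_ggtt() is
 * hypothetical; the calls it makes are the real API used elsewhere in i915.
 *
 *	static int example_pin_into_ggtt(struct drm_i915_private *i915,
 *					 struct drm_i915_gem_object *obj)
 *	{
 *		struct i915_vma *vma;
 *		int err;
 *
 *		lockdep_assert_held(&i915->drm.struct_mutex);
 *
 *		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
 *		if (IS_ERR(vma))
 *			return PTR_ERR(vma);
 *
 *		err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
 *		if (err)
 *			return err;
 *
 *		... use the GGTT address at vma->node.start ...
 *
 *		i915_vma_unpin(vma);
 *		return 0;
 *	}
 */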

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
		  u32 flags)
{
	u32 bind_flags;
	u32 vma_flags;
	int ret;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->size > vma->node.size);

	if (GEM_WARN_ON(range_overflows(vma->node.start,
					vma->node.size,
					vma->vm->total)))
		return -ENODEV;

	if (GEM_WARN_ON(!flags))
		return -EINVAL;

	bind_flags = 0;
	if (flags & PIN_GLOBAL)
		bind_flags |= I915_VMA_GLOBAL_BIND;
	if (flags & PIN_USER)
		bind_flags |= I915_VMA_LOCAL_BIND;

	vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
	if (flags & PIN_UPDATE)
		bind_flags |= vma_flags;
	else
		bind_flags &= ~vma_flags;
	if (bind_flags == 0)
		return 0;

	trace_i915_vma_bind(vma, bind_flags);
	ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
	if (ret)
		return ret;

	vma->flags |= bind_flags;
	return 0;
}
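
/*
 * Usage note (illustrative, not part of the driver): PIN_UPDATE lets a
 * caller rewrite the PTEs of bindings that already exist, which is how a
 * cache-level change is applied without unbinding, e.g.:
 *
 *	ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
 *
 * With PIN_UPDATE, bind_flags keeps the vma's current GLOBAL/LOCAL bits, so
 * only the already-present bindings are re-emitted; without it, bits that
 * are already bound are masked out and the call becomes a no-op.
 */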

void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;

	/* Access through the GTT requires the device to be awake. */
	assert_rpm_wakelock_held(vma->vm->i915);

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
		return IO_ERR_PTR(-ENODEV);

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);

	ptr = vma->iomap;
	if (ptr == NULL) {
		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
					vma->node.start,
					vma->node.size);
		if (ptr == NULL)
			return IO_ERR_PTR(-ENOMEM);

		vma->iomap = ptr;
	}

	__i915_vma_pin(vma);
	return ptr;
}
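
/*
 * Illustrative sketch (not part of the driver): i915_vma_pin_iomap() is
 * paired with i915_vma_unpin_iomap() (declared in i915_vma.h), and the vma
 * must already be bound map-and-fenceable in the GGTT. Errors come back as
 * an IO_ERR_PTR()-encoded pointer. The helper name example_poke_dword() is
 * hypothetical.
 *
 *	static int example_poke_dword(struct i915_vma *vma, u32 value)
 *	{
 *		void __iomem *ptr;
 *
 *		ptr = i915_vma_pin_iomap(vma);
 *		if (IS_ERR((void __force *)ptr))
 *			return PTR_ERR((void __force *)ptr);
 *
 *		writel(value, ptr);
 *		i915_vma_unpin_iomap(vma);
 *		return 0;
 *	}
 */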

void i915_vma_unpin_and_release(struct i915_vma **p_vma)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	__i915_gem_object_release_unless_active(obj);
}

bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (vma->node.size < size)
		return true;

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	/*
	 * Explicitly disable for rotated VMA since the display does not
	 * need the fence and the VMA is not accessible to other users.
	 */
	if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
		return;

	fenceable = (vma->node.size >= vma->fence_size &&
		     IS_ALIGNED(vma->node.start, vma->fence_alignment));

	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

	if (mappable && fenceable)
		vma->flags |= I915_VMA_CAN_FENCE;
	else
		vma->flags &= ~I915_VMA_CAN_FENCE;
}

static bool color_differs(struct drm_mm_node *node, unsigned long color)
{
	return node->allocated && node->color != color;
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (vma->vm->mm.color_adjust == NULL)
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (color_differs(other, cache_level) && !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (color_differs(other, cache_level) && !drm_mm_hole_follows(node))
		return false;

	return true;
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry, to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct drm_i915_private *dev_priv = vma->vm->i915;
	struct drm_i915_gem_object *obj = vma->obj;
	u64 start, end;
	int ret;

	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	alignment = max(alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, dev_priv->ggtt.mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
			  size, obj->base.size,
			  flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -ENOSPC;
	}

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;

		if (!IS_ALIGNED(offset, alignment) ||
		    range_overflows(offset, size, end)) {
			ret = -EINVAL;
			goto err_unpin;
		}

		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
					   size, offset, obj->cache_level,
					   flags);
		if (ret)
			goto err_unpin;
	} else {
		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
					  size, alignment, obj->cache_level,
					  start, end, flags);
		if (ret)
			goto err_unpin;

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));

	list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	obj->bind_count++;
	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);

	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}
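
/*
 * Usage note (illustrative, not part of the driver): the placement
 * constraints consumed above are packed into the upper bits of @flags by the
 * caller, with the byte offset covered by PIN_OFFSET_MASK, e.g.:
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_OFFSET_FIXED | offset);
 *
 * pins the vma at exactly @offset (which must be page aligned), while
 * PIN_OFFSET_BIAS | offset only requires the node to be placed at or above
 * that offset.
 */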

static void
i915_vma_remove(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

	drm_mm_remove_node(&vma->node);
	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);

	/* Since the unbound list is global, only move to that list if
	 * no more VMAs exist.
	 */
	if (--obj->bind_count == 0)
		list_move_tail(&obj->global_link,
			       &to_i915(obj->base.dev)->mm.unbound_list);

	/* And finally now the object is completely decoupled from this vma,
	 * we can drop its hold on the backing storage and allow it to be
	 * reaped by the shrinker.
	 */
	i915_gem_object_unpin_pages(obj);
	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
}

int __i915_vma_do_pin(struct i915_vma *vma,
		      u64 size, u64 alignment, u64 flags)
{
	const unsigned int bound = vma->flags;
	int ret;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
	GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));

	if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
		ret = -EBUSY;
		goto err_unpin;
	}

	if ((bound & I915_VMA_BIND_MASK) == 0) {
		ret = i915_vma_insert(vma, size, alignment, flags);
		if (ret)
			goto err_unpin;
	}

	ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
	if (ret)
		goto err_remove;

	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
		__i915_vma_set_map_and_fenceable(vma);

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
	return 0;

err_remove:
	if ((bound & I915_VMA_BIND_MASK) == 0) {
		GEM_BUG_ON(vma->pages);
		i915_vma_remove(vma);
	}
err_unpin:
	__i915_vma_unpin(vma);
	return ret;
}

static void i915_vma_destroy(struct i915_vma *vma)
{
	int i;

	GEM_BUG_ON(vma->node.allocated);
	GEM_BUG_ON(i915_vma_is_active(vma));
	GEM_BUG_ON(!i915_vma_is_closed(vma));
	GEM_BUG_ON(vma->fence);

	for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
		GEM_BUG_ON(i915_gem_active_isset(&vma->last_read[i]));
	GEM_BUG_ON(i915_gem_active_isset(&vma->last_fence));

	list_del(&vma->vm_link);
	if (!i915_vma_is_ggtt(vma))
		i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));

	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
}

void i915_vma_close(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_closed(vma));
	vma->flags |= I915_VMA_CLOSED;

	list_del(&vma->obj_link);
	rb_erase(&vma->obj_node, &vma->obj->vma_tree);

	if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
		WARN_ON(i915_vma_unbind(vma));
}

static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}

int i915_vma_unbind(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	unsigned long active;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* First wait upon any activity as retiring the request may
	 * have side-effects such as unpinning or even unbinding this vma.
	 */
	active = i915_vma_get_active(vma);
	if (active) {
		int idx;

		/* When a closed VMA is retired, it is unbound - eek.
		 * In order to prevent it from being recursively closed,
		 * take a pin on the vma so that the second unbind is
		 * aborted.
		 *
		 * Even more scary is that the retire callback may free
		 * the object (last active vma). To prevent the explosion
		 * we defer the actual object free to a worker that can
		 * only proceed once it acquires the struct_mutex (which
		 * we currently hold, therefore it cannot free this object
		 * before we are finished).
		 */
		__i915_vma_pin(vma);

		for_each_active(active, idx) {
			ret = i915_gem_active_retire(&vma->last_read[idx],
						     &vma->vm->i915->drm.struct_mutex);
			if (ret)
				break;
		}

		if (!ret) {
			ret = i915_gem_active_retire(&vma->last_fence,
						     &vma->vm->i915->drm.struct_mutex);
		}

		__i915_vma_unpin(vma);
		if (ret)
			return ret;
	}
	GEM_BUG_ON(i915_vma_is_active(vma));

	if (i915_vma_is_pinned(vma))
		return -EBUSY;

	if (!drm_mm_node_allocated(&vma->node))
		goto destroy;

	GEM_BUG_ON(obj->bind_count == 0);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	if (i915_vma_is_map_and_fenceable(vma)) {
		/* release the fence reg _after_ flushing */
		ret = i915_vma_put_fence(vma);
		if (ret)
			return ret;

		/* Force a pagefault for domain tracking on next user access */
		i915_gem_release_mmap(obj);

		__i915_vma_iounmap(vma);
		vma->flags &= ~I915_VMA_CAN_FENCE;
	}

	if (likely(!vma->vm->closed)) {
		trace_i915_vma_unbind(vma);
		vma->vm->unbind_vma(vma);
	}
	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);

	if (vma->pages != obj->mm.pages) {
		GEM_BUG_ON(!vma->pages);
		sg_free_table(vma->pages);
		kfree(vma->pages);
	}
	vma->pages = NULL;

	i915_vma_remove(vma);

destroy:
	if (unlikely(i915_vma_is_closed(vma)))
		i915_vma_destroy(vma);

	return 0;
}
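
/*
 * Illustrative sketch (not part of this file): a caller that wants to evict
 * an object entirely walks obj->vma_list under struct_mutex and unbinds each
 * vma in turn. The helper name example_unbind_object() is hypothetical, and
 * recovery from a partially walked list is elided.
 *
 *	static int example_unbind_object(struct drm_i915_gem_object *obj)
 *	{
 *		struct i915_vma *vma, *next;
 *		int ret;
 *
 *		lockdep_assert_held(&obj->base.dev->struct_mutex);
 *
 *		list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
 *			ret = i915_vma_unbind(vma);
 *			if (ret)
 *				return ret;
 *		}
 *		return 0;
 *	}
 */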

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif