i915_vma.c

/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_vma.h"

#include "i915_drv.h"
#include "intel_ringbuffer.h"
#include "intel_frontbuffer.h"

#include <drm/drm_gem.h>

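/*
 * Request-retirement callback installed on each vma->last_read[engine]
 * tracker. Once the vma is idle on all engines it is moved to the vm's
 * inactive list; a closed and unpinned vma is unbound immediately. When the
 * owning object's last active vma retires, the object is bumped on the
 * bound list, marked dirty, and any deferred active reference is dropped.
 */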
static void
i915_vma_retire(struct i915_gem_active *active,
                struct drm_i915_gem_request *rq)
{
        const unsigned int idx = rq->engine->id;
        struct i915_vma *vma =
                container_of(active, struct i915_vma, last_read[idx]);
        struct drm_i915_gem_object *obj = vma->obj;

        GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));

        i915_vma_clear_active(vma, idx);
        if (i915_vma_is_active(vma))
                return;

        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
        if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
                WARN_ON(i915_vma_unbind(vma));

        GEM_BUG_ON(!i915_gem_object_is_active(obj));
        if (--obj->active_count)
                return;

        /* Bump our place on the bound list to keep it roughly in LRU order
         * so that we don't steal from recently used but inactive objects
         * (unless we are forced to ofc!)
         */
        if (obj->bind_count)
                list_move_tail(&obj->global_link, &rq->i915->mm.bound_list);

        obj->mm.dirty = true; /* be paranoid */

        if (i915_gem_object_has_active_reference(obj)) {
                i915_gem_object_clear_active_reference(obj);
                i915_gem_object_put(obj);
        }
}

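/*
 * Allocate and initialise a new vma for the (obj, vm, view) triplet: compute
 * its size (accounting for partial and rotated GGTT views), and for GGTT
 * vmas the fence size and alignment, then link it into the object's vma
 * list and rb-tree and onto the vm's unbound list. Returns an ERR_PTR on
 * failure.
 */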
static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
           struct i915_address_space *vm,
           const struct i915_ggtt_view *view)
{
        struct i915_vma *vma;
        struct rb_node *rb, **p;
        int i;

        vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
        if (vma == NULL)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&vma->exec_list);
        for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
                init_request_active(&vma->last_read[i], i915_vma_retire);
        init_request_active(&vma->last_fence, NULL);
        vma->vm = vm;
        vma->obj = obj;
        vma->size = obj->base.size;
        vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

        if (view) {
                vma->ggtt_view = *view;
                if (view->type == I915_GGTT_VIEW_PARTIAL) {
                        GEM_BUG_ON(range_overflows_t(u64,
                                                     view->partial.offset,
                                                     view->partial.size,
                                                     obj->base.size >> PAGE_SHIFT));
                        vma->size = view->partial.size;
                        vma->size <<= PAGE_SHIFT;
                        GEM_BUG_ON(vma->size >= obj->base.size);
                } else if (view->type == I915_GGTT_VIEW_ROTATED) {
                        vma->size = intel_rotation_info_size(&view->rotated);
                        vma->size <<= PAGE_SHIFT;
                }
        }

        if (unlikely(vma->size > vm->total))
                goto err_vma;

        GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

        if (i915_is_ggtt(vm)) {
                if (unlikely(overflows_type(vma->size, u32)))
                        goto err_vma;

                vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
                                                      i915_gem_object_get_tiling(obj),
                                                      i915_gem_object_get_stride(obj));
                if (unlikely(vma->fence_size < vma->size || /* overflow */
                             vma->fence_size > vm->total))
                        goto err_vma;

                GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

                vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
                                                                i915_gem_object_get_tiling(obj),
                                                                i915_gem_object_get_stride(obj));
                GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

                vma->flags |= I915_VMA_GGTT;
                list_add(&vma->obj_link, &obj->vma_list);
        } else {
                i915_ppgtt_get(i915_vm_to_ppgtt(vm));
                list_add_tail(&vma->obj_link, &obj->vma_list);
        }

        rb = NULL;
        p = &obj->vma_tree.rb_node;
        while (*p) {
                struct i915_vma *pos;

                rb = *p;
                pos = rb_entry(rb, struct i915_vma, obj_node);
                if (i915_vma_compare(pos, vm, view) < 0)
                        p = &rb->rb_right;
                else
                        p = &rb->rb_left;
        }
        rb_link_node(&vma->obj_node, rb, p);
        rb_insert_color(&vma->obj_node, &obj->vma_tree);
        list_add(&vma->vm_link, &vm->unbound_list);

        return vma;

err_vma:
        kmem_cache_free(vm->i915->vmas, vma);
        return ERR_PTR(-E2BIG);
}

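/*
 * Walk the object's vma rb-tree (keyed by i915_vma_compare()) looking for an
 * existing vma matching (vm, view). Returns NULL if no match exists.
 */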
static struct i915_vma *
vma_lookup(struct drm_i915_gem_object *obj,
           struct i915_address_space *vm,
           const struct i915_ggtt_view *view)
{
        struct rb_node *rb;

        rb = obj->vma_tree.rb_node;
        while (rb) {
                struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
                long cmp;

                cmp = i915_vma_compare(vma, vm, view);
                if (cmp == 0)
                        return vma;

                if (cmp < 0)
                        rb = rb->rb_right;
                else
                        rb = rb->rb_left;
        }

        return NULL;
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Must be called with struct_mutex held.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
                  struct i915_address_space *vm,
                  const struct i915_ggtt_view *view)
{
        struct i915_vma *vma;

        lockdep_assert_held(&obj->base.dev->struct_mutex);
        GEM_BUG_ON(view && !i915_is_ggtt(vm));
        GEM_BUG_ON(vm->closed);

        vma = vma_lookup(obj, vm, view);
        if (!vma)
                vma = vma_create(obj, vm, view);

        GEM_BUG_ON(!IS_ERR(vma) && i915_vma_is_closed(vma));
        GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
        GEM_BUG_ON(!IS_ERR(vma) && vma_lookup(obj, vm, view) != vma);
        return vma;
}

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
                  u32 flags)
{
        u32 bind_flags;
        u32 vma_flags;
        int ret;

        if (WARN_ON(flags == 0))
                return -EINVAL;

        bind_flags = 0;
        if (flags & PIN_GLOBAL)
                bind_flags |= I915_VMA_GLOBAL_BIND;
        if (flags & PIN_USER)
                bind_flags |= I915_VMA_LOCAL_BIND;

        vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
        if (flags & PIN_UPDATE)
                bind_flags |= vma_flags;
        else
                bind_flags &= ~vma_flags;
        if (bind_flags == 0)
                return 0;

        if (GEM_WARN_ON(range_overflows(vma->node.start,
                                        vma->node.size,
                                        vma->vm->total)))
                return -ENODEV;

        if (vma_flags == 0 && vma->vm->allocate_va_range) {
                trace_i915_va_alloc(vma);
                ret = vma->vm->allocate_va_range(vma->vm,
                                                 vma->node.start,
                                                 vma->node.size);
                if (ret)
                        return ret;
        }

        trace_i915_vma_bind(vma, bind_flags);
        ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
        if (ret)
                return ret;

        vma->flags |= bind_flags;
        return 0;
}

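/**
 * i915_vma_pin_iomap - map a GGTT-bound vma into CPU address space
 * @vma: VMA to map; must already carry a global (GGTT) binding
 *
 * Creates (or reuses) a write-combining iomapping of the vma's range in the
 * mappable aperture and pins the vma so it cannot be unbound while the
 * mapping is in use. Requires struct_mutex and a runtime-pm wakeref; callers
 * must release the mapping again with i915_vma_unpin_iomap().
 *
 * Returns a valid iomapped pointer or an IO_ERR_PTR()-encoded error.
 */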
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
        void __iomem *ptr;

        /* Access through the GTT requires the device to be awake. */
        assert_rpm_wakelock_held(vma->vm->i915);

        lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
        if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
                return IO_ERR_PTR(-ENODEV);

        GEM_BUG_ON(!i915_vma_is_ggtt(vma));
        GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);

        ptr = vma->iomap;
        if (ptr == NULL) {
                ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
                                        vma->node.start,
                                        vma->node.size);
                if (ptr == NULL)
                        return IO_ERR_PTR(-ENOMEM);

                vma->iomap = ptr;
        }

        __i915_vma_pin(vma);
        return ptr;
}

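/*
 * Take ownership of the vma pointer at *p_vma (clearing it), unpin and close
 * the vma, then drop the reference on the backing object unless the object
 * is still active. Safe to call with *p_vma == NULL.
 */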
void i915_vma_unpin_and_release(struct i915_vma **p_vma)
{
        struct i915_vma *vma;
        struct drm_i915_gem_object *obj;

        vma = fetch_and_zero(p_vma);
        if (!vma)
                return;

        obj = vma->obj;

        i915_vma_unpin(vma);
        i915_vma_close(vma);

        __i915_gem_object_release_unless_active(obj);
}

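/**
 * i915_vma_misplaced - check whether a bound vma violates the requested placement
 * @vma: the vma to check
 * @size: minimum node size required
 * @alignment: required start alignment, or 0 to ignore
 * @flags: placement constraints (PIN_MAPPABLE, PIN_OFFSET_BIAS, PIN_OFFSET_FIXED)
 *
 * Returns true if the vma is currently bound but its node does not satisfy
 * the requested size, alignment or offset constraints, i.e. it would have to
 * be unbound and rebound elsewhere. An unbound vma is never misplaced.
 */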
bool
i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
        if (!drm_mm_node_allocated(&vma->node))
                return false;

        if (vma->node.size < size)
                return true;

        GEM_BUG_ON(alignment && !is_power_of_2(alignment));
        if (alignment && !IS_ALIGNED(vma->node.start, alignment))
                return true;

        if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
                return true;

        if (flags & PIN_OFFSET_BIAS &&
            vma->node.start < (flags & PIN_OFFSET_MASK))
                return true;

        if (flags & PIN_OFFSET_FIXED &&
            vma->node.start != (flags & PIN_OFFSET_MASK))
                return true;

        return false;
}

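/*
 * Recompute the I915_VMA_CAN_FENCE flag: the vma qualifies only if its node
 * is at least fence_size bytes, aligned to fence_alignment, and the
 * fence-sized range starting at its offset fits below the mappable aperture
 * limit. Rotated views never need a fence and are left untouched.
 */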
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
        bool mappable, fenceable;

        GEM_BUG_ON(!i915_vma_is_ggtt(vma));
        GEM_BUG_ON(!vma->fence_size);

        /*
         * Explicitly disable for rotated VMA since the display does not
         * need the fence and the VMA is not accessible to other users.
         */
        if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
                return;

        fenceable = (vma->node.size >= vma->fence_size &&
                     IS_ALIGNED(vma->node.start, vma->fence_alignment));

        mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

        if (mappable && fenceable)
                vma->flags |= I915_VMA_CAN_FENCE;
        else
                vma->flags &= ~I915_VMA_CAN_FENCE;
}

static bool color_differs(struct drm_mm_node *node, unsigned long color)
{
        return node->allocated && node->color != color;
}

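/*
 * When drm_mm.color_adjust is set for this vm, neighbouring nodes of a
 * different colour (cache level) must be separated by a hole. Check that the
 * already inserted vma respects this with respect to both of its neighbours.
 */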
bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
{
        struct drm_mm_node *node = &vma->node;
        struct drm_mm_node *other;

        /*
         * On some machines we have to be careful when putting differing types
         * of snoopable memory together to avoid the prefetcher crossing memory
         * domains and dying. During vm initialisation, we decide whether or not
         * these constraints apply and set the drm_mm.color_adjust
         * appropriately.
         */
        if (vma->vm->mm.color_adjust == NULL)
                return true;

        /* Only valid to be called on an already inserted vma */
        GEM_BUG_ON(!drm_mm_node_allocated(node));
        GEM_BUG_ON(list_empty(&node->node_list));

        other = list_prev_entry(node, node_list);
        if (color_differs(other, cache_level) && !drm_mm_hole_follows(other))
                return false;

        other = list_next_entry(node, node_list);
        if (color_differs(other, cache_level) && !drm_mm_hole_follows(node))
                return false;

        return true;
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
        struct drm_i915_private *dev_priv = vma->vm->i915;
        struct drm_i915_gem_object *obj = vma->obj;
        u64 start, end;
        int ret;

        GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
        GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

        size = max(size, vma->size);
        alignment = max(alignment, vma->display_alignment);
        if (flags & PIN_MAPPABLE) {
                size = max_t(typeof(size), size, vma->fence_size);
                alignment = max_t(typeof(alignment),
                                  alignment, vma->fence_alignment);
        }

        GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
        GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
        GEM_BUG_ON(!is_power_of_2(alignment));

        start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
        GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

        end = vma->vm->total;
        if (flags & PIN_MAPPABLE)
                end = min_t(u64, end, dev_priv->ggtt.mappable_end);
        if (flags & PIN_ZONE_4G)
                end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
        GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

        /* If binding the object/GGTT view requires more space than the entire
         * aperture has, reject it early before evicting everything in a vain
         * attempt to find space.
         */
        if (size > end) {
                DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
                          size, obj->base.size,
                          flags & PIN_MAPPABLE ? "mappable" : "total",
                          end);
                return -E2BIG;
        }

        ret = i915_gem_object_pin_pages(obj);
        if (ret)
                return ret;

        if (flags & PIN_OFFSET_FIXED) {
                u64 offset = flags & PIN_OFFSET_MASK;

                if (!IS_ALIGNED(offset, alignment) ||
                    range_overflows(offset, size, end)) {
                        ret = -EINVAL;
                        goto err_unpin;
                }

                ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
                                           size, offset, obj->cache_level,
                                           flags);
                if (ret)
                        goto err_unpin;
        } else {
                ret = i915_gem_gtt_insert(vma->vm, &vma->node,
                                          size, alignment, obj->cache_level,
                                          start, end, flags);
                if (ret)
                        goto err_unpin;

                GEM_BUG_ON(vma->node.start < start);
                GEM_BUG_ON(vma->node.start + vma->node.size > end);
        }
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));

        list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
        list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
        obj->bind_count++;
        GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);

        return 0;

err_unpin:
        i915_gem_object_unpin_pages(obj);
        return ret;
}

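/*
 * Slow path of vma pinning (see i915_vma_pin() in i915_vma.h), entered with
 * the pin count already raised by the caller: insert the vma into its
 * address space if it is not yet bound, bind it with the requested flags,
 * and refresh the map-and-fenceable state when the global binding changes.
 * On failure the caller's pin reference is dropped again.
 */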
int __i915_vma_do_pin(struct i915_vma *vma,
                      u64 size, u64 alignment, u64 flags)
{
        unsigned int bound = vma->flags;
        int ret;

        lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
        GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
        GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));

        if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
                ret = -EBUSY;
                goto err;
        }

        if ((bound & I915_VMA_BIND_MASK) == 0) {
                ret = i915_vma_insert(vma, size, alignment, flags);
                if (ret)
                        goto err;
        }

        ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
        if (ret)
                goto err;

        if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
                __i915_vma_set_map_and_fenceable(vma);

        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
        return 0;

err:
        __i915_vma_unpin(vma);
        return ret;
}

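/*
 * Final teardown of a closed, idle, unbound vma: unlink it from its address
 * space, drop the ppGTT reference it held (if any) and free the structure.
 */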
void i915_vma_destroy(struct i915_vma *vma)
{
        GEM_BUG_ON(vma->node.allocated);
        GEM_BUG_ON(i915_vma_is_active(vma));
        GEM_BUG_ON(!i915_vma_is_closed(vma));
        GEM_BUG_ON(vma->fence);

        list_del(&vma->vm_link);
        if (!i915_vma_is_ggtt(vma))
                i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));

        kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
}

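/*
 * Mark the vma as closed and remove it from the object's lookup structures.
 * If it is already idle and unpinned it is unbound (and destroyed)
 * immediately; otherwise that happens when it finally retires.
 */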
void i915_vma_close(struct i915_vma *vma)
{
        GEM_BUG_ON(i915_vma_is_closed(vma));
        vma->flags |= I915_VMA_CLOSED;

        list_del(&vma->obj_link);
        rb_erase(&vma->obj_node, &vma->obj->vma_tree);

        if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
                WARN_ON(i915_vma_unbind(vma));
}

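/* Release the CPU iomapping created by i915_vma_pin_iomap(), if any. */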
static void __i915_vma_iounmap(struct i915_vma *vma)
{
        GEM_BUG_ON(i915_vma_is_pinned(vma));

        if (vma->iomap == NULL)
                return;

        io_mapping_unmap(vma->iomap);
        vma->iomap = NULL;
}

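/**
 * i915_vma_unbind - unbind the vma from its address space
 * @vma: the vma to unbind
 *
 * Waits for any outstanding GPU activity on the vma, releases its fence and
 * CPU mappings, tears down the PTEs and removes its node from the address
 * space, dropping the pages pin taken when the vma was inserted. A closed
 * vma is destroyed once unbound.
 *
 * Returns 0 on success, -EBUSY if the vma is still pinned, or a negative
 * error code if waiting for activity fails.
 */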
int i915_vma_unbind(struct i915_vma *vma)
{
        struct drm_i915_gem_object *obj = vma->obj;
        unsigned long active;
        int ret;

        lockdep_assert_held(&obj->base.dev->struct_mutex);

        /* First wait upon any activity as retiring the request may
         * have side-effects such as unpinning or even unbinding this vma.
         */
        active = i915_vma_get_active(vma);
        if (active) {
                int idx;

                /* When a closed VMA is retired, it is unbound - eek.
                 * In order to prevent it from being recursively closed,
                 * take a pin on the vma so that the second unbind is
                 * aborted.
                 *
                 * Even more scary is that the retire callback may free
                 * the object (last active vma). To prevent the explosion
                 * we defer the actual object free to a worker that can
                 * only proceed once it acquires the struct_mutex (which
                 * we currently hold, therefore it cannot free this object
                 * before we are finished).
                 */
                __i915_vma_pin(vma);

                for_each_active(active, idx) {
                        ret = i915_gem_active_retire(&vma->last_read[idx],
                                                     &vma->vm->i915->drm.struct_mutex);
                        if (ret)
                                break;
                }

                __i915_vma_unpin(vma);
                if (ret)
                        return ret;

                GEM_BUG_ON(i915_vma_is_active(vma));
        }

        if (i915_vma_is_pinned(vma))
                return -EBUSY;

        if (!drm_mm_node_allocated(&vma->node))
                goto destroy;

        GEM_BUG_ON(obj->bind_count == 0);
        GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

        if (i915_vma_is_map_and_fenceable(vma)) {
                /* release the fence reg _after_ flushing */
                ret = i915_vma_put_fence(vma);
                if (ret)
                        return ret;

                /* Force a pagefault for domain tracking on next user access */
                i915_gem_release_mmap(obj);

                __i915_vma_iounmap(vma);
                vma->flags &= ~I915_VMA_CAN_FENCE;
        }

        if (likely(!vma->vm->closed)) {
                trace_i915_vma_unbind(vma);
                vma->vm->unbind_vma(vma);
        }
        vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);

        drm_mm_remove_node(&vma->node);
        list_move_tail(&vma->vm_link, &vma->vm->unbound_list);

        if (vma->pages != obj->mm.pages) {
                GEM_BUG_ON(!vma->pages);
                sg_free_table(vma->pages);
                kfree(vma->pages);
        }
        vma->pages = NULL;

        /* Since the unbound list is global, only move to that list if
         * no more VMAs exist.
         */
        if (--obj->bind_count == 0)
                list_move_tail(&obj->global_link,
                               &to_i915(obj->base.dev)->mm.unbound_list);

        /* And finally now the object is completely decoupled from this vma,
         * we can drop its hold on the backing storage and allow it to be
         * reaped by the shrinker.
         */
        i915_gem_object_unpin_pages(obj);
        GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);

destroy:
        if (unlikely(i915_vma_is_closed(vma)))
                i915_vma_destroy(vma);

        return 0;
}