  1. // SPDX-License-Identifier: GPL-2.0 OR MIT
  2. /**************************************************************************
  3. *
  4. * Copyright 2011-2015 VMware, Inc., Palo Alto, CA., USA
  5. *
  6. * Permission is hereby granted, free of charge, to any person obtaining a
  7. * copy of this software and associated documentation files (the
  8. * "Software"), to deal in the Software without restriction, including
  9. * without limitation the rights to use, copy, modify, merge, publish,
  10. * distribute, sub license, and/or sell copies of the Software, and to
  11. * permit persons to whom the Software is furnished to do so, subject to
  12. * the following conditions:
  13. *
  14. * The above copyright notice and this permission notice (including the
  15. * next paragraph) shall be included in all copies or substantial portions
  16. * of the Software.
  17. *
  18. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20. * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21. * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22. * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23. * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24. * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25. *
  26. **************************************************************************/
  27. #include <drm/ttm/ttm_placement.h>
  28. #include <drm/drmP.h>
  29. #include "vmwgfx_drv.h"
  30. /**
  31. * vmw_dmabuf_pin_in_placement - Validate a buffer to placement.
  32. *
  33. * @dev_priv: Driver private.
  34. * @buf: DMA buffer to move.
  35. * @placement: The placement to pin it.
  36. * @interruptible: Use interruptible wait.
  37. *
  38. * Returns
  39. * -ERESTARTSYS if interrupted by a signal.
  40. */
  41. int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
  42. struct vmw_dma_buffer *buf,
  43. struct ttm_placement *placement,
  44. bool interruptible)
  45. {
  46. struct ttm_operation_ctx ctx = {interruptible, false };
  47. struct ttm_buffer_object *bo = &buf->base;
  48. int ret;
  49. uint32_t new_flags;
  50. ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
  51. if (unlikely(ret != 0))
  52. return ret;
  53. vmw_execbuf_release_pinned_bo(dev_priv);
  54. ret = ttm_bo_reserve(bo, interruptible, false, NULL);
  55. if (unlikely(ret != 0))
  56. goto err;
  57. if (buf->pin_count > 0)
  58. ret = ttm_bo_mem_compat(placement, &bo->mem,
  59. &new_flags) == true ? 0 : -EINVAL;
  60. else
  61. ret = ttm_bo_validate(bo, placement, &ctx);
  62. if (!ret)
  63. vmw_bo_pin_reserved(buf, true);
  64. ttm_bo_unreserve(bo);
  65. err:
  66. ttm_write_unlock(&dev_priv->reservation_sem);
  67. return ret;
  68. }
  69. /**
  70. * vmw_dmabuf_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
  71. *
  72. * This function takes the reservation_sem in write mode.
  73. * Flushes and unpins the query bo to avoid failures.
  74. *
  75. * @dev_priv: Driver private.
  76. * @buf: DMA buffer to move.
  77. * @pin: Pin buffer if true.
  78. * @interruptible: Use interruptible wait.
  79. *
  80. * Returns
  81. * -ERESTARTSYS if interrupted by a signal.
  82. */
  83. int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
  84. struct vmw_dma_buffer *buf,
  85. bool interruptible)
  86. {
  87. struct ttm_operation_ctx ctx = {interruptible, false };
  88. struct ttm_buffer_object *bo = &buf->base;
  89. int ret;
  90. uint32_t new_flags;
  91. ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
  92. if (unlikely(ret != 0))
  93. return ret;
  94. vmw_execbuf_release_pinned_bo(dev_priv);
  95. ret = ttm_bo_reserve(bo, interruptible, false, NULL);
  96. if (unlikely(ret != 0))
  97. goto err;
  98. if (buf->pin_count > 0) {
  99. ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
  100. &new_flags) == true ? 0 : -EINVAL;
  101. goto out_unreserve;
  102. }
  103. ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
  104. if (likely(ret == 0) || ret == -ERESTARTSYS)
  105. goto out_unreserve;
  106. ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
  107. out_unreserve:
  108. if (!ret)
  109. vmw_bo_pin_reserved(buf, true);
  110. ttm_bo_unreserve(bo);
  111. err:
  112. ttm_write_unlock(&dev_priv->reservation_sem);
  113. return ret;
  114. }
  115. /**
  116. * vmw_dmabuf_pin_in_vram - Move a buffer to vram.
  117. *
  118. * This function takes the reservation_sem in write mode.
  119. * Flushes and unpins the query bo to avoid failures.
  120. *
  121. * @dev_priv: Driver private.
  122. * @buf: DMA buffer to move.
  123. * @interruptible: Use interruptible wait.
  124. *
  125. * Returns
  126. * -ERESTARTSYS if interrupted by a signal.
  127. */
  128. int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
  129. struct vmw_dma_buffer *buf,
  130. bool interruptible)
  131. {
  132. return vmw_dmabuf_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
  133. interruptible);
  134. }
  135. /**
  136. * vmw_dmabuf_pin_in_start_of_vram - Move a buffer to start of vram.
  137. *
  138. * This function takes the reservation_sem in write mode.
  139. * Flushes and unpins the query bo to avoid failures.
  140. *
  141. * @dev_priv: Driver private.
  142. * @buf: DMA buffer to pin.
  143. * @interruptible: Use interruptible wait.
  144. *
  145. * Returns
  146. * -ERESTARTSYS if interrupted by a signal.
  147. */
  148. int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
  149. struct vmw_dma_buffer *buf,
  150. bool interruptible)
  151. {
  152. struct ttm_operation_ctx ctx = {interruptible, false };
  153. struct ttm_buffer_object *bo = &buf->base;
  154. struct ttm_placement placement;
  155. struct ttm_place place;
  156. int ret = 0;
  157. uint32_t new_flags;
  158. place = vmw_vram_placement.placement[0];
  159. place.lpfn = bo->num_pages;
  160. placement.num_placement = 1;
  161. placement.placement = &place;
  162. placement.num_busy_placement = 1;
  163. placement.busy_placement = &place;
  164. ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
  165. if (unlikely(ret != 0))
  166. return ret;
  167. vmw_execbuf_release_pinned_bo(dev_priv);
  168. ret = ttm_bo_reserve(bo, interruptible, false, NULL);
  169. if (unlikely(ret != 0))
  170. goto err_unlock;
  171. /*
  172. * Is this buffer already in vram but not at the start of it?
  173. * In that case, evict it first because TTM isn't good at handling
  174. * that situation.
  175. */
  176. if (bo->mem.mem_type == TTM_PL_VRAM &&
  177. bo->mem.start < bo->num_pages &&
  178. bo->mem.start > 0 &&
  179. buf->pin_count == 0) {
  180. ctx.interruptible = false;
  181. (void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
  182. }
  183. if (buf->pin_count > 0)
  184. ret = ttm_bo_mem_compat(&placement, &bo->mem,
  185. &new_flags) == true ? 0 : -EINVAL;
  186. else
  187. ret = ttm_bo_validate(bo, &placement, &ctx);
  188. /* For some reason we didn't end up at the start of vram */
  189. WARN_ON(ret == 0 && bo->offset != 0);
  190. if (!ret)
  191. vmw_bo_pin_reserved(buf, true);
  192. ttm_bo_unreserve(bo);
  193. err_unlock:
  194. ttm_write_unlock(&dev_priv->reservation_sem);
  195. return ret;
  196. }
  197. /**
  198. * vmw_dmabuf_unpin - Unpin the buffer given buffer, does not move the buffer.
  199. *
  200. * This function takes the reservation_sem in write mode.
  201. *
  202. * @dev_priv: Driver private.
  203. * @buf: DMA buffer to unpin.
  204. * @interruptible: Use interruptible wait.
  205. *
  206. * Returns
  207. * -ERESTARTSYS if interrupted by a signal.
  208. */
  209. int vmw_dmabuf_unpin(struct vmw_private *dev_priv,
  210. struct vmw_dma_buffer *buf,
  211. bool interruptible)
  212. {
  213. struct ttm_buffer_object *bo = &buf->base;
  214. int ret;
  215. ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
  216. if (unlikely(ret != 0))
  217. return ret;
  218. ret = ttm_bo_reserve(bo, interruptible, false, NULL);
  219. if (unlikely(ret != 0))
  220. goto err;
  221. vmw_bo_pin_reserved(buf, false);
  222. ttm_bo_unreserve(bo);
  223. err:
  224. ttm_read_unlock(&dev_priv->reservation_sem);
  225. return ret;
  226. }
  227. /**
  228. * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
  229. * of a buffer.
  230. *
  231. * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
  232. * @ptr: SVGAGuestPtr returning the result.
  233. */
  234. void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
  235. SVGAGuestPtr *ptr)
  236. {
  237. if (bo->mem.mem_type == TTM_PL_VRAM) {
  238. ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
  239. ptr->offset = bo->offset;
  240. } else {
  241. ptr->gmrId = bo->mem.start;
  242. ptr->offset = 0;
  243. }
  244. }
/**
 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
 *
 * @vbo: The buffer object. Must be reserved.
 * @pin: Whether to pin or unpin.
 *
 */
void vmw_bo_pin_reserved(struct vmw_dma_buffer *vbo, bool pin)
{
	/* no_wait_gpu = true: the buffer isn't moving, so no gpu wait needed. */
	struct ttm_operation_ctx ctx = { false, true };
	struct ttm_place pl;
	struct ttm_placement placement;
	struct ttm_buffer_object *bo = &vbo->base;
	uint32_t old_mem_type = bo->mem.mem_type;
	int ret;

	/* Caller must hold the bo's reservation lock. */
	lockdep_assert_held(&bo->resv->lock.base);

	/*
	 * pin_count is a nesting counter; only the first pin and the last
	 * unpin actually change the bo's eviction state.
	 */
	if (pin) {
		if (vbo->pin_count++ > 0)
			return;
	} else {
		WARN_ON(vbo->pin_count <= 0);
		if (--vbo->pin_count > 0)
			return;
	}

	/*
	 * Build a placement that accepts every memory type this bo can be
	 * in, so the validate below keeps it exactly where it is and only
	 * toggles the NO_EVICT flag.
	 */
	pl.fpfn = 0;
	pl.lpfn = 0;
	pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
		| TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
	if (pin)
		pl.flags |= TTM_PL_FLAG_NO_EVICT;

	memset(&placement, 0, sizeof(placement));
	placement.num_placement = 1;
	placement.placement = &pl;

	ret = ttm_bo_validate(bo, &placement, &ctx);

	/* The validate must neither fail nor move the buffer. */
	BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
}
  281. /*
  282. * vmw_dma_buffer_unmap - Tear down a cached buffer object map.
  283. *
  284. * @vbo: The buffer object whose map we are tearing down.
  285. *
  286. * This function tears down a cached map set up using
  287. * vmw_dma_buffer_map_and_cache().
  288. */
  289. void vmw_dma_buffer_unmap(struct vmw_dma_buffer *vbo)
  290. {
  291. if (vbo->map.bo == NULL)
  292. return;
  293. ttm_bo_kunmap(&vbo->map);
  294. }
  295. /*
  296. * vmw_dma_buffer_map_and_cache - Map a buffer object and cache the map
  297. *
  298. * @vbo: The buffer object to map
  299. * Return: A kernel virtual address or NULL if mapping failed.
  300. *
  301. * This function maps a buffer object into the kernel address space, or
  302. * returns the virtual kernel address of an already existing map. The virtual
  303. * address remains valid as long as the buffer object is pinned or reserved.
  304. * The cached map is torn down on either
  305. * 1) Buffer object move
  306. * 2) Buffer object swapout
  307. * 3) Buffer object destruction
  308. *
  309. */
  310. void *vmw_dma_buffer_map_and_cache(struct vmw_dma_buffer *vbo)
  311. {
  312. struct ttm_buffer_object *bo = &vbo->base;
  313. bool not_used;
  314. void *virtual;
  315. int ret;
  316. virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
  317. if (virtual)
  318. return virtual;
  319. ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
  320. if (ret)
  321. DRM_ERROR("Buffer object map failed: %d.\n", ret);
  322. return ttm_kmap_obj_virtual(&vbo->map, &not_used);
  323. }