@@ -26,6 +26,7 @@
 #include <drm/drmP.h>
 
 #include "amdgpu.h"
+#include "amdgpu_display.h"
 #include <drm/amdgpu_drm.h>
 #include <linux/dma-buf.h>
 
@@ -164,6 +165,55 @@ struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
 	return bo->tbo.resv;
 }
 
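+/*
+ * Migrate the BO into GTT before the CPU reads through the dma-buf,
+ * but only on ASICs whose framebuffer domains include GTT; writes and
+ * GTT-incapable ASICs are left untouched.
+ */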
+static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
+				       enum dma_data_direction direction)
+{
+	struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+	struct ttm_operation_ctx ctx = { true, false };
+	u32 domain = amdgpu_display_framebuffer_domains(adev);
+	int ret;
+	bool reads = (direction == DMA_BIDIRECTIONAL ||
+		      direction == DMA_FROM_DEVICE);
+
+	if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
+		return 0;
+
+	/* move to gtt */
+	ret = amdgpu_bo_reserve(bo, false);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
+		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+	}
+
+	amdgpu_bo_unreserve(bo);
+	return ret;
+}
+
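+/* The default GEM dma-buf ops, plus our begin_cpu_access hook above. */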
+static const struct dma_buf_ops amdgpu_dmabuf_ops = {
+	.attach = drm_gem_map_attach,
+	.detach = drm_gem_map_detach,
+	.map_dma_buf = drm_gem_map_dma_buf,
+	.unmap_dma_buf = drm_gem_unmap_dma_buf,
+	.release = drm_gem_dmabuf_release,
+	.begin_cpu_access = amdgpu_gem_begin_cpu_access,
+	.map = drm_gem_dmabuf_kmap,
+	.map_atomic = drm_gem_dmabuf_kmap_atomic,
+	.unmap = drm_gem_dmabuf_kunmap,
+	.unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
+	.mmap = drm_gem_dmabuf_mmap,
+	.vmap = drm_gem_dmabuf_vmap,
+	.vunmap = drm_gem_dmabuf_vunmap,
+};
+
 struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
 					struct drm_gem_object *gobj,
 					int flags)
@@ -176,7 +226,34 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
 		return ERR_PTR(-EPERM);
 
 	buf = drm_gem_prime_export(dev, gobj, flags);
-	if (!IS_ERR(buf))
+	if (!IS_ERR(buf)) {
 		buf->file->f_mapping = dev->anon_inode->i_mapping;
+		buf->ops = &amdgpu_dmabuf_ops;
+	}
+
 	return buf;
 }
+
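+/*
+ * Recognize a dma-buf that this driver exported itself and hand back
+ * the underlying GEM object instead of wrapping it a second time.
+ */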
+struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
+						struct dma_buf *dma_buf)
+{
+	struct drm_gem_object *obj;
+
+	if (dma_buf->ops == &amdgpu_dmabuf_ops) {
+		obj = dma_buf->priv;
+		if (obj->dev == dev) {
+			/*
+			 * Importing a dma-buf exported from our own GEM
+			 * raises the GEM refcount, not the dma-buf f_count.
+			 */
+			drm_gem_object_get(obj);
+			return obj;
+		}
+	}
+
+	return drm_gem_prime_import(dev, dma_buf);
+}