@@ -441,6 +441,21 @@ static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
 	ttm_bo_unref(&bo);
 }
 
+static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
+					    enum ttm_ref_type ref_type)
+{
+	struct vmw_user_dma_buffer *user_bo;
+	user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);
+
+	switch (ref_type) {
+	case TTM_REF_SYNCCPU_WRITE:
+		ttm_bo_synccpu_write_release(&user_bo->dma.base);
+		break;
+	default:
+		BUG();
+	}
+}
+
 /**
  * vmw_user_dmabuf_alloc - Allocate a user dma buffer
  *
@@ -484,7 +499,8 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
 				   &user_bo->prime,
 				   shareable,
 				   ttm_buffer_type,
-				   &vmw_user_dmabuf_release, NULL);
+				   &vmw_user_dmabuf_release,
+				   &vmw_user_dmabuf_ref_obj_release);
 	if (unlikely(ret != 0)) {
 		ttm_bo_unref(&tmp);
 		goto out_no_base_object;
@@ -517,6 +533,130 @@ int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
 		vmw_user_bo->prime.base.shareable) ? 0 : -EPERM;
 }
 
+/**
+ * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
+ * access, idling previous GPU operations on the buffer and optionally
+ * blocking it for further command submissions.
+ *
+ * @user_bo: Pointer to the buffer object being grabbed for CPU access
+ * @tfile: Identifying the caller.
+ * @flags: Flags indicating how the grab should be performed.
+ *
+ * A blocking grab will be automatically released when @tfile is closed.
+ */
+static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
+					struct ttm_object_file *tfile,
+					uint32_t flags)
+{
+	struct ttm_buffer_object *bo = &user_bo->dma.base;
+	bool existed;
+	int ret;
+
+	if (flags & drm_vmw_synccpu_allow_cs) {
+		struct ttm_bo_device *bdev = bo->bdev;
+
+		spin_lock(&bdev->fence_lock);
+		ret = ttm_bo_wait(bo, false, true,
+				  !!(flags & drm_vmw_synccpu_dontblock));
+		spin_unlock(&bdev->fence_lock);
+		return ret;
+	}
+
+	ret = ttm_bo_synccpu_write_grab
+		(bo, !!(flags & drm_vmw_synccpu_dontblock));
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
+				 TTM_REF_SYNCCPU_WRITE, &existed);
+	if (ret != 0 || existed)
+		ttm_bo_synccpu_write_release(&user_bo->dma.base);
+
+	return ret;
+}
+
+/**
+ * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
+ * and unblock command submission on the buffer if blocked.
+ *
+ * @handle: Handle identifying the buffer object.
+ * @tfile: Identifying the caller.
+ * @flags: Flags indicating the type of release.
+ */
+static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
+					   struct ttm_object_file *tfile,
+					   uint32_t flags)
+{
+	if (!(flags & drm_vmw_synccpu_allow_cs))
+		return ttm_ref_object_base_unref(tfile, handle,
+						 TTM_REF_SYNCCPU_WRITE);
+
+	return 0;
+}
+
+/**
+ * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu
+ * functionality.
+ *
+ * @dev: Identifies the drm device.
+ * @data: Pointer to the ioctl argument.
+ * @file_priv: Identifies the caller.
+ *
+ * This function checks the ioctl arguments for validity and calls the
+ * relevant synccpu functions.
+ */
+int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file_priv)
+{
+	struct drm_vmw_synccpu_arg *arg =
+		(struct drm_vmw_synccpu_arg *) data;
+	struct vmw_dma_buffer *dma_buf;
+	struct vmw_user_dma_buffer *user_bo;
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	int ret;
+
+	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
+	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
+			       drm_vmw_synccpu_dontblock |
+			       drm_vmw_synccpu_allow_cs)) != 0) {
+		DRM_ERROR("Illegal synccpu flags.\n");
+		return -EINVAL;
+	}
+
+	switch (arg->op) {
+	case drm_vmw_synccpu_grab:
+		ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
+		if (unlikely(ret != 0))
+			return ret;
+
+		user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
+				       dma);
+		ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
+		vmw_dmabuf_unreference(&dma_buf);
+		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
+			     ret != -EBUSY)) {
+			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
+				  (unsigned int) arg->handle);
+			return ret;
+		}
+		break;
+	case drm_vmw_synccpu_release:
+		ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
+						      arg->flags);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
+				  (unsigned int) arg->handle);
+			return ret;
+		}
+		break;
+	default:
+		DRM_ERROR("Invalid synccpu operation.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
 			   struct drm_file *file_priv)
 {
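
For reference, a minimal user-space sketch of how the new synccpu interface is meant to be driven. This is an assumption-laden illustration, not part of the patch: it assumes the uapi header (vmwgfx_drm.h) exports struct drm_vmw_synccpu_arg with the handle/flags/op fields dereferenced above, the drm_vmw_synccpu_* flag and op values shown in the ioctl, and a DRM_VMW_SYNCCPU command index, and that libdrm's drmCommandWrite() is used to issue the command. The helper names and the buffer handle are hypothetical.

/* Hedged sketch: bracket CPU writes to a vmwgfx buffer object with a
 * synccpu grab/release pair. `fd` is an open vmwgfx DRM file descriptor
 * and `handle` a buffer-object handle owned by that file (both assumed). */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>		/* drmCommandWrite() */
#include "vmwgfx_drm.h"		/* assumed uapi header carrying the synccpu ABI */

static int synccpu_begin_write(int fd, uint32_t handle)
{
	struct drm_vmw_synccpu_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;
	arg.flags = drm_vmw_synccpu_write;	/* block further command submission */
	arg.op = drm_vmw_synccpu_grab;

	/* DRM_VMW_SYNCCPU is assumed to be the command index for this ioctl. */
	return drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
}

static int synccpu_end_write(int fd, uint32_t handle)
{
	struct drm_vmw_synccpu_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;
	arg.flags = drm_vmw_synccpu_write;
	arg.op = drm_vmw_synccpu_release;

	return drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
}

Because the grab above does not pass drm_vmw_synccpu_allow_cs, the kernel tracks it as a TTM_REF_SYNCCPU_WRITE ref object, so vmw_user_dmabuf_ref_obj_release() drops it automatically if the file is closed before synccpu_end_write() is reached.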