@@ -41,7 +41,6 @@ struct vmw_user_surface {
 	struct ttm_prime_object prime;
 	struct vmw_surface srf;
 	uint32_t size;
-	uint32_t backup_handle;
 };
 
 /**
@@ -68,6 +67,14 @@ static int vmw_legacy_srf_unbind(struct vmw_resource *res,
 				 struct ttm_validate_buffer *val_buf);
 static int vmw_legacy_srf_create(struct vmw_resource *res);
 static int vmw_legacy_srf_destroy(struct vmw_resource *res);
+static int vmw_gb_surface_create(struct vmw_resource *res);
+static int vmw_gb_surface_bind(struct vmw_resource *res,
+			       struct ttm_validate_buffer *val_buf);
+static int vmw_gb_surface_unbind(struct vmw_resource *res,
+				 bool readback,
+				 struct ttm_validate_buffer *val_buf);
+static int vmw_gb_surface_destroy(struct vmw_resource *res);
+
 
 static const struct vmw_user_resource_conv user_surface_conv = {
 	.object_type = VMW_RES_SURFACE,
@@ -93,6 +100,18 @@ static const struct vmw_res_func vmw_legacy_surface_func = {
 	.unbind = &vmw_legacy_srf_unbind
 };
 
+static const struct vmw_res_func vmw_gb_surface_func = {
+	.res_type = vmw_res_surface,
+	.needs_backup = true,
+	.may_evict = true,
+	.type_name = "guest backed surfaces",
+	.backup_placement = &vmw_mob_placement,
+	.create = vmw_gb_surface_create,
+	.destroy = vmw_gb_surface_destroy,
+	.bind = vmw_gb_surface_bind,
+	.unbind = vmw_gb_surface_unbind
+};
+
 /**
  * struct vmw_surface_dma - SVGA3D DMA command
  */
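
Aside (illustrative, not part of the patch): the vmw_res_func table added above is what lets the generic resource code drive legacy and guest-backed surfaces through the same hooks; vmw_hw_surface_destroy() in the next hunk, for instance, compares res->func->destroy to pick the GB path. A minimal sketch of the dispatch pattern, with simplified stand-in types and a hypothetical caller (the real definitions live in vmwgfx_drv.h and vmwgfx_resource.c):

#include <stdbool.h>
#include <stddef.h>

/* Sketch only: simplified stand-ins for the driver's real types. */
struct vmw_resource;

struct vmw_res_func {
	bool needs_backup;	/* resource must carry a backing buffer */
	bool may_evict;		/* resource may be evicted under pressure */
	int (*create)(struct vmw_resource *res);
	int (*destroy)(struct vmw_resource *res);
	int (*bind)(struct vmw_resource *res, void *val_buf);
	int (*unbind)(struct vmw_resource *res, bool readback, void *val_buf);
};

struct vmw_resource {
	int id;
	const struct vmw_res_func *func;
};

/* Hypothetical caller: make the resource usable by the device without
 * knowing whether it is a legacy or a guest-backed surface. */
static int vmw_resource_make_ready_sketch(struct vmw_resource *res,
					  void *val_buf)
{
	int ret = res->func->create(res);

	if (ret != 0)
		return ret;
	return (res->func->bind != NULL) ? res->func->bind(res, val_buf) : 0;
}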
@@ -291,6 +310,11 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
 	struct vmw_surface *srf;
 	void *cmd;
 
+	if (res->func->destroy == vmw_gb_surface_destroy) {
+		(void) vmw_gb_surface_destroy(res);
+		return;
+	}
+
 	if (res->id != -1) {
 
 		cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
@@ -549,12 +573,15 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
 	struct vmw_resource *res = &srf->res;
 
 	BUG_ON(res_free == NULL);
-	(void) vmw_3d_resource_inc(dev_priv, false);
+	if (!dev_priv->has_mob)
+		(void) vmw_3d_resource_inc(dev_priv, false);
 	ret = vmw_resource_init(dev_priv, res, true, res_free,
+				(dev_priv->has_mob) ? &vmw_gb_surface_func :
 				&vmw_legacy_surface_func);
 
 	if (unlikely(ret != 0)) {
-		vmw_3d_resource_dec(dev_priv, false);
+		if (!dev_priv->has_mob)
+			vmw_3d_resource_dec(dev_priv, false);
 		res_free(res);
 		return ret;
 	}
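
Aside (illustrative, not part of the patch): on MOB-capable devices the hunk above installs vmw_gb_surface_func and moves the 3D-resource reference from init time into the create/destroy hooks themselves, since a GB surface only occupies device resources once it has been defined. A rough lifecycle sketch, reusing the stand-in types from the previous aside; the assumption that eviction passes readback == true to preserve surface contents follows from the unbind implementation in the final hunk:

/* Sketch only: the order in which the resource layer is expected to
 * run the four GB hooks over a surface's lifetime. */
static int gb_surface_lifecycle_sketch(struct vmw_resource *res,
				       void *val_buf)
{
	int ret;

	ret = res->func->create(res);		/* define surface, alloc id */
	if (ret != 0)
		return ret;

	ret = res->func->bind(res, val_buf);	/* attach the MOB backup */
	if (ret != 0)
		return ret;

	/* Eviction path: read contents back, then detach the MOB. */
	ret = res->func->unbind(res, true, val_buf);
	if (ret != 0)
		return ret;

	return res->func->destroy(res);		/* release the surface id */
}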
@@ -894,3 +921,423 @@ out_no_reference:
 
 	return ret;
 }
+
+/**
+ * vmw_gb_surface_create - Use the device to create a hardware surface.
+ *
+ * @res: Pointer to the struct vmw_resource embedded in a
+ *       struct vmw_surface, identifying the surface to create.
+ */
+static int vmw_gb_surface_create(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_surface *srf = vmw_res_to_srf(res);
+	uint32_t cmd_len, submit_len;
+	int ret;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDefineGBSurface body;
+	} *cmd;
+
+	if (likely(res->id != -1))
+		return 0;
+
+	(void) vmw_3d_resource_inc(dev_priv, false);
+	ret = vmw_resource_alloc_id(res);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed to allocate a surface id.\n");
+		goto out_no_id;
+	}
+
+	if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) {
+		ret = -EBUSY;
+		goto out_no_fifo;
+	}
+
+	cmd_len = sizeof(cmd->body);
+	submit_len = sizeof(*cmd);
+	cmd = vmw_fifo_reserve(dev_priv, submit_len);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for surface "
+			  "creation.\n");
+		ret = -ENOMEM;
+		goto out_no_fifo;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
+	cmd->header.size = cmd_len;
+	cmd->body.sid = srf->res.id;
+	cmd->body.surfaceFlags = srf->flags;
+	cmd->body.format = cpu_to_le32(srf->format);
+	cmd->body.numMipLevels = srf->mip_levels[0];
+	cmd->body.multisampleCount = srf->multisample_count;
+	cmd->body.autogenFilter = srf->autogen_filter;
+	cmd->body.size.width = srf->base_size.width;
+	cmd->body.size.height = srf->base_size.height;
+	cmd->body.size.depth = srf->base_size.depth;
+	vmw_fifo_commit(dev_priv, submit_len);
+
+	return 0;
+
+out_no_fifo:
+	vmw_resource_release_id(res);
+out_no_id:
+	vmw_3d_resource_dec(dev_priv, false);
+	return ret;
+}
+
+
+static int vmw_gb_surface_bind(struct vmw_resource *res,
+			       struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdBindGBSurface body;
+	} *cmd1;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdUpdateGBSurface body;
+	} *cmd2;
+	uint32_t submit_size;
+	struct ttm_buffer_object *bo = val_buf->bo;
+
+	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+	submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);
+
+	cmd1 = vmw_fifo_reserve(dev_priv, submit_size);
+	if (unlikely(cmd1 == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for surface "
+			  "binding.\n");
+		return -ENOMEM;
+	}
+
+	cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
+	cmd1->header.size = sizeof(cmd1->body);
+	cmd1->body.sid = res->id;
+	cmd1->body.mobid = bo->mem.start;
+	if (res->backup_dirty) {
+		cmd2 = (void *) &cmd1[1];
+		cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
+		cmd2->header.size = sizeof(cmd2->body);
+		cmd2->body.sid = res->id;
+		res->backup_dirty = false;
+	}
+	vmw_fifo_commit(dev_priv, submit_size);
+
+	return 0;
+}
+
+static int vmw_gb_surface_unbind(struct vmw_resource *res,
+				 bool readback,
+				 struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct ttm_buffer_object *bo = val_buf->bo;
+	struct vmw_fence_obj *fence;
+
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdReadbackGBSurface body;
+	} *cmd1;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdBindGBSurface body;
+	} *cmd2;
+	uint32_t submit_size;
+	uint8_t *cmd;
+
+
+	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
+	cmd = vmw_fifo_reserve(dev_priv, submit_size);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for surface "
+			  "unbinding.\n");
+		return -ENOMEM;
+	}
+
+	cmd2 = (void *) cmd;
+	if (readback) {
+		cmd1 = (void *) cmd;
+		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
+		cmd1->header.size = sizeof(cmd1->body);
+		cmd1->body.sid = res->id;
+		cmd2 = (void *) &cmd1[1];
+	}
+	cmd2->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
+	cmd2->header.size = sizeof(cmd2->body);
+	cmd2->body.sid = res->id;
+	cmd2->body.mobid = SVGA3D_INVALID_ID;
+
+	vmw_fifo_commit(dev_priv, submit_size);
+
+	/*
+	 * Create a fence object and fence the backup buffer.
+	 */
+
+	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
+					  &fence, NULL);
+
+	vmw_fence_single_bo(val_buf->bo, fence);
+
+	if (likely(fence != NULL))
+		vmw_fence_obj_unreference(&fence);
+
+	return 0;
+}
+
+static int vmw_gb_surface_destroy(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDestroyGBSurface body;
+	} *cmd;
+
+	if (likely(res->id == -1))
+		return 0;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for surface "
+			  "destruction.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.sid = res->id;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_resource_release_id(res);
+	vmw_3d_resource_dec(dev_priv, false);
+
+	return 0;
+}
+
+/**
+ * vmw_gb_surface_define_ioctl - Ioctl function implementing
+ * the user surface define functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @data: Pointer to data copied from / to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct vmw_user_surface *user_srf;
+	struct vmw_surface *srf;
+	struct vmw_resource *res;
+	struct vmw_resource *tmp;
+	union drm_vmw_gb_surface_create_arg *arg =
+	    (union drm_vmw_gb_surface_create_arg *)data;
+	struct drm_vmw_gb_surface_create_req *req = &arg->req;
+	struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	int ret;
+	uint32_t size;
+	struct vmw_master *vmaster = vmw_master(file_priv->master);
+	const struct svga3d_surface_desc *desc;
+	uint32_t backup_handle;
+
+	if (unlikely(vmw_user_surface_size == 0))
+		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
+			128;
+
+	size = vmw_user_surface_size + 128;
+
+	desc = svga3dsurface_get_desc(req->format);
+	if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
+		DRM_ERROR("Invalid surface format for surface creation.\n");
+		return -EINVAL;
+	}
+
+	ret = ttm_read_lock(&vmaster->lock, true);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+				   size, false, true);
+	if (unlikely(ret != 0)) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Out of graphics memory for surface"
+				  " creation.\n");
+		goto out_unlock;
+	}
+
+	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
+	if (unlikely(user_srf == NULL)) {
+		ret = -ENOMEM;
+		goto out_no_user_srf;
+	}
+
+	srf = &user_srf->srf;
+	res = &srf->res;
+
+	srf->flags = req->svga3d_flags;
+	srf->format = req->format;
+	srf->scanout = req->drm_surface_flags & drm_vmw_surface_flag_scanout;
+	srf->mip_levels[0] = req->mip_levels;
+	srf->num_sizes = 1;
+	srf->sizes = NULL;
+	srf->offsets = NULL;
+	user_srf->size = size;
+	srf->base_size = req->base_size;
+	srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
+	srf->multisample_count = req->multisample_count;
+	res->backup_size = svga3dsurface_get_serialized_size
+	  (srf->format, srf->base_size, srf->mip_levels[0],
+	   srf->flags & SVGA3D_SURFACE_CUBEMAP);
+
+	user_srf->prime.base.shareable = false;
+	user_srf->prime.base.tfile = NULL;
+
+	/*
+	 * From this point, the generic resource management functions
+	 * destroy the object on failure.
+	 */
+
+	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
+	if (unlikely(ret != 0))
+		goto out_unlock;
+
+	if (req->buffer_handle != SVGA3D_INVALID_ID) {
+		ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
+					     &res->backup);
+		if (ret == 0)
+			backup_handle = req->buffer_handle;
+	} else if (req->drm_surface_flags &
+		   drm_vmw_surface_flag_create_buffer)
+		ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
+					    res->backup_size,
+					    req->drm_surface_flags &
+					    drm_vmw_surface_flag_shareable,
+					    &backup_handle,
+					    &res->backup);
+
+	if (unlikely(ret != 0)) {
+		vmw_resource_unreference(&res);
+		goto out_unlock;
+	}
+
+	tmp = vmw_resource_reference(&srf->res);
+	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
+				    req->drm_surface_flags &
+				    drm_vmw_surface_flag_shareable,
+				    VMW_RES_SURFACE,
+				    &vmw_user_surface_base_release, NULL);
+
+	if (unlikely(ret != 0)) {
+		vmw_resource_unreference(&tmp);
+		vmw_resource_unreference(&res);
+		goto out_unlock;
+	}
+
+	rep->handle = user_srf->prime.base.hash.key;
+	rep->backup_size = res->backup_size;
+	if (res->backup) {
+		rep->buffer_map_handle =
+			drm_vma_node_offset_addr(&res->backup->base.vma_node);
+		rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
+		rep->buffer_handle = backup_handle;
+	} else {
+		rep->buffer_map_handle = 0;
+		rep->buffer_size = 0;
+		rep->buffer_handle = SVGA3D_INVALID_ID;
+	}
+
+	vmw_resource_unreference(&res);
+
+	ttm_read_unlock(&vmaster->lock);
+	return 0;
+out_no_user_srf:
+	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+out_unlock:
+	ttm_read_unlock(&vmaster->lock);
+	return ret;
+}
+
+/**
+ * vmw_gb_surface_reference_ioctl - Ioctl function implementing
+ * the user surface reference functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @data: Pointer to data copied from / to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
+				   struct drm_file *file_priv)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	union drm_vmw_gb_surface_reference_arg *arg =
+	    (union drm_vmw_gb_surface_reference_arg *)data;
+	struct drm_vmw_surface_arg *req = &arg->req;
+	struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep;
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct vmw_surface *srf;
+	struct vmw_user_surface *user_srf;
+	struct ttm_base_object *base;
+	uint32_t backup_handle;
+	int ret = -EINVAL;
+
+	base = ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid);
+	if (unlikely(base == NULL)) {
+		DRM_ERROR("Could not find surface to reference.\n");
+		return -EINVAL;
+	}
+
+	if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE))
+		goto out_bad_resource;
+
+	user_srf = container_of(base, struct vmw_user_surface, prime.base);
+	srf = &user_srf->srf;
+	if (srf->res.backup == NULL) {
+		DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
+		goto out_bad_resource;
+	}
+
+	ret = ttm_ref_object_add(tfile, &user_srf->prime.base,
+				 TTM_REF_USAGE, NULL);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Could not add a reference to a GB surface.\n");
+		goto out_bad_resource;
+	}
+
+	mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
+	ret = vmw_user_dmabuf_reference(tfile, srf->res.backup,
+					&backup_handle);
+	mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Could not add a reference to a GB surface "
+			  "backup buffer.\n");
+		(void) ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+						 req->sid,
+						 TTM_REF_USAGE);
+		goto out_bad_resource;
+	}
+
+	rep->creq.svga3d_flags = srf->flags;
+	rep->creq.format = srf->format;
+	rep->creq.mip_levels = srf->mip_levels[0];
+	rep->creq.drm_surface_flags = 0;
+	rep->creq.multisample_count = srf->multisample_count;
+	rep->creq.autogen_filter = srf->autogen_filter;
+	rep->creq.buffer_handle = backup_handle;
+	rep->creq.base_size = srf->base_size;
+	rep->crep.handle = user_srf->prime.base.hash.key;
+	rep->crep.backup_size = srf->res.backup_size;
+	rep->crep.buffer_handle = backup_handle;
+	rep->crep.buffer_map_handle =
+		drm_vma_node_offset_addr(&srf->res.backup->base.vma_node);
+	rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;
+
+out_bad_resource:
+	ttm_base_object_unref(&base);
+
+	return ret;
+}
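
Closing aside (illustrative, not part of the patch): a sketch of how user space might drive the new define ioctl through libdrm. The argument union and surface flags come from vmwgfx_drm.h and drmCommandWriteRead() is the usual libdrm entry point, but the format value and the local SVGA3D_INVALID_ID definition are placeholder assumptions:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include "vmwgfx_drm.h"

/* Assumed here; the real value comes from the SVGA device headers. */
#ifndef SVGA3D_INVALID_ID
#define SVGA3D_INVALID_ID ((uint32_t) -1)
#endif

/* Sketch: define a GB surface and ask the kernel to allocate the
 * backup buffer for it (drm_vmw_surface_flag_create_buffer). */
static int gb_surface_create_sketch(int fd, uint32_t *handle)
{
	union drm_vmw_gb_surface_create_arg arg;
	int ret;

	memset(&arg, 0, sizeof(arg));
	arg.req.base_size.width = 256;
	arg.req.base_size.height = 256;
	arg.req.base_size.depth = 1;
	arg.req.mip_levels = 1;
	arg.req.format = 2;			/* placeholder format id */
	arg.req.buffer_handle = SVGA3D_INVALID_ID;
	arg.req.drm_surface_flags = drm_vmw_surface_flag_create_buffer;

	ret = drmCommandWriteRead(fd, DRM_VMW_GB_SURFACE_CREATE,
				  &arg, sizeof(arg));
	if (ret != 0)
		return ret;

	*handle = arg.rep.handle;	/* for later reference / unref */
	return 0;
}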