@@ -35,6 +35,23 @@
 
 #define VMW_RES_HT_ORDER 12
 
+/*
+ * struct vmw_relocation - Buffer object relocation
+ *
+ * @head: List head for the command submission context's relocation list
+ * @vbo: Non ref-counted pointer to buffer object
+ * @mob_loc: Pointer to location for mob id to be modified
+ * @location: Pointer to location for guest pointer to be modified
+ */
+struct vmw_relocation {
+	struct list_head head;
+	struct vmw_buffer_object *vbo;
+	union {
+		SVGAMobId *mob_loc;
+		SVGAGuestPtr *location;
+	};
+};
+
 /**
  * enum vmw_resource_relocation_type - Relocation type for resources
  *
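The new struct vmw_relocation replaces the old index-based relocation slots with list entries allocated from the validation context (see vmw_translate_mob_ptr()/vmw_translate_guest_ptr() further down). For orientation, a condensed sketch of the fixup pass that consumes these entries after buffer validation; it is modeled on the driver's vmw_apply_relocations(), which is outside this excerpt, so treat the details as illustrative:

    struct vmw_relocation *reloc;

    list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
    	struct ttm_buffer_object *bo = &reloc->vbo->base;

    	switch (bo->mem.mem_type) {
    	case VMW_PL_MOB:
    		*reloc->mob_loc = bo->mem.start;	/* patch mob id */
    		break;
    	case VMW_PL_GMR:
    		reloc->location->gmrId = bo->mem.start;	/* patch guest ptr */
    		break;
    	default:
    		break;	/* VRAM placements resolve via the framebuffer GMR */
    	}
    }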
@@ -69,35 +86,18 @@ struct vmw_resource_relocation {
 	enum vmw_resource_relocation_type rel_type:3;
 };
 
-/**
- * struct vmw_resource_val_node - Validation info for resources
- *
- * @head: List head for the software context's resource list.
- * @hash: Hash entry for quick resouce to val_node lookup.
- * @res: Ref-counted pointer to the resource.
- * @switch_backup: Boolean whether to switch backup buffer on unreserve.
- * @new_backup: Refcounted pointer to the new backup buffer.
- * @staged_bindings: If @res is a context, tracks bindings set up during
- * the command batch. Otherwise NULL.
- * @new_backup_offset: New backup buffer offset if @new_backup is non-NUll.
- * @first_usage: Set to true the first time the resource is referenced in
- * the command stream.
- * @switching_backup: The command stream provides a new backup buffer for a
- * resource.
- * @no_buffer_needed: This means @switching_backup is true on first buffer
- * reference. So resource reservation does not need to allocate a backup
- * buffer for the resource.
+/*
+ * struct vmw_ctx_validation_info - Extra validation metadata for contexts
+ * @head: List head of context list
+ * @ctx: The context resource
+ * @cur: The context's persistent binding state
+ * @staged: The binding state changes of this command buffer
  */
-struct vmw_resource_val_node {
+struct vmw_ctx_validation_info {
 	struct list_head head;
-	struct drm_hash_item hash;
-	struct vmw_resource *res;
-	struct vmw_buffer_object *new_backup;
-	struct vmw_ctx_binding_state *staged_bindings;
-	unsigned long new_backup_offset;
-	u32 first_usage : 1;
-	u32 switching_backup : 1;
-	u32 no_buffer_needed : 1;
+	struct vmw_resource *ctx;
+	struct vmw_ctx_binding_state *cur;
+	struct vmw_ctx_binding_state *staged;
 };
 
 /**
@@ -127,10 +127,6 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 				 struct vmw_sw_context *sw_context,
 				 SVGAMobId *id,
 				 struct vmw_buffer_object **vmw_bo_p);
-static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
-				   struct vmw_buffer_object *vbo,
-				   bool validate_as_mob,
-				   uint32_t *p_val_node);
 /**
  * vmw_ptr_diff - Compute the offset from a to b in bytes
  *
@@ -145,48 +141,38 @@ static size_t vmw_ptr_diff(void *a, void *b)
 }
 
 /**
- * vmw_resources_unreserve - unreserve resources previously reserved for
- * command submission.
- *
- * @sw_context: pointer to the software context
- * @backoff: Whether command submission failed.
+ * vmw_execbuf_bindings_commit - Commit modified binding state
+ * @sw_context: The command submission context
+ * @backoff: Whether this is part of the error path and binding state
+ * changes should be ignored
  */
-static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
-				    bool backoff)
+static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
+					bool backoff)
 {
-	struct vmw_resource_val_node *val;
-	struct list_head *list = &sw_context->resource_list;
+	struct vmw_ctx_validation_info *entry;
 
-	if (sw_context->dx_query_mob && !backoff)
-		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
-					  sw_context->dx_query_mob);
+	list_for_each_entry(entry, &sw_context->ctx_list, head) {
+		if (!backoff)
+			vmw_binding_state_commit(entry->cur, entry->staged);
+		if (entry->staged != sw_context->staged_bindings)
+			vmw_binding_state_free(entry->staged);
+		else
+			sw_context->staged_bindings_inuse = false;
+	}
 
-	list_for_each_entry(val, list, head) {
-		struct vmw_resource *res = val->res;
-		bool switch_backup =
-		    (backoff) ? false : val->switching_backup;
-
-		/*
-		 * Transfer staged context bindings to the
-		 * persistent context binding tracker.
-		 */
-		if (unlikely(val->staged_bindings)) {
-			if (!backoff) {
-				vmw_binding_state_commit
-					(vmw_context_binding_state(val->res),
-					 val->staged_bindings);
-			}
+	/* List entries are freed with the validation context */
+	INIT_LIST_HEAD(&sw_context->ctx_list);
+}
 
-			if (val->staged_bindings != sw_context->staged_bindings)
-				vmw_binding_state_free(val->staged_bindings);
-			else
-				sw_context->staged_bindings_inuse = false;
-			val->staged_bindings = NULL;
-		}
-		vmw_resource_unreserve(res, switch_backup, val->new_backup,
-				       val->new_backup_offset);
-		vmw_bo_unreference(&val->new_backup);
-	}
+/**
+ * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
+ * @sw_context: The command submission context
+ */
+static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
+{
+	if (sw_context->dx_query_mob)
+		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
+					  sw_context->dx_query_mob);
 }
 
 /**
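vmw_execbuf_bindings_commit() and vmw_bind_dx_query_mob() split the old vmw_resources_unreserve() into two single-purpose steps: staged bindings are committed per context (or dropped on @backoff), and the DX query MOB binding is applied separately. A hedged sketch of the intended call order on the submission path; the surrounding error handling is not part of this hunk:

    /* Success path: publish staged bindings, then bind the query MOB. */
    vmw_execbuf_bindings_commit(sw_context, false);
    vmw_bind_dx_query_mob(sw_context);

    /* Error path: discard staged binding changes. */
    vmw_execbuf_bindings_commit(sw_context, true);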
@@ -194,16 +180,17 @@ static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
  * added to the validate list.
  *
  * @dev_priv: Pointer to the device private:
- * @sw_context: The validation context:
- * @node: The validation node holding this context.
+ * @sw_context: The command submission context
+ * @node: The validation node holding the context resource metadata
  */
 static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
 				   struct vmw_sw_context *sw_context,
-				   struct vmw_resource_val_node *node)
+				   struct vmw_resource *res,
+				   struct vmw_ctx_validation_info *node)
 {
 	int ret;
 
-	ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
+	ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
 	if (unlikely(ret != 0))
 		goto out_err;
 
@@ -220,91 +207,138 @@ static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
 	}
 
 	if (sw_context->staged_bindings_inuse) {
-		node->staged_bindings = vmw_binding_state_alloc(dev_priv);
-		if (IS_ERR(node->staged_bindings)) {
+		node->staged = vmw_binding_state_alloc(dev_priv);
+		if (IS_ERR(node->staged)) {
 			DRM_ERROR("Failed to allocate context binding "
 				  "information.\n");
-			ret = PTR_ERR(node->staged_bindings);
-			node->staged_bindings = NULL;
+			ret = PTR_ERR(node->staged);
+			node->staged = NULL;
 			goto out_err;
 		}
 	} else {
-		node->staged_bindings = sw_context->staged_bindings;
+		node->staged = sw_context->staged_bindings;
 		sw_context->staged_bindings_inuse = true;
 	}
 
+	node->ctx = res;
+	node->cur = vmw_context_binding_state(res);
+	list_add_tail(&node->head, &sw_context->ctx_list);
+
 	return 0;
 out_err:
 	return ret;
 }
 
 /**
- * vmw_resource_val_add - Add a resource to the software context's
- * resource list if it's not already on it.
+ * vmw_execbuf_res_size - calculate extra size for the resource validation
+ * node
+ * @dev_priv: Pointer to the device private struct.
+ * @res_type: The resource type.
  *
- * @sw_context: Pointer to the software context.
+ * Guest-backed contexts and DX contexts require extra size to store
+ * execbuf private information in the validation node. Typically the
+ * binding manager associated data structures.
+ *
+ * Returns: The extra size requirement based on resource type.
+ */
+static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
+					 enum vmw_res_type res_type)
+{
+	return (res_type == vmw_res_dx_context ||
+		(res_type == vmw_res_context && dev_priv->has_mob)) ?
+		sizeof(struct vmw_ctx_validation_info) : 0;
+}
+
+/**
+ * vmw_execbuf_rcache_update - Update a resource-node cache entry
+ *
+ * @rcache: Pointer to the entry to update.
  * @res: Pointer to the resource.
- * @p_node On successful return points to a valid pointer to a
- * struct vmw_resource_val_node, if non-NULL on entry.
+ * @private: Pointer to the execbuf-private space in the resource
+ * validation node.
+ */
+static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
+				      struct vmw_resource *res,
+				      void *private)
+{
+	rcache->res = res;
+	rcache->private = private;
+	rcache->valid = 1;
+	rcache->valid_handle = 0;
+}
+
+/**
+ * vmw_execbuf_res_noref_val_add - Add a resource described by an
+ * unreferenced rcu-protected pointer to the validation list.
+ * @sw_context: Pointer to the software context.
+ * @res: Unreferenced rcu-protected pointer to the resource.
+ *
+ * Returns: 0 on success. Negative error code on failure. Typical error
+ * codes are %-EINVAL on inconsistency and %-ESRCH if the resource was
+ * doomed.
  */
-static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
-				struct vmw_resource *res,
-				struct vmw_resource_val_node **p_node)
+static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
+					 struct vmw_resource *res)
 {
 	struct vmw_private *dev_priv = res->dev_priv;
-	struct vmw_resource_val_node *node;
-	struct drm_hash_item *hash;
 	int ret;
-
-	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
-				    &hash) == 0)) {
-		node = container_of(hash, struct vmw_resource_val_node, hash);
-		node->first_usage = false;
-		if (unlikely(p_node != NULL))
-			*p_node = node;
+	enum vmw_res_type res_type = vmw_res_type(res);
+	struct vmw_res_cache_entry *rcache;
+	struct vmw_ctx_validation_info *ctx_info;
+	bool first_usage;
+	unsigned int priv_size;
+
+	rcache = &sw_context->res_cache[res_type];
+	if (likely(rcache->valid && rcache->res == res)) {
+		vmw_user_resource_noref_release();
 		return 0;
 	}
 
-	node = kzalloc(sizeof(*node), GFP_KERNEL);
-	if (unlikely(!node)) {
-		DRM_ERROR("Failed to allocate a resource validation "
-			  "entry.\n");
-		return -ENOMEM;
-	}
-
-	node->hash.key = (unsigned long) res;
-	ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
-	if (unlikely(ret != 0)) {
-		DRM_ERROR("Failed to initialize a resource validation "
-			  "entry.\n");
-		kfree(node);
+	priv_size = vmw_execbuf_res_size(dev_priv, res_type);
+	ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
+					  (void **)&ctx_info, &first_usage);
+	vmw_user_resource_noref_release();
+	if (ret)
 		return ret;
+
+	if (priv_size && first_usage) {
+		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
+					      ctx_info);
+		if (ret)
+			return ret;
 	}
-	node->res = vmw_resource_reference(res);
-	node->first_usage = true;
-	if (unlikely(p_node != NULL))
-		*p_node = node;
 
-	if (!dev_priv->has_mob) {
-		list_add_tail(&node->head, &sw_context->resource_list);
+	vmw_execbuf_rcache_update(rcache, res, ctx_info);
+	return 0;
+}
+
+/**
+ * vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource
+ * validation list if it's not already on it
+ * @sw_context: Pointer to the software context.
+ * @res: Pointer to the resource.
+ *
+ * Returns: Zero on success. Negative error code on failure.
+ */
+static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
+					 struct vmw_resource *res)
+{
+	struct vmw_res_cache_entry *rcache;
+	enum vmw_res_type res_type = vmw_res_type(res);
+	void *ptr;
+	int ret;
+
+	rcache = &sw_context->res_cache[res_type];
+	if (likely(rcache->valid && rcache->res == res))
 		return 0;
-	}
 
-	switch (vmw_res_type(res)) {
-	case vmw_res_context:
-	case vmw_res_dx_context:
-		list_add(&node->head, &sw_context->ctx_resource_list);
-		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
-		break;
-	case vmw_res_cotable:
-		list_add_tail(&node->head, &sw_context->ctx_resource_list);
-		break;
-	default:
-		list_add_tail(&node->head, &sw_context->resource_list);
-		break;
-	}
+	ret = vmw_validation_add_resource(sw_context->ctx, res, 0, &ptr, NULL);
+	if (ret)
+		return ret;
 
-	return ret;
+	vmw_execbuf_rcache_update(rcache, res, ptr);
+
+	return 0;
 }
 
 /**
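Both helpers above hinge on struct vmw_res_cache_entry, whose declaration lives outside this excerpt. The fields below are inferred from their use in this patch and may not be complete or exactly typed:

    struct vmw_res_cache_entry {
    	struct vmw_resource *res;	/* last resource of this type seen */
    	void *private;			/* execbuf-private validation data */
    	uint32_t handle;		/* cached user-space handle */
    	bool valid;			/* res/private are usable */
    	bool valid_handle;		/* handle corresponds to res */
    };

A cache hit means the command references the same resource as the previous command of that type, so the refcounting and validation-list bookkeeping can be skipped entirely.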
@@ -325,11 +359,11 @@ static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
 	 * First add the resource the view is pointing to, otherwise
 	 * it may be swapped out when the view is validated.
 	 */
-	ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
+	ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view));
 	if (ret)
 		return ret;
 
-	return vmw_resource_val_add(sw_context, view, NULL);
+	return vmw_execbuf_res_noctx_val_add(sw_context, view);
 }
 
 /**
@@ -342,28 +376,33 @@ static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
  *
  * The view is represented by a view id and the DX context it's created on,
  * or scheduled for creation on. If there is no DX context set, the function
- * will return -EINVAL. Otherwise returns 0 on success and -EINVAL on failure.
+ * will return an -EINVAL error pointer.
+ *
+ * Returns: Unreferenced pointer to the resource on success, negative error
+ * pointer on failure.
  */
-static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
-			       enum vmw_view_type view_type, u32 id)
+static struct vmw_resource *
+vmw_view_id_val_add(struct vmw_sw_context *sw_context,
+		    enum vmw_view_type view_type, u32 id)
 {
-	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
 	struct vmw_resource *view;
 	int ret;
 
 	if (!ctx_node) {
 		DRM_ERROR("DX Context not set.\n");
-		return -EINVAL;
+		return ERR_PTR(-EINVAL);
 	}
 
 	view = vmw_view_lookup(sw_context->man, view_type, id);
 	if (IS_ERR(view))
-		return PTR_ERR(view);
+		return view;
 
 	ret = vmw_view_res_val_add(sw_context, view);
-	vmw_resource_unreference(&view);
+	if (ret)
+		return ERR_PTR(ret);
 
-	return ret;
+	return view;
 }
 
 /**
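vmw_view_id_val_add() now hands back the looked-up view itself, multiplexing the pointer and the error code through the kernel's ERR_PTR()/IS_ERR() convention. A minimal caller sketch, assuming a render-target view id as in the clear commands further down:

    struct vmw_resource *view;

    view = vmw_view_id_val_add(sw_context, vmw_view_rt,
    			       cmd->body.renderTargetViewId);
    if (IS_ERR(view))
    	return PTR_ERR(view);	/* unwrap the encoded errno */

    /* "view" is valid but unreferenced; it stays alive because the
     * validation list now holds it, so no vmw_resource_unreference(). */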
@@ -394,8 +433,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
 		if (IS_ERR(res))
 			continue;
 
-		ret = vmw_resource_val_add(sw_context, res, NULL);
-		vmw_resource_unreference(&res);
+		ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
 		if (unlikely(ret != 0))
 			return ret;
 	}
@@ -407,17 +445,11 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
 	binding_list = vmw_context_binding_list(ctx);
 
 	list_for_each_entry(entry, binding_list, ctx_list) {
-		/* entry->res is not refcounted */
-		res = vmw_resource_reference_unless_doomed(entry->res);
-		if (unlikely(res == NULL))
-			continue;
-
 		if (vmw_res_type(entry->res) == vmw_res_view)
 			ret = vmw_view_res_val_add(sw_context, entry->res);
 		else
-			ret = vmw_resource_val_add(sw_context, entry->res,
-						   NULL);
-		vmw_resource_unreference(&res);
+			ret = vmw_execbuf_res_noctx_val_add(sw_context,
+							    entry->res);
 		if (unlikely(ret != 0))
 			break;
 	}
@@ -427,9 +459,8 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
 
 	dx_query_mob = vmw_context_get_dx_query_mob(ctx);
 	if (dx_query_mob)
-		ret = vmw_bo_to_validate_list(sw_context,
-					      dx_query_mob,
-					      true, NULL);
+		ret = vmw_validation_add_bo(sw_context->ctx,
+					    dx_query_mob, true, false);
 	}
 
 	mutex_unlock(&dev_priv->binding_mutex);
@@ -445,7 +476,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
  * id that needs fixup is located. Granularity is one byte.
  * @rel_type: Relocation type.
  */
-static int vmw_resource_relocation_add(struct list_head *list,
+static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
 				       const struct vmw_resource *res,
 				       unsigned long offset,
 				       enum vmw_resource_relocation_type
@@ -453,7 +484,7 @@ static int vmw_resource_relocation_add(struct list_head *list,
 {
 	struct vmw_resource_relocation *rel;
 
-	rel = kmalloc(sizeof(*rel), GFP_KERNEL);
+	rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
 	if (unlikely(!rel)) {
 		DRM_ERROR("Failed to allocate a resource relocation.\n");
 		return -ENOMEM;
@@ -462,7 +493,7 @@ static int vmw_resource_relocation_add(struct list_head *list,
 	rel->res = res;
 	rel->offset = offset;
 	rel->rel_type = rel_type;
-	list_add_tail(&rel->head, list);
+	list_add_tail(&rel->head, &sw_context->res_relocations);
 
 	return 0;
 }
@@ -470,16 +501,13 @@ static int vmw_resource_relocation_add(struct list_head *list,
 /**
  * vmw_resource_relocations_free - Free all relocations on a list
  *
- * @list: Pointer to the head of the relocation list.
+ * @list: Pointer to the head of the relocation list
 */
 static void vmw_resource_relocations_free(struct list_head *list)
 {
-	struct vmw_resource_relocation *rel, *n;
+	/* Memory is validation context memory, so no need to free it */
 
-	list_for_each_entry_safe(rel, n, list, head) {
-		list_del(&rel->head);
-		kfree(rel);
-	}
+	INIT_LIST_HEAD(list);
 }
 
 /**
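Relocation records are now carved out of the validation context's own allocator, so their lifetime is exactly that of the submission and the per-entry kfree() disappears. The new allocation pattern, condensed from vmw_resource_relocation_add() above with error handling trimmed:

    struct vmw_resource_relocation *rel;

    rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
    if (!rel)
    	return -ENOMEM;
    list_add_tail(&rel->head, &sw_context->res_relocations);
    /* No matching kfree(): the memory is reclaimed wholesale when the
     * validation context (sw_context->ctx) is torn down. */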
@@ -531,68 +559,6 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
 	return 0;
 }
 
-/**
- * vmw_bo_to_validate_list - add a bo to a validate list
- *
- * @sw_context: The software context used for this command submission batch.
- * @bo: The buffer object to add.
- * @validate_as_mob: Validate this buffer as a MOB.
- * @p_val_node: If non-NULL Will be updated with the validate node number
- * on return.
- *
- * Returns -EINVAL if the limit of number of buffer objects per command
- * submission is reached.
- */
-static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
-				   struct vmw_buffer_object *vbo,
-				   bool validate_as_mob,
-				   uint32_t *p_val_node)
-{
-	uint32_t val_node;
-	struct vmw_validate_buffer *vval_buf;
-	struct ttm_validate_buffer *val_buf;
-	struct drm_hash_item *hash;
-	int ret;
-
-	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
-				    &hash) == 0)) {
-		vval_buf = container_of(hash, struct vmw_validate_buffer,
-					hash);
-		if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
-			DRM_ERROR("Inconsistent buffer usage.\n");
-			return -EINVAL;
-		}
-		val_buf = &vval_buf->base;
-		val_node = vval_buf - sw_context->val_bufs;
-	} else {
-		val_node = sw_context->cur_val_buf;
-		if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
-			DRM_ERROR("Max number of DMA buffers per submission "
-				  "exceeded.\n");
-			return -EINVAL;
-		}
-		vval_buf = &sw_context->val_bufs[val_node];
-		vval_buf->hash.key = (unsigned long) vbo;
-		ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
-		if (unlikely(ret != 0)) {
-			DRM_ERROR("Failed to initialize a buffer validation "
-				  "entry.\n");
-			return ret;
-		}
-		++sw_context->cur_val_buf;
-		val_buf = &vval_buf->base;
-		val_buf->bo = ttm_bo_reference(&vbo->base);
-		val_buf->shared = false;
-		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
-		vval_buf->validate_as_mob = validate_as_mob;
-	}
-
-	if (p_val_node)
-		*p_val_node = val_node;
-
-	return 0;
-}
-
 /**
  * vmw_resources_reserve - Reserve all resources on the sw_context's
  * resource list.
@@ -605,27 +571,11 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
  */
 static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
 {
-	struct vmw_resource_val_node *val;
-	int ret = 0;
-
-	list_for_each_entry(val, &sw_context->resource_list, head) {
-		struct vmw_resource *res = val->res;
-
-		ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
-		if (unlikely(ret != 0))
-			return ret;
-
-		if (res->backup) {
-			struct vmw_buffer_object *vbo = res->backup;
-
-			ret = vmw_bo_to_validate_list
-				(sw_context, vbo,
-				 vmw_resource_needs_backup(res), NULL);
+	int ret;
 
-			if (unlikely(ret != 0))
-				return ret;
-		}
-	}
+	ret = vmw_validation_res_reserve(sw_context->ctx, true);
+	if (ret)
+		return ret;
 
 	if (sw_context->dx_query_mob) {
 		struct vmw_buffer_object *expected_dx_query_mob;
@@ -641,87 +591,6 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
 	return ret;
 }
 
-/**
- * vmw_resources_validate - Validate all resources on the sw_context's
- * resource list.
- *
- * @sw_context: Pointer to the software context.
- *
- * Before this function is called, all resource backup buffers must have
- * been validated.
- */
-static int vmw_resources_validate(struct vmw_sw_context *sw_context)
-{
-	struct vmw_resource_val_node *val;
-	int ret;
-
-	list_for_each_entry(val, &sw_context->resource_list, head) {
-		struct vmw_resource *res = val->res;
-		struct vmw_buffer_object *backup = res->backup;
-
-		ret = vmw_resource_validate(res);
-		if (unlikely(ret != 0)) {
-			if (ret != -ERESTARTSYS)
-				DRM_ERROR("Failed to validate resource.\n");
-			return ret;
-		}
-
-		/* Check if the resource switched backup buffer */
-		if (backup && res->backup && (backup != res->backup)) {
-			struct vmw_buffer_object *vbo = res->backup;
-
-			ret = vmw_bo_to_validate_list
-				(sw_context, vbo,
-				 vmw_resource_needs_backup(res), NULL);
-			if (ret) {
-				ttm_bo_unreserve(&vbo->base);
-				return ret;
-			}
-		}
-	}
-	return 0;
-}
-
-/**
- * vmw_cmd_res_reloc_add - Add a resource to a software context's
- * relocation- and validation lists.
- *
- * @dev_priv: Pointer to a struct vmw_private identifying the device.
- * @sw_context: Pointer to the software context.
- * @id_loc: Pointer to where the id that needs translation is located.
- * @res: Valid pointer to a struct vmw_resource.
- * @p_val: If non null, a pointer to the struct vmw_resource_validate_node
- * used for this resource is returned here.
- */
-static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
-				 struct vmw_sw_context *sw_context,
-				 uint32_t *id_loc,
-				 struct vmw_resource *res,
-				 struct vmw_resource_val_node **p_val)
-{
-	int ret;
-	struct vmw_resource_val_node *node;
-
-	*p_val = NULL;
-	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
-					  res,
-					  vmw_ptr_diff(sw_context->buf_start,
-						       id_loc),
-					  vmw_res_rel_normal);
-	if (unlikely(ret != 0))
-		return ret;
-
-	ret = vmw_resource_val_add(sw_context, res, &node);
-	if (unlikely(ret != 0))
-		return ret;
-
-	if (p_val)
-		*p_val = node;
-
-	return 0;
-}
-
-
 /**
  * vmw_cmd_res_check - Check that a resource is present and if so, put it
  * on the resource validate list unless it's already there.
@@ -741,17 +610,16 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
 		  enum vmw_res_type res_type,
 		  const struct vmw_user_resource_conv *converter,
 		  uint32_t *id_loc,
-		  struct vmw_resource_val_node **p_val)
+		  struct vmw_resource **p_res)
 {
-	struct vmw_res_cache_entry *rcache =
-		&sw_context->res_cache[res_type];
+	struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
 	struct vmw_resource *res;
-	struct vmw_resource_val_node *node;
 	int ret;
 
+	if (p_res)
+		*p_res = NULL;
+
 	if (*id_loc == SVGA3D_INVALID_ID) {
-		if (p_val)
-			*p_val = NULL;
 		if (res_type == vmw_res_context) {
 			DRM_ERROR("Illegal context invalid id.\n");
 			return -EINVAL;
@@ -759,56 +627,41 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
 		return 0;
 	}
 
-	/*
-	 * Fastpath in case of repeated commands referencing the same
-	 * resource
-	 */
+	if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
+		res = rcache->res;
+	} else {
+		unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);
 
-	if (likely(rcache->valid && *id_loc == rcache->handle)) {
-		const struct vmw_resource *res = rcache->res;
+		ret = vmw_validation_preload_res(sw_context->ctx, size);
+		if (ret)
+			return ret;
 
-		rcache->node->first_usage = false;
-		if (p_val)
-			*p_val = rcache->node;
+		res = vmw_user_resource_noref_lookup_handle
+			(dev_priv, sw_context->fp->tfile, *id_loc, converter);
+		if (unlikely(IS_ERR(res))) {
+			DRM_ERROR("Could not find or use resource 0x%08x.\n",
+				  (unsigned int) *id_loc);
+			return PTR_ERR(res);
+		}
 
-		return vmw_resource_relocation_add
-			(&sw_context->res_relocations, res,
-			 vmw_ptr_diff(sw_context->buf_start, id_loc),
-			 vmw_res_rel_normal);
-	}
+		ret = vmw_execbuf_res_noref_val_add(sw_context, res);
+		if (unlikely(ret != 0))
+			return ret;
 
-	ret = vmw_user_resource_lookup_handle(dev_priv,
-					      sw_context->fp->tfile,
-					      *id_loc,
-					      converter,
-					      &res);
-	if (unlikely(ret != 0)) {
-		DRM_ERROR("Could not find or use resource 0x%08x.\n",
-			  (unsigned) *id_loc);
-		dump_stack();
-		return ret;
+		if (rcache->valid && rcache->res == res) {
+			rcache->valid_handle = true;
+			rcache->handle = *id_loc;
+		}
 	}
 
-	rcache->valid = true;
-	rcache->res = res;
-	rcache->handle = *id_loc;
-
-	ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
-				    res, &node);
-	if (unlikely(ret != 0))
-		goto out_no_reloc;
+	ret = vmw_resource_relocation_add(sw_context, res,
+					  vmw_ptr_diff(sw_context->buf_start,
+						       id_loc),
+					  vmw_res_rel_normal);
+	if (p_res)
+		*p_res = res;
 
-	rcache->node = node;
-	if (p_val)
-		*p_val = node;
-	vmw_resource_unreference(&res);
 	return 0;
-
-out_no_reloc:
-	BUG_ON(sw_context->error_resource != NULL);
-	sw_context->error_resource = res;
-
-	return ret;
 }
 
 /**
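The reworked vmw_cmd_res_check() keeps two validity notions per cache entry: valid (pointer identity, used by the *_val_add() helpers) and valid_handle (user-handle identity, used here). Condensed decision logic, for reading only; slow_path() is a hypothetical stand-in for the lookup-and-validate branch above:

    if (rcache->valid_handle && *id_loc == rcache->handle)
    	res = rcache->res;		/* repeated handle: no lookup */
    else
    	res = slow_path(sw_context, id_loc);

    /* Either way a relocation is still recorded, because every command
     * needs its id patched, cached or not. */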
@@ -861,22 +714,18 @@ static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
  */
 static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
 {
-	struct vmw_resource_val_node *val;
+	struct vmw_ctx_validation_info *val;
 	int ret;
 
-	list_for_each_entry(val, &sw_context->resource_list, head) {
-		if (unlikely(!val->staged_bindings))
-			break;
-
-		ret = vmw_binding_rebind_all
-			(vmw_context_binding_state(val->res));
+	list_for_each_entry(val, &sw_context->ctx_list, head) {
+		ret = vmw_binding_rebind_all(val->cur);
 		if (unlikely(ret != 0)) {
 			if (ret != -ERESTARTSYS)
 				DRM_ERROR("Failed to rebind context.\n");
 			return ret;
 		}
 
-		ret = vmw_rebind_all_dx_query(val->res);
+		ret = vmw_rebind_all_dx_query(val->ctx);
 		if (ret != 0)
 			return ret;
 	}
@@ -903,45 +752,33 @@ static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
 				 uint32 view_ids[], u32 num_views,
 				 u32 first_slot)
 {
-	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
-	struct vmw_cmdbuf_res_manager *man;
+	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
 	u32 i;
-	int ret;
 
 	if (!ctx_node) {
 		DRM_ERROR("DX Context not set.\n");
 		return -EINVAL;
 	}
 
-	man = sw_context->man;
 	for (i = 0; i < num_views; ++i) {
 		struct vmw_ctx_bindinfo_view binding;
 		struct vmw_resource *view = NULL;
 
 		if (view_ids[i] != SVGA3D_INVALID_ID) {
-			view = vmw_view_lookup(man, view_type, view_ids[i]);
+			view = vmw_view_id_val_add(sw_context, view_type,
+						   view_ids[i]);
 			if (IS_ERR(view)) {
 				DRM_ERROR("View not found.\n");
 				return PTR_ERR(view);
 			}
-
-			ret = vmw_view_res_val_add(sw_context, view);
-			if (ret) {
-				DRM_ERROR("Could not add view to "
-					  "validation list.\n");
-				vmw_resource_unreference(&view);
-				return ret;
-			}
 		}
-		binding.bi.ctx = ctx_node->res;
+		binding.bi.ctx = ctx_node->ctx;
 		binding.bi.res = view;
 		binding.bi.bt = binding_type;
 		binding.shader_slot = shader_slot;
 		binding.slot = first_slot + i;
-		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+		vmw_binding_add(ctx_node->staged, &binding.bi,
 				shader_slot, binding.slot);
-		if (view)
-			vmw_resource_unreference(&view);
 	}
 
 	return 0;
@@ -971,6 +808,34 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
 				 user_context_converter, &cmd->cid, NULL);
 }
 
+/**
+ * vmw_execbuf_info_from_res - Get the private validation metadata for a
+ * recently validated resource
+ * @sw_context: Pointer to the command submission context
+ * @res: The resource
+ *
+ * The resource pointed to by @res needs to be present in the command submission
+ * context's resource cache and hence the last resource of that type to be
+ * processed by the validation code.
+ *
+ * Return: a pointer to the private metadata of the resource, or NULL
+ * if it wasn't found
+ */
+static struct vmw_ctx_validation_info *
+vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
+			  struct vmw_resource *res)
+{
+	struct vmw_res_cache_entry *rcache =
+		&sw_context->res_cache[vmw_res_type(res)];
+
+	if (rcache->valid && rcache->res == res)
+		return rcache->private;
+
+	WARN_ON_ONCE(true);
+	return NULL;
+}
+
+
 static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
 					   struct vmw_sw_context *sw_context,
 					   SVGA3dCmdHeader *header)
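Typical use of vmw_execbuf_info_from_res() in a command checker, condensed from the render-target and texture-state hunks below: resolve the context first, then immediately fetch its execbuf-private metadata while it is still the cache entry for its type:

    ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
    			    user_context_converter, &cmd->body.cid, &ctx);
    if (ret)
    	return ret;

    node = vmw_execbuf_info_from_res(sw_context, ctx);
    if (!node)
    	return -EINVAL;
    vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);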
@@ -979,8 +844,8 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
 		SVGA3dCmdHeader header;
 		SVGA3dCmdSetRenderTarget body;
 	} *cmd;
-	struct vmw_resource_val_node *ctx_node;
-	struct vmw_resource_val_node *res_node;
+	struct vmw_resource *ctx;
+	struct vmw_resource *res;
 	int ret;
 
 	cmd = container_of(header, struct vmw_sid_cmd, header);
@@ -993,25 +858,29 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
 
 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
 				user_context_converter, &cmd->body.cid,
-				&ctx_node);
+				&ctx);
 	if (unlikely(ret != 0))
 		return ret;
 
 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
-				user_surface_converter,
-				&cmd->body.target.sid, &res_node);
-	if (unlikely(ret != 0))
+				user_surface_converter, &cmd->body.target.sid,
+				&res);
+	if (unlikely(ret))
 		return ret;
 
 	if (dev_priv->has_mob) {
 		struct vmw_ctx_bindinfo_view binding;
+		struct vmw_ctx_validation_info *node;
 
-		binding.bi.ctx = ctx_node->res;
-		binding.bi.res = res_node ? res_node->res : NULL;
+		node = vmw_execbuf_info_from_res(sw_context, ctx);
+		if (!node)
+			return -EINVAL;
+
+		binding.bi.ctx = ctx;
+		binding.bi.res = res;
 		binding.bi.bt = vmw_ctx_binding_rt;
 		binding.slot = cmd->body.type;
-		vmw_binding_add(ctx_node->staged_bindings,
-				&binding.bi, 0, binding.slot);
+		vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
 	}
 
 	return 0;
@@ -1030,8 +899,8 @@ static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
 	cmd = container_of(header, struct vmw_sid_cmd, header);
 
 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
-				user_surface_converter,
-				&cmd->body.src.sid, NULL);
+				user_surface_converter,
+				&cmd->body.src.sid, NULL);
 	if (ret)
 		return ret;
 
@@ -1171,17 +1040,17 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
 
 	if (unlikely(sw_context->cur_query_bo != NULL)) {
 		sw_context->needs_post_query_barrier = true;
-		ret = vmw_bo_to_validate_list(sw_context,
-					      sw_context->cur_query_bo,
-					      dev_priv->has_mob, NULL);
+		ret = vmw_validation_add_bo(sw_context->ctx,
+					    sw_context->cur_query_bo,
+					    dev_priv->has_mob, false);
 		if (unlikely(ret != 0))
 			return ret;
 	}
 	sw_context->cur_query_bo = new_query_bo;
 
-	ret = vmw_bo_to_validate_list(sw_context,
-				      dev_priv->dummy_query_bo,
-				      dev_priv->has_mob, NULL);
+	ret = vmw_validation_add_bo(sw_context->ctx,
+				    dev_priv->dummy_query_bo,
+				    dev_priv->has_mob, false);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -1269,7 +1138,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
  * @sw_context: The software context used for this command batch validation.
  * @id: Pointer to the user-space handle to be translated.
  * @vmw_bo_p: Points to a location that, on successful return will carry
- * a reference-counted pointer to the DMA buffer identified by the
+ * a non-reference-counted pointer to the buffer object identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
@@ -1284,40 +1153,34 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 				 SVGAMobId *id,
 				 struct vmw_buffer_object **vmw_bo_p)
 {
-	struct vmw_buffer_object *vmw_bo = NULL;
+	struct vmw_buffer_object *vmw_bo;
 	uint32_t handle = *id;
 	struct vmw_relocation *reloc;
 	int ret;
 
-	ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL);
-	if (unlikely(ret != 0)) {
+	vmw_validation_preload_bo(sw_context->ctx);
+	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
+	if (IS_ERR(vmw_bo)) {
 		DRM_ERROR("Could not find or use MOB buffer.\n");
-		ret = -EINVAL;
-		goto out_no_reloc;
+		return PTR_ERR(vmw_bo);
 	}
 
-	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
-		DRM_ERROR("Max number relocations per submission"
-			  " exceeded\n");
-		ret = -EINVAL;
-		goto out_no_reloc;
-	}
+	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
+	vmw_user_bo_noref_release();
+	if (unlikely(ret != 0))
+		return ret;
 
-	reloc = &sw_context->relocs[sw_context->cur_reloc++];
-	reloc->mob_loc = id;
-	reloc->location = NULL;
+	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
+	if (!reloc)
+		return -ENOMEM;
 
-	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
-	if (unlikely(ret != 0))
-		goto out_no_reloc;
+	reloc->mob_loc = id;
+	reloc->vbo = vmw_bo;
 
 	*vmw_bo_p = vmw_bo;
-	return 0;
+	list_add_tail(&reloc->head, &sw_context->bo_relocations);
 
-out_no_reloc:
-	vmw_bo_unreference(&vmw_bo);
-	*vmw_bo_p = NULL;
-	return ret;
+	return 0;
 }
 
 /**
@@ -1328,7 +1191,7 @@ out_no_reloc:
  * @sw_context: The software context used for this command batch validation.
  * @ptr: Pointer to the user-space handle to be translated.
  * @vmw_bo_p: Points to a location that, on successful return will carry
- * a reference-counted pointer to the DMA buffer identified by the
+ * a non-reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
@@ -1344,39 +1207,33 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 				   SVGAGuestPtr *ptr,
 				   struct vmw_buffer_object **vmw_bo_p)
 {
-	struct vmw_buffer_object *vmw_bo = NULL;
+	struct vmw_buffer_object *vmw_bo;
 	uint32_t handle = ptr->gmrId;
 	struct vmw_relocation *reloc;
 	int ret;
 
-	ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL);
-	if (unlikely(ret != 0)) {
+	vmw_validation_preload_bo(sw_context->ctx);
+	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
+	if (IS_ERR(vmw_bo)) {
 		DRM_ERROR("Could not find or use GMR region.\n");
-		ret = -EINVAL;
-		goto out_no_reloc;
-	}
-
-	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
-		DRM_ERROR("Max number relocations per submission"
-			  " exceeded\n");
-		ret = -EINVAL;
-		goto out_no_reloc;
+		return PTR_ERR(vmw_bo);
 	}
 
-	reloc = &sw_context->relocs[sw_context->cur_reloc++];
-	reloc->location = ptr;
-
-	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
+	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
+	vmw_user_bo_noref_release();
 	if (unlikely(ret != 0))
-		goto out_no_reloc;
+		return ret;
+
+	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
+	if (!reloc)
+		return -ENOMEM;
 
+	reloc->location = ptr;
+	reloc->vbo = vmw_bo;
 	*vmw_bo_p = vmw_bo;
-	return 0;
+	list_add_tail(&reloc->head, &sw_context->bo_relocations);
 
-out_no_reloc:
-	vmw_bo_unreference(&vmw_bo);
-	*vmw_bo_p = NULL;
-	return ret;
+	return 0;
 }
 
 
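Both translate helpers follow the same noref window: the looked-up pointer is only guaranteed to live until vmw_user_bo_noref_release(), and the preceding preload ensures the code inside the window cannot fail on memory allocation. Condensed:

    vmw_validation_preload_bo(sw_context->ctx);	/* pre-allocate */
    vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
    if (IS_ERR(vmw_bo))
    	return PTR_ERR(vmw_bo);

    ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
    vmw_user_bo_noref_release();	/* closes the window; the validation
    					 * list now keeps the buffer alive */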
@@ -1400,7 +1257,7 @@ static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
 	} *cmd;
 
 	int ret;
-	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
 	struct vmw_resource *cotable_res;
 
 
@@ -1415,9 +1272,8 @@ static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
 	    cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
 		return -EINVAL;
 
-	cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY);
+	cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
 	ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
-	vmw_resource_unreference(&cotable_res);
 
 	return ret;
 }
@@ -1462,11 +1318,8 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
 		return ret;
 
 	sw_context->dx_query_mob = vmw_bo;
-	sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;
-
-	vmw_bo_unreference(&vmw_bo);
-
-	return ret;
+	sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;
+	return 0;
 }
 
 
@@ -1567,7 +1420,6 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
 
 	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
 
-	vmw_bo_unreference(&vmw_bo);
 	return ret;
 }
 
@@ -1621,7 +1473,6 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
 
 	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
 
-	vmw_bo_unreference(&vmw_bo);
 	return ret;
 }
 
@@ -1654,7 +1505,6 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		return ret;
 
-	vmw_bo_unreference(&vmw_bo);
 	return 0;
 }
 
@@ -1706,7 +1556,6 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		return ret;
 
-	vmw_bo_unreference(&vmw_bo);
 	return 0;
 }
 
@@ -1757,7 +1606,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0)) {
 		if (unlikely(ret != -ERESTARTSYS))
 			DRM_ERROR("could not find surface for DMA.\n");
-		goto out_no_surface;
+		return ret;
 	}
 
 	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
@@ -1765,9 +1614,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
 	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
 			     header);
 
-out_no_surface:
-	vmw_bo_unreference(&vmw_bo);
-	return ret;
+	return 0;
 }
 
 static int vmw_cmd_draw(struct vmw_private *dev_priv,
@@ -1837,8 +1684,8 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
 		((unsigned long) header + header->size + sizeof(header));
 	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
 		((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
-	struct vmw_resource_val_node *ctx_node;
-	struct vmw_resource_val_node *res_node;
+	struct vmw_resource *ctx;
+	struct vmw_resource *res;
 	int ret;
 
 	cmd = container_of(header, struct vmw_tex_state_cmd,
@@ -1846,7 +1693,7 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
 
 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
 				user_context_converter, &cmd->state.cid,
-				&ctx_node);
+				&ctx);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -1862,19 +1709,24 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
 
 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 					user_surface_converter,
-					&cur_state->value, &res_node);
+					&cur_state->value, &res);
 		if (unlikely(ret != 0))
 			return ret;
 
 		if (dev_priv->has_mob) {
 			struct vmw_ctx_bindinfo_tex binding;
+			struct vmw_ctx_validation_info *node;
+
+			node = vmw_execbuf_info_from_res(sw_context, ctx);
+			if (!node)
+				return -EINVAL;
 
-			binding.bi.ctx = ctx_node->res;
-			binding.bi.res = res_node ? res_node->res : NULL;
+			binding.bi.ctx = ctx;
+			binding.bi.res = res;
 			binding.bi.bt = vmw_ctx_binding_tex;
 			binding.texture_stage = cur_state->stage;
-			vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
-					0, binding.texture_stage);
+			vmw_binding_add(node->staged, &binding.bi, 0,
+					binding.texture_stage);
 		}
 	}
 
@@ -1893,14 +1745,9 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
 		SVGAFifoCmdDefineGMRFB body;
 	} *cmd = buf;
 
-	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
-				      &cmd->body.ptr,
-				      &vmw_bo);
-	if (unlikely(ret != 0))
-		return ret;
-
-	vmw_bo_unreference(&vmw_bo);
-
+	return vmw_translate_guest_ptr(dev_priv, sw_context,
+				       &cmd->body.ptr,
+				       &vmw_bo);
 	return ret;
 }
 
@@ -1922,25 +1769,24 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
  */
 static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
 				     struct vmw_sw_context *sw_context,
-				     struct vmw_resource_val_node *val_node,
+				     struct vmw_resource *res,
 				     uint32_t *buf_id,
 				     unsigned long backup_offset)
 {
-	struct vmw_buffer_object *dma_buf;
+	struct vmw_buffer_object *vbo;
+	void *info;
 	int ret;
 
-	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
+	info = vmw_execbuf_info_from_res(sw_context, res);
+	if (!info)
+		return -EINVAL;
+
+	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
 	if (ret)
 		return ret;
 
-	val_node->switching_backup = true;
-	if (val_node->first_usage)
-		val_node->no_buffer_needed = true;
-
-	vmw_bo_unreference(&val_node->new_backup);
-	val_node->new_backup = dma_buf;
-	val_node->new_backup_offset = backup_offset;
-
+	vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
+					 backup_offset);
 	return 0;
 }
 
@@ -1970,15 +1816,15 @@ static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
 				 uint32_t *buf_id,
 				 unsigned long backup_offset)
 {
-	struct vmw_resource_val_node *val_node;
+	struct vmw_resource *res;
 	int ret;
 
 	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
-				converter, res_id, &val_node);
+				converter, res_id, &res);
 	if (ret)
 		return ret;
 
-	return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
+	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
 					 buf_id, backup_offset);
 }
 
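The backup switch now funnels entirely through the validation framework; the execbuf code no longer tracks new_backup pointers itself. A call trace of the path, condensed from the two functions above:

    /*
     * vmw_cmd_switch_backup()                 generic entry per command
     *   -> vmw_cmd_res_check()                resolve *res_id to res
     *   -> vmw_cmd_res_switch_backup()
     *        -> vmw_execbuf_info_from_res()   node-private data for res
     *        -> vmw_translate_mob_ptr()       *buf_id -> vbo (+ relocation)
     *        -> vmw_validation_res_switch_backup()  record pending switch
     */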
@@ -2170,14 +2016,14 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
 	} *cmd;
 	int ret;
 	size_t size;
-	struct vmw_resource_val_node *val;
+	struct vmw_resource *ctx;
 
 	cmd = container_of(header, struct vmw_shader_define_cmd,
 			   header);
 
 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
 				user_context_converter, &cmd->body.cid,
-				&val);
+				&ctx);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -2186,14 +2032,14 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
 
 	size = cmd->header.size - sizeof(cmd->body);
 	ret = vmw_compat_shader_add(dev_priv,
-				    vmw_context_res_man(val->res),
+				    vmw_context_res_man(ctx),
 				    cmd->body.shid, cmd + 1,
 				    cmd->body.type, size,
 				    &sw_context->staged_cmd_res);
 	if (unlikely(ret != 0))
 		return ret;
 
-	return vmw_resource_relocation_add(&sw_context->res_relocations,
+	return vmw_resource_relocation_add(sw_context,
 					   NULL,
 					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
@@ -2217,28 +2063,28 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
 		SVGA3dCmdDestroyShader body;
 	} *cmd;
 	int ret;
-	struct vmw_resource_val_node *val;
+	struct vmw_resource *ctx;
 
 	cmd = container_of(header, struct vmw_shader_destroy_cmd,
 			   header);
 
 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
 				user_context_converter, &cmd->body.cid,
-				&val);
+				&ctx);
 	if (unlikely(ret != 0))
 		return ret;
 
 	if (unlikely(!dev_priv->has_mob))
 		return 0;
 
-	ret = vmw_shader_remove(vmw_context_res_man(val->res),
+	ret = vmw_shader_remove(vmw_context_res_man(ctx),
 				cmd->body.shid,
 				cmd->body.type,
 				&sw_context->staged_cmd_res);
 	if (unlikely(ret != 0))
 		return ret;
 
-	return vmw_resource_relocation_add(&sw_context->res_relocations,
+	return vmw_resource_relocation_add(sw_context,
 					   NULL,
 					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
@@ -2261,9 +2107,9 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
 		SVGA3dCmdHeader header;
 		SVGA3dCmdSetShader body;
 	} *cmd;
-	struct vmw_resource_val_node *ctx_node, *res_node = NULL;
 	struct vmw_ctx_bindinfo_shader binding;
-	struct vmw_resource *res = NULL;
+	struct vmw_resource *ctx, *res = NULL;
+	struct vmw_ctx_validation_info *ctx_info;
 	int ret;
 
 	cmd = container_of(header, struct vmw_set_shader_cmd,
@@ -2277,7 +2123,7 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
 
 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
 				user_context_converter, &cmd->body.cid,
-				&ctx_node);
+				&ctx);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -2285,34 +2131,35 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
 		return 0;
 
 	if (cmd->body.shid != SVGA3D_INVALID_ID) {
-		res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
+		res = vmw_shader_lookup(vmw_context_res_man(ctx),
 					cmd->body.shid,
 					cmd->body.type);
 
 		if (!IS_ERR(res)) {
-			ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
-						    &cmd->body.shid, res,
-						    &res_node);
-			vmw_resource_unreference(&res);
+			ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
 			if (unlikely(ret != 0))
 				return ret;
 		}
 	}
 
-	if (!res_node) {
+	if (IS_ERR_OR_NULL(res)) {
 		ret = vmw_cmd_res_check(dev_priv, sw_context,
 					vmw_res_shader,
 					user_shader_converter,
-					&cmd->body.shid, &res_node);
+					&cmd->body.shid, &res);
 		if (unlikely(ret != 0))
 			return ret;
 	}
 
-	binding.bi.ctx = ctx_node->res;
-	binding.bi.res = res_node ? res_node->res : NULL;
+	ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
+	if (!ctx_info)
+		return -EINVAL;
+
+	binding.bi.ctx = ctx;
+	binding.bi.res = res;
 	binding.bi.bt = vmw_ctx_binding_shader;
 	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
-	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+	vmw_binding_add(ctx_info->staged, &binding.bi,
 			binding.shader_slot, 0);
 	return 0;
 }
@@ -2393,8 +2240,8 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
 		SVGA3dCmdHeader header;
 		SVGA3dCmdDXSetSingleConstantBuffer body;
 	} *cmd;
-	struct vmw_resource_val_node *res_node = NULL;
-	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_resource *res = NULL;
+	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
 	struct vmw_ctx_bindinfo_cb binding;
 	int ret;
 
@@ -2406,12 +2253,12 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
 	cmd = container_of(header, typeof(*cmd), header);
 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 				user_surface_converter,
-				&cmd->body.sid, &res_node);
+				&cmd->body.sid, &res);
 	if (unlikely(ret != 0))
 		return ret;
 
-	binding.bi.ctx = ctx_node->res;
-	binding.bi.res = res_node ? res_node->res : NULL;
+	binding.bi.ctx = ctx_node->ctx;
+	binding.bi.res = res;
 	binding.bi.bt = vmw_ctx_binding_cb;
 	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
 	binding.offset = cmd->body.offsetInBytes;
@@ -2426,7 +2273,7 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
 		return -EINVAL;
 	}
 
-	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+	vmw_binding_add(ctx_node->staged, &binding.bi,
 			binding.shader_slot, binding.slot);
 
 	return 0;
@@ -2482,7 +2329,7 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
 		SVGA3dCmdDXSetShader body;
 	} *cmd;
 	struct vmw_resource *res = NULL;
-	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
 	struct vmw_ctx_bindinfo_shader binding;
 	int ret = 0;
 
@@ -2506,23 +2353,20 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
 			return PTR_ERR(res);
 		}
 
-		ret = vmw_resource_val_add(sw_context, res, NULL);
+		ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
 		if (ret)
-			goto out_unref;
+			return ret;
 	}
 
-	binding.bi.ctx = ctx_node->res;
+	binding.bi.ctx = ctx_node->ctx;
 	binding.bi.res = res;
 	binding.bi.bt = vmw_ctx_binding_dx_shader;
 	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
 
-	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+	vmw_binding_add(ctx_node->staged, &binding.bi,
 			binding.shader_slot, 0);
-out_unref:
-	if (res)
-		vmw_resource_unreference(&res);
 
-	return ret;
+	return 0;
 }
 
 /**
@@ -2537,9 +2381,9 @@ static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
|
|
|
struct vmw_sw_context *sw_context,
|
|
|
SVGA3dCmdHeader *header)
|
|
|
{
|
|
|
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
|
|
|
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
|
|
|
struct vmw_ctx_bindinfo_vb binding;
|
|
|
- struct vmw_resource_val_node *res_node;
|
|
|
+ struct vmw_resource *res;
|
|
|
struct {
|
|
|
SVGA3dCmdHeader header;
|
|
|
SVGA3dCmdDXSetVertexBuffers body;
|
|
@@ -2564,18 +2408,18 @@ static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
|
|
|
for (i = 0; i < num; i++) {
|
|
|
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
|
|
|
user_surface_converter,
|
|
|
- &cmd->buf[i].sid, &res_node);
|
|
|
+ &cmd->buf[i].sid, &res);
|
|
|
if (unlikely(ret != 0))
|
|
|
return ret;
|
|
|
|
|
|
- binding.bi.ctx = ctx_node->res;
|
|
|
+ binding.bi.ctx = ctx_node->ctx;
|
|
|
binding.bi.bt = vmw_ctx_binding_vb;
|
|
|
- binding.bi.res = ((res_node) ? res_node->res : NULL);
|
|
|
+ binding.bi.res = res;
|
|
|
binding.offset = cmd->buf[i].offset;
|
|
|
binding.stride = cmd->buf[i].stride;
|
|
|
binding.slot = i + cmd->body.startBuffer;
|
|
|
|
|
|
- vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
|
|
|
+ vmw_binding_add(ctx_node->staged, &binding.bi,
|
|
|
0, binding.slot);
|
|
|
}
|
|
|
|
|
@@ -2594,9 +2438,9 @@ static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
|
|
|
struct vmw_sw_context *sw_context,
|
|
|
SVGA3dCmdHeader *header)
|
|
|
{
|
|
|
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
|
|
|
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
|
|
|
struct vmw_ctx_bindinfo_ib binding;
|
|
|
- struct vmw_resource_val_node *res_node;
|
|
|
+ struct vmw_resource *res;
|
|
|
struct {
|
|
|
SVGA3dCmdHeader header;
|
|
|
SVGA3dCmdDXSetIndexBuffer body;
|
|
@@ -2611,17 +2455,17 @@ static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
|
|
|
cmd = container_of(header, typeof(*cmd), header);
|
|
|
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
|
|
|
user_surface_converter,
|
|
|
- &cmd->body.sid, &res_node);
|
|
|
+ &cmd->body.sid, &res);
|
|
|
if (unlikely(ret != 0))
|
|
|
return ret;
|
|
|
|
|
|
- binding.bi.ctx = ctx_node->res;
|
|
|
- binding.bi.res = ((res_node) ? res_node->res : NULL);
|
|
|
+ binding.bi.ctx = ctx_node->ctx;
|
|
|
+ binding.bi.res = res;
|
|
|
binding.bi.bt = vmw_ctx_binding_ib;
|
|
|
binding.offset = cmd->body.offset;
|
|
|
binding.format = cmd->body.format;
|
|
|
|
|
|
- vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);
|
|
|
+ vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);
|
|
|
|
|
|
return 0;
|
|
|
}
|
|
@@ -2679,8 +2523,8 @@ static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
|
|
|
SVGA3dCmdDXClearRenderTargetView body;
|
|
|
} *cmd = container_of(header, typeof(*cmd), header);
|
|
|
|
|
|
- return vmw_view_id_val_add(sw_context, vmw_view_rt,
|
|
|
- cmd->body.renderTargetViewId);
|
|
|
+ return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_rt,
|
|
|
+ cmd->body.renderTargetViewId));
|
|
|
}
|
|
|
|
|
|
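vmw_view_id_val_add() now returns the view resource (or an ERR_PTR) instead of an int, so callers that only need success or failure wrap it in PTR_RET(). For reference, PTR_RET() is the older name of PTR_ERR_OR_ZERO() from <linux/err.h>; a minimal equivalent sketch:

	#include <linux/err.h>

	/* What PTR_RET()/PTR_ERR_OR_ZERO() boils down to: fold an
	 * ERR_PTR-style return into the usual 0-or-negative-errno
	 * convention. */
	static inline int ptr_ret_sketch(const void *ptr)
	{
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);	/* propagate the encoded -errno */

		return 0;			/* valid pointer: success */
	}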
 /**
@@ -2700,16 +2544,16 @@ static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
 		SVGA3dCmdDXClearDepthStencilView body;
 	} *cmd = container_of(header, typeof(*cmd), header);
 
-	return vmw_view_id_val_add(sw_context, vmw_view_ds,
-				   cmd->body.depthStencilViewId);
+	return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_ds,
+					   cmd->body.depthStencilViewId));
 }
 
 static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
 				  struct vmw_sw_context *sw_context,
 				  SVGA3dCmdHeader *header)
 {
-	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
-	struct vmw_resource_val_node *srf_node;
+	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_resource *srf;
 	struct vmw_resource *res;
 	enum vmw_view_type view_type;
 	int ret;
@@ -2734,19 +2578,18 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
 	cmd = container_of(header, typeof(*cmd), header);
 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 				user_surface_converter,
-				&cmd->sid, &srf_node);
+				&cmd->sid, &srf);
 	if (unlikely(ret != 0))
 		return ret;
 
-	res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
+	res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
 	ret = vmw_cotable_notify(res, cmd->defined_id);
-	vmw_resource_unreference(&res);
 	if (unlikely(ret != 0))
 		return ret;
 
 	return vmw_view_add(sw_context->man,
-			    ctx_node->res,
-			    srf_node->res,
+			    ctx_node->ctx,
+			    srf,
 			    view_type,
 			    cmd->defined_id,
 			    header,
@@ -2766,9 +2609,9 @@ static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
 				     struct vmw_sw_context *sw_context,
 				     SVGA3dCmdHeader *header)
 {
-	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
 	struct vmw_ctx_bindinfo_so binding;
-	struct vmw_resource_val_node *res_node;
+	struct vmw_resource *res;
 	struct {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdDXSetSOTargets body;
@@ -2793,18 +2636,18 @@ static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
 	for (i = 0; i < num; i++) {
 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 					user_surface_converter,
-					&cmd->targets[i].sid, &res_node);
+					&cmd->targets[i].sid, &res);
 		if (unlikely(ret != 0))
 			return ret;
 
-		binding.bi.ctx = ctx_node->res;
-		binding.bi.res = ((res_node) ? res_node->res : NULL);
+		binding.bi.ctx = ctx_node->ctx;
+		binding.bi.res = res;
 		binding.bi.bt = vmw_ctx_binding_so,
 		binding.offset = cmd->targets[i].offset;
 		binding.size = cmd->targets[i].sizeInBytes;
 		binding.slot = i;
 
-		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+		vmw_binding_add(ctx_node->staged, &binding.bi,
 				0, binding.slot);
 	}
 
@@ -2815,7 +2658,7 @@ static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
 				struct vmw_sw_context *sw_context,
 				SVGA3dCmdHeader *header)
 {
-	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
 	struct vmw_resource *res;
 	/*
 	 * This is based on the fact that all affected define commands have
@@ -2834,10 +2677,9 @@ static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
 	}
 
 	so_type = vmw_so_cmd_to_type(header->id);
-	res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
+	res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
 	cmd = container_of(header, typeof(*cmd), header);
 	ret = vmw_cotable_notify(res, cmd->defined_id);
-	vmw_resource_unreference(&res);
 
 	return ret;
 }
@@ -2882,7 +2724,7 @@ static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
 				struct vmw_sw_context *sw_context,
 				SVGA3dCmdHeader *header)
 {
-	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
 
 	if (unlikely(ctx_node == NULL)) {
 		DRM_ERROR("DX Context not set.\n");
@@ -2907,7 +2749,7 @@ static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
 				  struct vmw_sw_context *sw_context,
 				  SVGA3dCmdHeader *header)
 {
-	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
 	struct {
 		SVGA3dCmdHeader header;
 		union vmw_view_destroy body;
@@ -2934,7 +2776,7 @@ static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
 	 * relocation to conditionally make this command a NOP to avoid
 	 * device errors.
 	 */
-	return vmw_resource_relocation_add(&sw_context->res_relocations,
+	return vmw_resource_relocation_add(sw_context,
 					   view,
 					   vmw_ptr_diff(sw_context->buf_start,
 							&cmd->header.id),
@@ -2953,7 +2795,7 @@ static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
 				    struct vmw_sw_context *sw_context,
 				    SVGA3dCmdHeader *header)
 {
-	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
 	struct vmw_resource *res;
 	struct {
 		SVGA3dCmdHeader header;
@@ -2966,13 +2808,12 @@ static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
 		return -EINVAL;
 	}
 
-	res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
+	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
 	ret = vmw_cotable_notify(res, cmd->body.shaderId);
-	vmw_resource_unreference(&res);
 	if (ret)
 		return ret;
 
-	return vmw_dx_shader_add(sw_context->man, ctx_node->res,
+	return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
 				 cmd->body.shaderId, cmd->body.type,
 				 &sw_context->staged_cmd_res);
 }
@@ -2989,7 +2830,7 @@ static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
 				     struct vmw_sw_context *sw_context,
 				     SVGA3dCmdHeader *header)
 {
-	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
 	struct {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdDXDestroyShader body;
@@ -3021,8 +2862,7 @@ static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
 				  struct vmw_sw_context *sw_context,
 				  SVGA3dCmdHeader *header)
 {
-	struct vmw_resource_val_node *ctx_node;
-	struct vmw_resource_val_node *res_node;
+	struct vmw_resource *ctx;
 	struct vmw_resource *res;
 	struct {
 		SVGA3dCmdHeader header;
@@ -3033,38 +2873,33 @@ static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
 	if (cmd->body.cid != SVGA3D_INVALID_ID) {
 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
 					user_context_converter,
-					&cmd->body.cid, &ctx_node);
+					&cmd->body.cid, &ctx);
 		if (ret)
 			return ret;
 	} else {
-		ctx_node = sw_context->dx_ctx_node;
-		if (!ctx_node) {
+		if (!sw_context->dx_ctx_node) {
 			DRM_ERROR("DX Context not set.\n");
 			return -EINVAL;
 		}
+		ctx = sw_context->dx_ctx_node->ctx;
 	}
 
-	res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
+	res = vmw_shader_lookup(vmw_context_res_man(ctx),
 				cmd->body.shid, 0);
 	if (IS_ERR(res)) {
 		DRM_ERROR("Could not find shader to bind.\n");
 		return PTR_ERR(res);
 	}
 
-	ret = vmw_resource_val_add(sw_context, res, &res_node);
+	ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
 	if (ret) {
 		DRM_ERROR("Error creating resource validation node.\n");
-		goto out_unref;
+		return ret;
 	}
 
-
-	ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
-					&cmd->body.mobid,
-					cmd->body.offsetInBytes);
-out_unref:
-	vmw_resource_unreference(&res);
-
-	return ret;
+	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
+					 &cmd->body.mobid,
+					 cmd->body.offsetInBytes);
 }
 
 /**
@@ -3083,8 +2918,8 @@ static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
 		SVGA3dCmdDXGenMips body;
 	} *cmd = container_of(header, typeof(*cmd), header);
 
-	return vmw_view_id_val_add(sw_context, vmw_view_sr,
-				   cmd->body.shaderResourceViewId);
+	return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_sr,
+					   cmd->body.shaderResourceViewId));
 }
 
 /**
@@ -3638,20 +3473,18 @@ static int vmw_cmd_check_all(struct vmw_private *dev_priv,
 
 static void vmw_free_relocations(struct vmw_sw_context *sw_context)
 {
-	sw_context->cur_reloc = 0;
+	/* Memory is validation context memory, so no need to free it */
+
+	INIT_LIST_HEAD(&sw_context->bo_relocations);
 }
 
 static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
 {
-	uint32_t i;
 	struct vmw_relocation *reloc;
-	struct ttm_validate_buffer *validate;
 	struct ttm_buffer_object *bo;
 
-	for (i = 0; i < sw_context->cur_reloc; ++i) {
-		reloc = &sw_context->relocs[i];
-		validate = &sw_context->val_bufs[reloc->index].base;
-		bo = validate->bo;
+	list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
+		bo = &reloc->vbo->base;
 		switch (bo->mem.mem_type) {
 		case TTM_PL_VRAM:
 			reloc->location->offset += bo->offset;
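The "no need to free it" comment above is the heart of the new relocation scheme: relocations are no longer stored in a fixed-size array on the software context but are allocated out of the validation context's pooled memory and chained on bo_relocations, so they disappear when the validation context is torn down. A hedged sketch of how a translate-time relocation would be queued under that assumption (vmw_validation_mem_alloc is the new validation API's pool allocator; vmw_bo and id are stand-ins for the caller's locals):

	struct vmw_relocation *reloc;

	/* Pool allocation: freed wholesale with the validation context. */
	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->vbo = vmw_bo;	/* non ref-counted; validation holds the ref */
	reloc->mob_loc = id;	/* device-visible location patched later */
	list_add_tail(&reloc->head, &sw_context->bo_relocations);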
@@ -3670,110 +3503,6 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
 	vmw_free_relocations(sw_context);
 }
 
-/**
- * vmw_resource_list_unrefererence - Free up a resource list and unreference
- * all resources referenced by it.
- *
- * @list: The resource list.
- */
-static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
-					  struct list_head *list)
-{
-	struct vmw_resource_val_node *val, *val_next;
-
-	/*
-	 * Drop references to resources held during command submission.
-	 */
-
-	list_for_each_entry_safe(val, val_next, list, head) {
-		list_del_init(&val->head);
-		vmw_resource_unreference(&val->res);
-
-		if (val->staged_bindings) {
-			if (val->staged_bindings != sw_context->staged_bindings)
-				vmw_binding_state_free(val->staged_bindings);
-			else
-				sw_context->staged_bindings_inuse = false;
-			val->staged_bindings = NULL;
-		}
-
-		kfree(val);
-	}
-}
-
-static void vmw_clear_validations(struct vmw_sw_context *sw_context)
-{
-	struct vmw_validate_buffer *entry, *next;
-	struct vmw_resource_val_node *val;
-
-	/*
-	 * Drop references to DMA buffers held during command submission.
-	 */
-	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
-				 base.head) {
-		list_del(&entry->base.head);
-		ttm_bo_unref(&entry->base.bo);
-		(void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
-		sw_context->cur_val_buf--;
-	}
-	BUG_ON(sw_context->cur_val_buf != 0);
-
-	list_for_each_entry(val, &sw_context->resource_list, head)
-		(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
-}
-
-int vmw_validate_single_buffer(struct vmw_private *dev_priv,
-			       struct ttm_buffer_object *bo,
-			       bool interruptible,
-			       bool validate_as_mob)
-{
-	struct vmw_buffer_object *vbo =
-		container_of(bo, struct vmw_buffer_object, base);
-	struct ttm_operation_ctx ctx = { interruptible, false };
-	int ret;
-
-	if (vbo->pin_count > 0)
-		return 0;
-
-	if (validate_as_mob)
-		return ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
-
-	/**
-	 * Put BO in VRAM if there is space, otherwise as a GMR.
-	 * If there is no space in VRAM and GMR ids are all used up,
-	 * start evicting GMRs to make room. If the DMA buffer can't be
-	 * used as a GMR, this will return -ENOMEM.
-	 */
-
-	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
-	if (likely(ret == 0 || ret == -ERESTARTSYS))
-		return ret;
-
-	/**
-	 * If that failed, try VRAM again, this time evicting
-	 * previous contents.
-	 */
-
-	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
-	return ret;
-}
-
-static int vmw_validate_buffers(struct vmw_private *dev_priv,
-				struct vmw_sw_context *sw_context)
-{
-	struct vmw_validate_buffer *entry;
-	int ret;
-
-	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
-		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
-						 true,
-						 entry->validate_as_mob);
-		if (unlikely(ret != 0))
-			return ret;
-	}
-	return 0;
-}
-
 static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
 				 uint32_t size)
 {
|
|
|
|
|
|
if (sw_context->dx_ctx_node)
|
|
|
cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
|
|
|
- sw_context->dx_ctx_node->res->id);
|
|
|
+ sw_context->dx_ctx_node->ctx->id);
|
|
|
else
|
|
|
cmd = vmw_fifo_reserve(dev_priv, command_size);
|
|
|
if (!cmd) {
|
|
@@ -3980,7 +3709,7 @@ static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
|
|
|
u32 command_size,
|
|
|
struct vmw_sw_context *sw_context)
|
|
|
{
|
|
|
- u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
|
|
|
+ u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
|
|
|
SVGA3D_INVALID_ID);
|
|
|
void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
|
|
|
id, false, header);
|
|
@@ -4057,31 +3786,35 @@ static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
|
|
|
struct vmw_sw_context *sw_context,
|
|
|
uint32_t handle)
|
|
|
{
|
|
|
- struct vmw_resource_val_node *ctx_node;
|
|
|
struct vmw_resource *res;
|
|
|
int ret;
|
|
|
+ unsigned int size;
|
|
|
|
|
|
if (handle == SVGA3D_INVALID_ID)
|
|
|
return 0;
|
|
|
|
|
|
- ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
|
|
|
- handle, user_context_converter,
|
|
|
- &res);
|
|
|
- if (unlikely(ret != 0)) {
|
|
|
+ size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
|
|
|
+ ret = vmw_validation_preload_res(sw_context->ctx, size);
|
|
|
+ if (ret)
|
|
|
+ return ret;
|
|
|
+
|
|
|
+ res = vmw_user_resource_noref_lookup_handle
|
|
|
+ (dev_priv, sw_context->fp->tfile, handle,
|
|
|
+ user_context_converter);
|
|
|
+ if (unlikely(IS_ERR(res))) {
|
|
|
DRM_ERROR("Could not find or user DX context 0x%08x.\n",
|
|
|
(unsigned) handle);
|
|
|
- return ret;
|
|
|
+ return PTR_ERR(res);
|
|
|
}
|
|
|
|
|
|
- ret = vmw_resource_val_add(sw_context, res, &ctx_node);
|
|
|
+ ret = vmw_execbuf_res_noref_val_add(sw_context, res);
|
|
|
if (unlikely(ret != 0))
|
|
|
- goto out_err;
|
|
|
+ return ret;
|
|
|
|
|
|
- sw_context->dx_ctx_node = ctx_node;
|
|
|
+ sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
|
|
|
sw_context->man = vmw_context_res_man(res);
|
|
|
-out_err:
|
|
|
- vmw_resource_unreference(&res);
|
|
|
- return ret;
|
|
|
+
|
|
|
+ return 0;
|
|
|
}
|
|
|
|
|
|
int vmw_execbuf_process(struct drm_file *file_priv,
|
|
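The ordering in vmw_execbuf_tie_context() matters: vmw_validation_preload_res() charges the validation context with enough pooled memory for one context node before the lookup, so the subsequent val_add cannot fail on allocation. That is what makes the non-refcounted lookup safe; nothing fallible on memory sits between obtaining the unreferenced pointer and recording it in the validation context, which from then on holds the reference. A condensed sketch of the idiom (names from this patch; tfile, handle and converter stand in for the real arguments):

	/* 1. Prepay the allocation while failure is still harmless. */
	ret = vmw_validation_preload_res(sw_context->ctx, size);
	if (ret)
		return ret;

	/* 2. Non-refcounted lookup: cheap, but the pointer is only safe... */
	res = vmw_user_resource_noref_lookup_handle(dev_priv, tfile, handle,
						    converter);
	if (IS_ERR(res))
		return PTR_ERR(res);

	/* 3. ...because this step's memory was prepaid above. */
	return vmw_execbuf_res_noref_val_add(sw_context, res);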
@@ -4097,15 +3830,12 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 {
 	struct vmw_sw_context *sw_context = &dev_priv->ctx;
 	struct vmw_fence_obj *fence = NULL;
-	struct vmw_resource *error_resource;
-	struct list_head resource_list;
 	struct vmw_cmdbuf_header *header;
-	struct ww_acquire_ctx ticket;
 	uint32_t handle;
 	int ret;
 	int32_t out_fence_fd = -1;
 	struct sync_file *sync_file = NULL;
-
+	DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
 
 	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
 		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
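DECLARE_VAL_CONTEXT() sets up an on-stack struct vmw_validation_context. As far as this patch shows, the two extra arguments are the hash table used to merge duplicate resource and buffer entries while the command stream is parsed, and a flag enabling that merging; this is a hedged reading of the macro's intent, not its literal expansion:

	DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
	/*                           ^                    ^
	 *                           |                    +-- assumed: merge
	 *                           |                        duplicate entries
	 *                           +-- assumed: hash table for O(1)
	 *                               duplicate lookup during parsing
	 */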
@@ -4157,10 +3887,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	sw_context->kernel = true;
 
 	sw_context->fp = vmw_fpriv(file_priv);
-	sw_context->cur_reloc = 0;
-	sw_context->cur_val_buf = 0;
-	INIT_LIST_HEAD(&sw_context->resource_list);
-	INIT_LIST_HEAD(&sw_context->ctx_resource_list);
+	INIT_LIST_HEAD(&sw_context->ctx_list);
 	sw_context->cur_query_bo = dev_priv->pinned_bo;
 	sw_context->last_query_ctx = NULL;
 	sw_context->needs_post_query_barrier = false;
@@ -4168,8 +3895,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	sw_context->dx_query_mob = NULL;
 	sw_context->dx_query_ctx = NULL;
 	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
-	INIT_LIST_HEAD(&sw_context->validate_nodes);
 	INIT_LIST_HEAD(&sw_context->res_relocations);
+	INIT_LIST_HEAD(&sw_context->bo_relocations);
 	if (sw_context->staged_bindings)
 		vmw_binding_state_reset(sw_context->staged_bindings);
 
@@ -4180,24 +3907,13 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 		sw_context->res_ht_initialized = true;
 	}
 	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
-	INIT_LIST_HEAD(&resource_list);
+	sw_context->ctx = &val_ctx;
 	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
-	if (unlikely(ret != 0)) {
-		list_splice_init(&sw_context->ctx_resource_list,
-				 &sw_context->resource_list);
+	if (unlikely(ret != 0))
 		goto out_err_nores;
-	}
 
 	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
 				command_size);
-	/*
-	 * Merge the resource lists before checking the return status
-	 * from vmd_cmd_check_all so that all the open hashtabs will
-	 * be handled properly even if vmw_cmd_check_all fails.
-	 */
-	list_splice_init(&sw_context->ctx_resource_list,
-			 &sw_context->resource_list);
-
 	if (unlikely(ret != 0))
 		goto out_err_nores;
 
@@ -4205,18 +3921,18 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	if (unlikely(ret != 0))
 		goto out_err_nores;
 
-	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
-				     true, NULL);
+	ret = vmw_validation_bo_reserve(&val_ctx, true);
 	if (unlikely(ret != 0))
 		goto out_err_nores;
 
-	ret = vmw_validate_buffers(dev_priv, sw_context);
+	ret = vmw_validation_bo_validate(&val_ctx, true);
 	if (unlikely(ret != 0))
 		goto out_err;
 
-	ret = vmw_resources_validate(sw_context);
+	ret = vmw_validation_res_validate(&val_ctx, true);
 	if (unlikely(ret != 0))
 		goto out_err;
+	vmw_validation_drop_ht(&val_ctx);
 
 	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
 	if (unlikely(ret != 0)) {
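Taken together, the main submission path now runs the validation context through a fixed sequence. Summarized from the hunks above and below (error paths elided; every call appears verbatim in this patch):

	DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);

	ret = vmw_validation_bo_reserve(&val_ctx, true);   /* reserve all BOs */
	ret = vmw_validation_bo_validate(&val_ctx, true);  /* place/populate BOs */
	ret = vmw_validation_res_validate(&val_ctx, true); /* validate resources */
	vmw_validation_drop_ht(&val_ctx);	/* dedup hash no longer needed */

	/* ... submit command stream, create fence ... */

	vmw_execbuf_bindings_commit(sw_context, false);	/* commit staged bindings */
	vmw_validation_res_unreserve(&val_ctx, false);	/* unreserve resources */
	vmw_validation_bo_fence(sw_context->ctx, fence);	/* fence, unreserve BOs */
	vmw_validation_unref_lists(&val_ctx);	/* drop refs, free pooled memory */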
@@ -4255,17 +3971,16 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	if (ret != 0)
 		DRM_ERROR("Fence submission error. Syncing.\n");
 
-	vmw_resources_unreserve(sw_context, false);
+	vmw_execbuf_bindings_commit(sw_context, false);
+	vmw_bind_dx_query_mob(sw_context);
+	vmw_validation_res_unreserve(&val_ctx, false);
 
-	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
-				    (void *) fence);
+	vmw_validation_bo_fence(sw_context->ctx, fence);
 
 	if (unlikely(dev_priv->pinned_bo != NULL &&
 		     !dev_priv->query_cid_valid))
 		__vmw_execbuf_release_pinned_bo(dev_priv, fence);
 
-	vmw_clear_validations(sw_context);
-
 	/*
 	 * If anything fails here, give up trying to export the fence
 	 * and do a sync since the user mode will not be able to sync
@@ -4300,7 +4015,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 		vmw_fence_obj_unreference(&fence);
 	}
 
-	list_splice_init(&sw_context->resource_list, &resource_list);
 	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
 	mutex_unlock(&dev_priv->cmdbuf_mutex);
 
@@ -4308,36 +4022,33 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	 * Unreference resources outside of the cmdbuf_mutex to
 	 * avoid deadlocks in resource destruction paths.
 	 */
-	vmw_resource_list_unreference(sw_context, &resource_list);
+	vmw_validation_unref_lists(&val_ctx);
 
 	return 0;
 
 out_unlock_binding:
 	mutex_unlock(&dev_priv->binding_mutex);
 out_err:
-	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
+	vmw_validation_bo_backoff(&val_ctx);
 out_err_nores:
-	vmw_resources_unreserve(sw_context, true);
+	vmw_execbuf_bindings_commit(sw_context, true);
+	vmw_validation_res_unreserve(&val_ctx, true);
 	vmw_resource_relocations_free(&sw_context->res_relocations);
 	vmw_free_relocations(sw_context);
-	vmw_clear_validations(sw_context);
 	if (unlikely(dev_priv->pinned_bo != NULL &&
 		     !dev_priv->query_cid_valid))
 		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
 out_unlock:
-	list_splice_init(&sw_context->resource_list, &resource_list);
-	error_resource = sw_context->error_resource;
-	sw_context->error_resource = NULL;
 	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
+	vmw_validation_drop_ht(&val_ctx);
+	WARN_ON(!list_empty(&sw_context->ctx_list));
 	mutex_unlock(&dev_priv->cmdbuf_mutex);
 
 	/*
 	 * Unreference resources outside of the cmdbuf_mutex to
 	 * avoid deadlocks in resource destruction paths.
 	 */
-	vmw_resource_list_unreference(sw_context, &resource_list);
-	if (unlikely(error_resource != NULL))
-		vmw_resource_unreference(&error_resource);
+	vmw_validation_unref_lists(&val_ctx);
 out_free_header:
 	if (header)
 		vmw_cmdbuf_header_free(header);
@@ -4398,38 +4109,31 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
 				     struct vmw_fence_obj *fence)
 {
 	int ret = 0;
-	struct list_head validate_list;
-	struct ttm_validate_buffer pinned_val, query_val;
 	struct vmw_fence_obj *lfence = NULL;
-	struct ww_acquire_ctx ticket;
+	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
 
 	if (dev_priv->pinned_bo == NULL)
 		goto out_unlock;
 
-	INIT_LIST_HEAD(&validate_list);
-
-	pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
-	pinned_val.shared = false;
-	list_add_tail(&pinned_val.head, &validate_list);
+	ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false,
+				    false);
+	if (ret)
+		goto out_no_reserve;
 
-	query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
-	query_val.shared = false;
-	list_add_tail(&query_val.head, &validate_list);
+	ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false,
+				    false);
+	if (ret)
+		goto out_no_reserve;
 
-	ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
-				     false, NULL);
-	if (unlikely(ret != 0)) {
-		vmw_execbuf_unpin_panic(dev_priv);
+	ret = vmw_validation_bo_reserve(&val_ctx, false);
+	if (ret)
 		goto out_no_reserve;
-	}
 
 	if (dev_priv->query_cid_valid) {
 		BUG_ON(fence != NULL);
 		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
-		if (unlikely(ret != 0)) {
-			vmw_execbuf_unpin_panic(dev_priv);
+		if (ret)
 			goto out_no_emit;
-		}
 		dev_priv->query_cid_valid = false;
 	}
 
@@ -4443,22 +4147,22 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
 						  NULL);
 		fence = lfence;
 	}
-	ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
+	vmw_validation_bo_fence(&val_ctx, fence);
 	if (lfence != NULL)
 		vmw_fence_obj_unreference(&lfence);
 
-	ttm_bo_unref(&query_val.bo);
-	ttm_bo_unref(&pinned_val.bo);
+	vmw_validation_unref_lists(&val_ctx);
 	vmw_bo_unreference(&dev_priv->pinned_bo);
 out_unlock:
 	return;
 
 out_no_emit:
-	ttm_eu_backoff_reservation(&ticket, &validate_list);
+	vmw_validation_bo_backoff(&val_ctx);
 out_no_reserve:
-	ttm_bo_unref(&query_val.bo);
-	ttm_bo_unref(&pinned_val.bo);
+	vmw_validation_unref_lists(&val_ctx);
+	vmw_execbuf_unpin_panic(dev_priv);
 	vmw_bo_unreference(&dev_priv->pinned_bo);
+
 }
 
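As with the main path, the pinned-BO release now leans on the validation API. Note that it passes no hash table (DECLARE_VAL_CONTEXT(val_ctx, NULL, 0)), since duplicate merging buys nothing for two known buffers, and that the unpin panic is consolidated into the single out_no_reserve label instead of being repeated at each failure site. Condensed shape of the function above (error checks elided; all calls from this patch):

	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);	/* two BOs: no dedup hash */

	vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false, false);
	vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false, false);
	vmw_validation_bo_reserve(&val_ctx, false);

	/* ... emit dummy query if needed, unpin the query BOs ... */

	vmw_validation_bo_fence(&val_ctx, fence);	/* fences and unreserves */
	vmw_validation_unref_lists(&val_ctx);		/* drops the BO references */
	vmw_bo_unreference(&dev_priv->pinned_bo);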
 /**