@@ -29,6 +29,8 @@
 #include "vmwgfx_reg.h"
 #include <drm/ttm/ttm_bo_api.h>
 #include <drm/ttm/ttm_placement.h>
+#include "vmwgfx_so.h"
+#include "vmwgfx_binding.h"

 #define VMW_RES_HT_ORDER 12

@@ -59,8 +61,11 @@ struct vmw_resource_relocation {
  * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
  * @first_usage: Set to true the first time the resource is referenced in
  * the command stream.
- * @no_buffer_needed: Resources do not need to allocate buffer backup on
- * reservation. The command stream will provide one.
+ * @switching_backup: The command stream provides a new backup buffer for a
+ * resource.
+ * @no_buffer_needed: This means @switching_backup is true on first buffer
+ * reference. So resource reservation does not need to allocate a backup
+ * buffer for the resource.
  */
 struct vmw_resource_val_node {
         struct list_head head;
@@ -69,8 +74,9 @@ struct vmw_resource_val_node {
         struct vmw_dma_buffer *new_backup;
         struct vmw_ctx_binding_state *staged_bindings;
         unsigned long new_backup_offset;
-        bool first_usage;
-        bool no_buffer_needed;
+        u32 first_usage : 1;
+        u32 switching_backup : 1;
+        u32 no_buffer_needed : 1;
 };

 /**
@@ -92,6 +98,10 @@ struct vmw_cmd_entry {
         [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
                                        (_gb_disable), (_gb_enable)}

+static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
+                                        struct vmw_sw_context *sw_context,
+                                        struct vmw_resource *ctx);
+
 /**
  * vmw_resource_unreserve - unreserve resources previously reserved for
  * command submission.
@@ -99,15 +109,16 @@ struct vmw_cmd_entry {
  * @list_head: list of resources to unreserve.
  * @backoff: Whether command submission failed.
  */
-static void vmw_resource_list_unreserve(struct list_head *list,
+static void vmw_resource_list_unreserve(struct vmw_sw_context *sw_context,
+                                        struct list_head *list,
                                         bool backoff)
 {
         struct vmw_resource_val_node *val;

         list_for_each_entry(val, list, head) {
                 struct vmw_resource *res = val->res;
-                struct vmw_dma_buffer *new_backup =
-                        backoff ? NULL : val->new_backup;
+                bool switch_backup =
+                        (backoff) ? false : val->switching_backup;

                 /*
                  * Transfer staged context bindings to the
@@ -115,18 +126,71 @@ static void vmw_resource_list_unreserve(struct list_head *list,
                  */
                 if (unlikely(val->staged_bindings)) {
                         if (!backoff) {
-                                vmw_context_binding_state_transfer
-                                        (val->res, val->staged_bindings);
+                                vmw_binding_state_commit
+                                        (vmw_context_binding_state(val->res),
+                                         val->staged_bindings);
                         }
-                        kfree(val->staged_bindings);
+
+                        if (val->staged_bindings != sw_context->staged_bindings)
+                                vmw_binding_state_free(val->staged_bindings);
+                        else
+                                sw_context->staged_bindings_inuse = false;
                         val->staged_bindings = NULL;
                 }
-                vmw_resource_unreserve(res, new_backup,
-                                       val->new_backup_offset);
+                vmw_resource_unreserve(res, switch_backup, val->new_backup,
+                                       val->new_backup_offset);
                 vmw_dmabuf_unreference(&val->new_backup);
         }
 }

+/**
+ * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
+ * added to the validate list.
+ *
+ * @dev_priv: Pointer to the device private.
+ * @sw_context: The validation context.
+ * @node: The validation node holding this context.
+ */
+static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
+                                   struct vmw_sw_context *sw_context,
+                                   struct vmw_resource_val_node *node)
+{
+        int ret;
+
+        ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
+        if (unlikely(ret != 0))
+                goto out_err;
+
+        if (!sw_context->staged_bindings) {
+                sw_context->staged_bindings =
+                        vmw_binding_state_alloc(dev_priv);
+                if (IS_ERR(sw_context->staged_bindings)) {
+                        DRM_ERROR("Failed to allocate context binding "
+                                  "information.\n");
+                        ret = PTR_ERR(sw_context->staged_bindings);
+                        sw_context->staged_bindings = NULL;
+                        goto out_err;
+                }
+        }
+
+        if (sw_context->staged_bindings_inuse) {
+                node->staged_bindings = vmw_binding_state_alloc(dev_priv);
+                if (IS_ERR(node->staged_bindings)) {
+                        DRM_ERROR("Failed to allocate context binding "
+                                  "information.\n");
+                        ret = PTR_ERR(node->staged_bindings);
+                        node->staged_bindings = NULL;
+                        goto out_err;
+                }
+        } else {
+                node->staged_bindings = sw_context->staged_bindings;
+                sw_context->staged_bindings_inuse = true;
+        }
+
+        return 0;
+out_err:
+        return ret;
+}

 /**
  * vmw_resource_val_add - Add a resource to the software context's
@@ -141,6 +205,7 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
                                 struct vmw_resource *res,
                                 struct vmw_resource_val_node **p_node)
 {
+        struct vmw_private *dev_priv = res->dev_priv;
         struct vmw_resource_val_node *node;
         struct drm_hash_item *hash;
         int ret;
@@ -169,14 +234,90 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
                 kfree(node);
                 return ret;
         }
-        list_add_tail(&node->head, &sw_context->resource_list);
         node->res = vmw_resource_reference(res);
         node->first_usage = true;
-
         if (unlikely(p_node != NULL))
                 *p_node = node;

-        return 0;
+        if (!dev_priv->has_mob) {
+                list_add_tail(&node->head, &sw_context->resource_list);
+                return 0;
+        }
+
+        switch (vmw_res_type(res)) {
+        case vmw_res_context:
+        case vmw_res_dx_context:
+                list_add(&node->head, &sw_context->ctx_resource_list);
+                ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
+                break;
+        case vmw_res_cotable:
+                list_add_tail(&node->head, &sw_context->ctx_resource_list);
+                break;
+        default:
+                list_add_tail(&node->head, &sw_context->resource_list);
+                break;
+        }
+
+        return ret;
+}
+
+/**
+ * vmw_view_res_val_add - Add a view and the surface it's pointing to
+ * to the validation list
+ *
+ * @sw_context: The software context holding the validation list.
+ * @view: Pointer to the view resource.
+ *
+ * Returns 0 if success, negative error code otherwise.
+ */
+static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
+                                struct vmw_resource *view)
+{
+        int ret;
+
+        /*
+         * First add the resource the view is pointing to, otherwise
+         * it may be swapped out when the view is validated.
+         */
+        ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
+        if (ret)
+                return ret;
+
+        return vmw_resource_val_add(sw_context, view, NULL);
+}
+
+/**
+ * vmw_view_id_val_add - Look up a view and add it and the surface it's
+ * pointing to to the validation list.
+ *
+ * @sw_context: The software context holding the validation list.
+ * @view_type: The view type to look up.
+ * @id: view id of the view.
+ *
+ * The view is represented by a view id and the DX context it's created on,
+ * or scheduled for creation on. If there is no DX context set, the function
+ * will return -EINVAL. Otherwise returns 0 on success and -EINVAL on failure.
+ */
+static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
+                               enum vmw_view_type view_type, u32 id)
+{
+        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+        struct vmw_resource *view;
+        int ret;
+
+        if (!ctx_node) {
+                DRM_ERROR("DX Context not set.\n");
+                return -EINVAL;
+        }
+
+        view = vmw_view_lookup(sw_context->man, view_type, id);
+        if (IS_ERR(view))
+                return PTR_ERR(view);
+
+        ret = vmw_view_res_val_add(sw_context, view);
+        vmw_resource_unreference(&view);
+
+        return ret;
 }

 /**
@@ -195,19 +336,41 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
                                         struct vmw_resource *ctx)
 {
         struct list_head *binding_list;
-        struct vmw_ctx_binding *entry;
+        struct vmw_ctx_bindinfo *entry;
         int ret = 0;
         struct vmw_resource *res;
+        u32 i;
+
+        /* Add all cotables to the validation list. */
+        if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
+                for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+                        res = vmw_context_cotable(ctx, i);
+                        if (IS_ERR(res))
+                                continue;
+
+                        ret = vmw_resource_val_add(sw_context, res, NULL);
+                        vmw_resource_unreference(&res);
+                        if (unlikely(ret != 0))
+                                return ret;
+                }
+        }
+

+        /* Add all resources bound to the context to the validation list */
         mutex_lock(&dev_priv->binding_mutex);
         binding_list = vmw_context_binding_list(ctx);

         list_for_each_entry(entry, binding_list, ctx_list) {
-                res = vmw_resource_reference_unless_doomed(entry->bi.res);
+                /* entry->res is not refcounted */
+                res = vmw_resource_reference_unless_doomed(entry->res);
                 if (unlikely(res == NULL))
                         continue;

-                ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
+                if (vmw_res_type(entry->res) == vmw_res_view)
+                        ret = vmw_view_res_val_add(sw_context, entry->res);
+                else
+                        ret = vmw_resource_val_add(sw_context, entry->res,
+                                                   NULL);
                 vmw_resource_unreference(&res);
                 if (unlikely(ret != 0))
                         break;
@@ -409,6 +572,7 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)

         list_for_each_entry(val, &sw_context->resource_list, head) {
                 struct vmw_resource *res = val->res;
+                struct vmw_dma_buffer *backup = res->backup;

                 ret = vmw_resource_validate(res);
                 if (unlikely(ret != 0)) {
@@ -416,18 +580,29 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
                         DRM_ERROR("Failed to validate resource.\n");
                         return ret;
                 }
+
+                /* Check if the resource switched backup buffer */
+                if (backup && res->backup && (backup != res->backup)) {
+                        struct vmw_dma_buffer *vbo = res->backup;
+
+                        ret = vmw_bo_to_validate_list
+                                (sw_context, vbo,
+                                 vmw_resource_needs_backup(res), NULL);
+                        if (ret) {
+                                ttm_bo_unreserve(&vbo->base);
+                                return ret;
+                        }
+                }
         }
         return 0;
 }

-
 /**
  * vmw_cmd_res_reloc_add - Add a resource to a software context's
  * relocation- and validation lists.
  *
  * @dev_priv: Pointer to a struct vmw_private identifying the device.
  * @sw_context: Pointer to the software context.
- * @res_type: Resource type.
  * @id_loc: Pointer to where the id that needs translation is located.
  * @res: Valid pointer to a struct vmw_resource.
  * @p_val: If non null, a pointer to the struct vmw_resource_validate_node
@@ -435,7 +610,6 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
  */
 static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
                                  struct vmw_sw_context *sw_context,
-                                 enum vmw_res_type res_type,
                                  uint32_t *id_loc,
                                  struct vmw_resource *res,
                                  struct vmw_resource_val_node **p_val)
@@ -454,29 +628,6 @@ static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
         if (unlikely(ret != 0))
                 return ret;

-        if (res_type == vmw_res_context && dev_priv->has_mob &&
-            node->first_usage) {
-
-                /*
-                 * Put contexts first on the list to be able to exit
-                 * list traversal for contexts early.
-                 */
-                list_del(&node->head);
-                list_add(&node->head, &sw_context->resource_list);
-
-                ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
-                if (unlikely(ret != 0))
-                        return ret;
-                node->staged_bindings =
-                        kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
-                if (node->staged_bindings == NULL) {
-                        DRM_ERROR("Failed to allocate context binding "
-                                  "information.\n");
-                        return -ENOMEM;
-                }
-                INIT_LIST_HEAD(&node->staged_bindings->list);
-        }
-
         if (p_val)
                 *p_val = node;

@@ -554,7 +705,7 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
         rcache->res = res;
         rcache->handle = *id_loc;

-        ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, res_type, id_loc,
+        ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
                                     res, &node);
         if (unlikely(ret != 0))
                 goto out_no_reloc;
@@ -589,7 +740,8 @@ static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
                 if (unlikely(!val->staged_bindings))
                         break;

-                ret = vmw_context_rebind_all(val->res);
+                ret = vmw_binding_rebind_all
+                        (vmw_context_binding_state(val->res));
                 if (unlikely(ret != 0)) {
                         if (ret != -ERESTARTSYS)
                                 DRM_ERROR("Failed to rebind context.\n");
@@ -600,6 +752,69 @@ static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
         return 0;
 }

+/**
+ * vmw_view_bindings_add - Add an array of view bindings to a context
+ * binding state tracker.
+ *
+ * @sw_context: The execbuf state used for this command.
+ * @view_type: View type for the bindings.
+ * @binding_type: Binding type for the bindings.
+ * @shader_slot: The shader slot to use for the bindings.
+ * @view_ids: Array of view ids to be bound.
+ * @num_views: Number of view ids in @view_ids.
+ * @first_slot: The binding slot to be used for the first view id in @view_ids.
+ */
+static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
+                                 enum vmw_view_type view_type,
+                                 enum vmw_ctx_binding_type binding_type,
+                                 uint32 shader_slot,
+                                 uint32 view_ids[], u32 num_views,
+                                 u32 first_slot)
+{
+        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+        struct vmw_cmdbuf_res_manager *man;
+        u32 i;
+        int ret;
+
+        if (!ctx_node) {
+                DRM_ERROR("DX Context not set.\n");
+                return -EINVAL;
+        }
+
+        man = sw_context->man;
+        for (i = 0; i < num_views; ++i) {
+                struct vmw_ctx_bindinfo_view binding;
+                struct vmw_resource *view = NULL;
+
+                if (view_ids[i] != SVGA3D_INVALID_ID) {
+                        view = vmw_view_lookup(man, view_type, view_ids[i]);
+                        if (IS_ERR(view)) {
+                                DRM_ERROR("View not found.\n");
+                                return PTR_ERR(view);
+                        }
+
+                        ret = vmw_view_res_val_add(sw_context, view);
+                        if (ret) {
+                                DRM_ERROR("Could not add view to "
+                                          "validation list.\n");
+                                vmw_resource_unreference(&view);
+                                return ret;
+                        }
+                }
+                binding.bi.ctx = ctx_node->res;
+                binding.bi.res = view;
+                binding.bi.bt = binding_type;
+                binding.shader_slot = shader_slot;
+                binding.slot = first_slot + i;
+                vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+                                shader_slot, binding.slot);
+                if (view)
+                        vmw_resource_unreference(&view);
+        }
+
+        return 0;
+}
+
 /**
  * vmw_cmd_cid_check - Check a command header for valid context information.
  *
@@ -638,6 +853,12 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,

         cmd = container_of(header, struct vmw_sid_cmd, header);

+        if (cmd->body.type >= SVGA3D_RT_MAX) {
+                DRM_ERROR("Illegal render target type %u.\n",
+                          (unsigned) cmd->body.type);
+                return -EINVAL;
+        }
+
         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                 user_context_converter, &cmd->body.cid,
                                 &ctx_node);
@@ -651,13 +872,14 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
                 return ret;

         if (dev_priv->has_mob) {
-                struct vmw_ctx_bindinfo bi;
+                struct vmw_ctx_bindinfo_view binding;

-                bi.ctx = ctx_node->res;
-                bi.res = res_node ? res_node->res : NULL;
-                bi.bt = vmw_ctx_binding_rt;
-                bi.i1.rt_type = cmd->body.type;
-                return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
+                binding.bi.ctx = ctx_node->res;
+                binding.bi.res = res_node ? res_node->res : NULL;
+                binding.bi.bt = vmw_ctx_binding_rt;
+                binding.slot = cmd->body.type;
+                vmw_binding_add(ctx_node->staged_bindings,
+                                &binding.bi, 0, binding.slot);
         }

         return 0;
@@ -1364,6 +1586,12 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
                 if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
                         continue;

+                if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
+                        DRM_ERROR("Illegal texture/sampler unit %u.\n",
+                                  (unsigned) cur_state->stage);
+                        return -EINVAL;
+                }
+
                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                         user_surface_converter,
                                         &cur_state->value, &res_node);
@@ -1371,14 +1599,14 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
                         return ret;

                 if (dev_priv->has_mob) {
-                        struct vmw_ctx_bindinfo bi;
-
-                        bi.ctx = ctx_node->res;
-                        bi.res = res_node ? res_node->res : NULL;
-                        bi.bt = vmw_ctx_binding_tex;
-                        bi.i1.texture_stage = cur_state->stage;
-                        vmw_context_binding_add(ctx_node->staged_bindings,
-                                                &bi);
+                        struct vmw_ctx_bindinfo_tex binding;
+
+                        binding.bi.ctx = ctx_node->res;
+                        binding.bi.res = res_node ? res_node->res : NULL;
+                        binding.bi.bt = vmw_ctx_binding_tex;
+                        binding.texture_stage = cur_state->stage;
+                        vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+                                        0, binding.texture_stage);
                 }
         }

@@ -1408,6 +1636,47 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
         return ret;
 }

+
+/**
+ * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
+ * switching
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @val_node: The validation node representing the resource.
+ * @buf_id: Pointer to the user-space backup buffer handle in the command
+ * stream.
+ * @backup_offset: Offset of backup into MOB.
+ *
+ * This function prepares for registering a switch of backup buffers
+ * in the resource metadata just prior to unreserving. vmw_cmd_switch_backup
+ * is basically a wrapper around this function with a different interface.
+ */
+static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
+                                     struct vmw_sw_context *sw_context,
+                                     struct vmw_resource_val_node *val_node,
+                                     uint32_t *buf_id,
+                                     unsigned long backup_offset)
+{
+        struct vmw_dma_buffer *dma_buf;
+        int ret;
+
+        ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
+        if (ret)
+                return ret;
+
+        val_node->switching_backup = true;
+        if (val_node->first_usage)
+                val_node->no_buffer_needed = true;
+
+        vmw_dmabuf_unreference(&val_node->new_backup);
+        val_node->new_backup = dma_buf;
+        val_node->new_backup_offset = backup_offset;
+
+        return 0;
+}
+
+
 /**
  * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
  *
@@ -1421,7 +1690,8 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
  * @backup_offset: Offset of backup into MOB.
  *
  * This function prepares for registering a switch of backup buffers
- * in the resource metadata just prior to unreserving.
+ * in the resource metadata just prior to unreserving. It's basically a wrapper
+ * around vmw_cmd_res_switch_backup with a different interface.
  */
 static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
                                  struct vmw_sw_context *sw_context,
@@ -1432,27 +1702,16 @@ static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
                                  uint32_t *buf_id,
                                  unsigned long backup_offset)
 {
-        int ret;
-        struct vmw_dma_buffer *dma_buf;
         struct vmw_resource_val_node *val_node;
+        int ret;

         ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
                                 converter, res_id, &val_node);
-        if (unlikely(ret != 0))
-                return ret;
-
-        ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
-        if (unlikely(ret != 0))
+        if (ret)
                 return ret;

-        if (val_node->first_usage)
-                val_node->no_buffer_needed = true;
-
-        vmw_dmabuf_unreference(&val_node->new_backup);
-        val_node->new_backup = dma_buf;
-        val_node->new_backup_offset = backup_offset;
-
-        return 0;
+        return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
+                                         buf_id, backup_offset);
 }

 /**
@@ -1704,10 +1963,10 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
         if (unlikely(!dev_priv->has_mob))
                 return 0;

-        ret = vmw_compat_shader_remove(vmw_context_res_man(val->res),
-                                       cmd->body.shid,
-                                       cmd->body.type,
-                                       &sw_context->staged_cmd_res);
+        ret = vmw_shader_remove(vmw_context_res_man(val->res),
+                                cmd->body.shid,
+                                cmd->body.type,
+                                &sw_context->staged_cmd_res);
         if (unlikely(ret != 0))
                 return ret;

|
@@ -1735,13 +1994,19 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
|
|
|
SVGA3dCmdSetShader body;
|
|
|
} *cmd;
|
|
|
struct vmw_resource_val_node *ctx_node, *res_node = NULL;
|
|
|
- struct vmw_ctx_bindinfo bi;
|
|
|
+ struct vmw_ctx_bindinfo_shader binding;
|
|
|
struct vmw_resource *res = NULL;
|
|
|
int ret;
|
|
|
|
|
|
cmd = container_of(header, struct vmw_set_shader_cmd,
|
|
|
header);
|
|
|
|
|
|
+ if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
|
|
|
+ DRM_ERROR("Illegal shader type %u.\n",
|
|
|
+ (unsigned) cmd->body.type);
|
|
|
+ return -EINVAL;
|
|
|
+ }
|
|
|
+
|
|
|
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
|
|
|
user_context_converter, &cmd->body.cid,
|
|
|
&ctx_node);
|
|
@@ -1752,14 +2017,12 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
                 return 0;

         if (cmd->body.shid != SVGA3D_INVALID_ID) {
-                res = vmw_compat_shader_lookup
-                        (vmw_context_res_man(ctx_node->res),
-                         cmd->body.shid,
-                         cmd->body.type);
+                res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
+                                        cmd->body.shid,
+                                        cmd->body.type);

                 if (!IS_ERR(res)) {
                         ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
-                                                    vmw_res_shader,
                                                     &cmd->body.shid, res,
                                                     &res_node);
                         vmw_resource_unreference(&res);
@@ -1777,11 +2040,13 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
                         return ret;
         }

-        bi.ctx = ctx_node->res;
-        bi.res = res_node ? res_node->res : NULL;
-        bi.bt = vmw_ctx_binding_shader;
-        bi.i1.shader_type = cmd->body.type;
-        return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
+        binding.bi.ctx = ctx_node->res;
+        binding.bi.res = res_node ? res_node->res : NULL;
+        binding.bi.bt = vmw_ctx_binding_shader;
+        binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
+        vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+                        binding.shader_slot, 0);
+        return 0;
 }

 /**
@@ -1843,78 +2108,705 @@ static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
                                   cmd->body.offsetInBytes);
 }

-static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
-                                struct vmw_sw_context *sw_context,
-                                void *buf, uint32_t *size)
+/**
+ * vmw_cmd_dx_set_single_constant_buffer - Validate an
+ * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int
+vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
+                                      struct vmw_sw_context *sw_context,
+                                      SVGA3dCmdHeader *header)
 {
-        uint32_t size_remaining = *size;
-        uint32_t cmd_id;
+        struct {
+                SVGA3dCmdHeader header;
+                SVGA3dCmdDXSetSingleConstantBuffer body;
+        } *cmd;
+        struct vmw_resource_val_node *res_node = NULL;
+        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+        struct vmw_ctx_bindinfo_cb binding;
+        int ret;

-        cmd_id = ((uint32_t *)buf)[0];
-        switch (cmd_id) {
-        case SVGA_CMD_UPDATE:
-                *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
-                break;
-        case SVGA_CMD_DEFINE_GMRFB:
-                *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
-                break;
-        case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
-                *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
-                break;
-        case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
-                *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
-                break;
-        default:
-                DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
+        if (unlikely(ctx_node == NULL)) {
+                DRM_ERROR("DX Context not set.\n");
                 return -EINVAL;
         }

-        if (*size > size_remaining) {
-                DRM_ERROR("Invalid SVGA command (size mismatch):"
-                          " %u.\n", cmd_id);
-                return -EINVAL;
-        }
+        cmd = container_of(header, typeof(*cmd), header);
+        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                                user_surface_converter,
+                                &cmd->body.sid, &res_node);
+        if (unlikely(ret != 0))
+                return ret;

-        if (unlikely(!sw_context->kernel)) {
-                DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
-                return -EPERM;
+        binding.bi.ctx = ctx_node->res;
+        binding.bi.res = res_node ? res_node->res : NULL;
+        binding.bi.bt = vmw_ctx_binding_cb;
+        binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
+        binding.offset = cmd->body.offsetInBytes;
+        binding.size = cmd->body.sizeInBytes;
+        binding.slot = cmd->body.slot;
+
+        if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
+            binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
+                DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
+                          (unsigned) cmd->body.type,
+                          (unsigned) binding.slot);
+                return -EINVAL;
         }

-        if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
-                return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
+        vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+                        binding.shader_slot, binding.slot);

         return 0;
 }

-static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
-        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
-                    false, false, false),
-        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
-                    false, false, false),
-        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
-                    true, false, false),
-        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
-                    true, false, false),
-        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
-                    true, false, false),
-        VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
-                    false, false, false),
-        VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
-                    false, false, false),
-        VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
-                    true, false, false),
-        VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
-                    true, false, false),
-        VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
-                    true, false, false),
-        VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
-                    &vmw_cmd_set_render_target_check, true, false, false),
-        VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
-                    true, false, false),
-        VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
-                    true, false, false),
-        VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
-                    true, false, false),
+/**
+ * vmw_cmd_dx_set_shader_res - Validate an
+ * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
+                                     struct vmw_sw_context *sw_context,
+                                     SVGA3dCmdHeader *header)
+{
+        struct {
+                SVGA3dCmdHeader header;
+                SVGA3dCmdDXSetShaderResources body;
+        } *cmd = container_of(header, typeof(*cmd), header);
+        u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
+                sizeof(SVGA3dShaderResourceViewId);
+
+        if ((u64) cmd->body.startView + (u64) num_sr_view >
+            (u64) SVGA3D_DX_MAX_SRVIEWS ||
+            cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
+                DRM_ERROR("Invalid shader binding.\n");
+                return -EINVAL;
+        }
+
+        return vmw_view_bindings_add(sw_context, vmw_view_sr,
+                                     vmw_ctx_binding_sr,
+                                     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
+                                     (void *) &cmd[1], num_sr_view,
+                                     cmd->body.startView);
+}
+
+/**
+ * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
+                                 struct vmw_sw_context *sw_context,
+                                 SVGA3dCmdHeader *header)
+{
+        struct {
+                SVGA3dCmdHeader header;
+                SVGA3dCmdDXSetShader body;
+        } *cmd;
+        struct vmw_resource *res = NULL;
+        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+        struct vmw_ctx_bindinfo_shader binding;
+        int ret = 0;
+
+        if (unlikely(ctx_node == NULL)) {
+                DRM_ERROR("DX Context not set.\n");
+                return -EINVAL;
+        }
+
+        cmd = container_of(header, typeof(*cmd), header);
+
+        if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
+                DRM_ERROR("Illegal shader type %u.\n",
+                          (unsigned) cmd->body.type);
+                return -EINVAL;
+        }
+
+        if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
+                res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
+                if (IS_ERR(res)) {
+                        DRM_ERROR("Could not find shader for binding.\n");
+                        return PTR_ERR(res);
+                }
+
+                ret = vmw_resource_val_add(sw_context, res, NULL);
+                if (ret)
+                        goto out_unref;
+        }
+
+        binding.bi.ctx = ctx_node->res;
+        binding.bi.res = res;
+        binding.bi.bt = vmw_ctx_binding_dx_shader;
+        binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
+
+        vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+                        binding.shader_slot, 0);
+out_unref:
+        if (res)
+                vmw_resource_unreference(&res);
+
+        return ret;
+}
+
+/**
+ * vmw_cmd_dx_set_vertex_buffers - Validates an
+ * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
+                                         struct vmw_sw_context *sw_context,
+                                         SVGA3dCmdHeader *header)
+{
+        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+        struct vmw_ctx_bindinfo_vb binding;
+        struct vmw_resource_val_node *res_node;
+        struct {
+                SVGA3dCmdHeader header;
+                SVGA3dCmdDXSetVertexBuffers body;
+                SVGA3dVertexBuffer buf[];
+        } *cmd;
+        int i, ret, num;
+
+        if (unlikely(ctx_node == NULL)) {
+                DRM_ERROR("DX Context not set.\n");
+                return -EINVAL;
+        }
+
+        cmd = container_of(header, typeof(*cmd), header);
+        num = (cmd->header.size - sizeof(cmd->body)) /
+                sizeof(SVGA3dVertexBuffer);
+        if ((u64)num + (u64)cmd->body.startBuffer >
+            (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
+                DRM_ERROR("Invalid number of vertex buffers.\n");
+                return -EINVAL;
+        }
+
+        for (i = 0; i < num; i++) {
+                ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                                        user_surface_converter,
+                                        &cmd->buf[i].sid, &res_node);
+                if (unlikely(ret != 0))
+                        return ret;
+
+                binding.bi.ctx = ctx_node->res;
+                binding.bi.bt = vmw_ctx_binding_vb;
+                binding.bi.res = ((res_node) ? res_node->res : NULL);
+                binding.offset = cmd->buf[i].offset;
+                binding.stride = cmd->buf[i].stride;
+                binding.slot = i + cmd->body.startBuffer;
+
+                vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+                                0, binding.slot);
+        }
+
+        return 0;
+}
+
+/**
+ * vmw_cmd_dx_set_index_buffer - Validate an
+ * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
+                                       struct vmw_sw_context *sw_context,
+                                       SVGA3dCmdHeader *header)
+{
+        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+        struct vmw_ctx_bindinfo_ib binding;
+        struct vmw_resource_val_node *res_node;
+        struct {
+                SVGA3dCmdHeader header;
+                SVGA3dCmdDXSetIndexBuffer body;
+        } *cmd;
+        int ret;
+
+        if (unlikely(ctx_node == NULL)) {
+                DRM_ERROR("DX Context not set.\n");
+                return -EINVAL;
+        }
+
+        cmd = container_of(header, typeof(*cmd), header);
+        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                                user_surface_converter,
+                                &cmd->body.sid, &res_node);
+        if (unlikely(ret != 0))
+                return ret;
+
+        binding.bi.ctx = ctx_node->res;
+        binding.bi.res = ((res_node) ? res_node->res : NULL);
+        binding.bi.bt = vmw_ctx_binding_ib;
+        binding.offset = cmd->body.offset;
+        binding.format = cmd->body.format;
+
+        vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);
+
+        return 0;
+}
+
+/**
+ * vmw_cmd_dx_set_rendertargets - Validate an
+ * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
+                                        struct vmw_sw_context *sw_context,
+                                        SVGA3dCmdHeader *header)
+{
+        struct {
+                SVGA3dCmdHeader header;
+                SVGA3dCmdDXSetRenderTargets body;
+        } *cmd = container_of(header, typeof(*cmd), header);
+        int ret;
+        u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
+                sizeof(SVGA3dRenderTargetViewId);
+
+        if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
+                DRM_ERROR("Invalid DX Rendertarget binding.\n");
+                return -EINVAL;
+        }
+
+        ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
+                                    vmw_ctx_binding_ds, 0,
+                                    &cmd->body.depthStencilViewId, 1, 0);
+        if (ret)
+                return ret;
+
+        return vmw_view_bindings_add(sw_context, vmw_view_rt,
+                                     vmw_ctx_binding_dx_rt, 0,
+                                     (void *)&cmd[1], num_rt_view, 0);
+}
+
+/**
+ * vmw_cmd_dx_clear_rendertarget_view - Validate an
+ * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
+                                              struct vmw_sw_context *sw_context,
+                                              SVGA3dCmdHeader *header)
+{
+        struct {
+                SVGA3dCmdHeader header;
+                SVGA3dCmdDXClearRenderTargetView body;
+        } *cmd = container_of(header, typeof(*cmd), header);
+
+        return vmw_view_id_val_add(sw_context, vmw_view_rt,
+                                   cmd->body.renderTargetViewId);
+}
+
+/**
+ * vmw_cmd_dx_clear_depthstencil_view - Validate an
+ * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
+                                              struct vmw_sw_context *sw_context,
+                                              SVGA3dCmdHeader *header)
+{
+        struct {
+                SVGA3dCmdHeader header;
+                SVGA3dCmdDXClearDepthStencilView body;
+        } *cmd = container_of(header, typeof(*cmd), header);
+
+        return vmw_view_id_val_add(sw_context, vmw_view_ds,
+                                   cmd->body.depthStencilViewId);
+}
+
+static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
+                                  struct vmw_sw_context *sw_context,
+                                  SVGA3dCmdHeader *header)
+{
+        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+        struct vmw_resource_val_node *srf_node;
+        struct vmw_resource *res;
+        enum vmw_view_type view_type;
+        int ret;
+        /*
+         * This is based on the fact that all affected define commands have
+         * the same initial command body layout.
+         */
+        struct {
+                SVGA3dCmdHeader header;
+                uint32 defined_id;
+                uint32 sid;
+        } *cmd;
+
+        if (unlikely(ctx_node == NULL)) {
+                DRM_ERROR("DX Context not set.\n");
+                return -EINVAL;
+        }
+
+        view_type = vmw_view_cmd_to_type(header->id);
+        cmd = container_of(header, typeof(*cmd), header);
+        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                                user_surface_converter,
+                                &cmd->sid, &srf_node);
+        if (unlikely(ret != 0))
+                return ret;
+
+        res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
+        ret = vmw_cotable_notify(res, cmd->defined_id);
+        vmw_resource_unreference(&res);
+        if (unlikely(ret != 0))
+                return ret;
+
+        return vmw_view_add(sw_context->man,
+                            ctx_node->res,
+                            srf_node->res,
+                            view_type,
+                            cmd->defined_id,
+                            header,
+                            header->size + sizeof(*header),
+                            &sw_context->staged_cmd_res);
+}
+
+static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
+                                struct vmw_sw_context *sw_context,
+                                SVGA3dCmdHeader *header)
+{
+        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+        struct vmw_resource *res;
+        /*
+         * This is based on the fact that all affected define commands have
+         * the same initial command body layout.
+         */
+        struct {
+                SVGA3dCmdHeader header;
+                uint32 defined_id;
+        } *cmd;
+        enum vmw_so_type so_type;
+        int ret;
+
+        if (unlikely(ctx_node == NULL)) {
+                DRM_ERROR("DX Context not set.\n");
+                return -EINVAL;
+        }
+
+        so_type = vmw_so_cmd_to_type(header->id);
+        res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
+        cmd = container_of(header, typeof(*cmd), header);
+        ret = vmw_cotable_notify(res, cmd->defined_id);
+        vmw_resource_unreference(&res);
+
+        return ret;
+}
+
+/**
+ * vmw_cmd_dx_check_subresource - Validate an
+ * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
+                                        struct vmw_sw_context *sw_context,
+                                        SVGA3dCmdHeader *header)
+{
+        struct {
+                SVGA3dCmdHeader header;
+                union {
+                        SVGA3dCmdDXReadbackSubResource r_body;
+                        SVGA3dCmdDXInvalidateSubResource i_body;
+                        SVGA3dCmdDXUpdateSubResource u_body;
+                        SVGA3dSurfaceId sid;
+                };
+        } *cmd;
+
+        BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
+                     offsetof(typeof(*cmd), sid));
+        BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
+                     offsetof(typeof(*cmd), sid));
+        BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
+                     offsetof(typeof(*cmd), sid));
+
+        cmd = container_of(header, typeof(*cmd), header);
+
+        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                                 user_surface_converter,
+                                 &cmd->sid, NULL);
+}
+
+static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
+                                struct vmw_sw_context *sw_context,
+                                SVGA3dCmdHeader *header)
+{
+        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+
+        if (unlikely(ctx_node == NULL)) {
+                DRM_ERROR("DX Context not set.\n");
+                return -EINVAL;
+        }
+
+        return 0;
+}
+
+/**
+ * vmw_cmd_dx_view_remove - validate a view remove command and
+ * schedule the view resource for removal.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ *
+ * Check that the view exists, and if it was not created using this
+ * command batch, make sure it's validated (present in the device) so that
+ * the remove command will not confuse the device.
+ */
+static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
+                                  struct vmw_sw_context *sw_context,
+                                  SVGA3dCmdHeader *header)
+{
+        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+        struct {
+                SVGA3dCmdHeader header;
+                union vmw_view_destroy body;
+        } *cmd = container_of(header, typeof(*cmd), header);
+        enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
+        struct vmw_resource *view;
+        int ret;
+
+        if (!ctx_node) {
+                DRM_ERROR("DX Context not set.\n");
+                return -EINVAL;
+        }
+
+        ret = vmw_view_remove(sw_context->man,
+                              cmd->body.view_id, view_type,
+                              &sw_context->staged_cmd_res,
+                              &view);
+        if (ret || !view)
+                return ret;
+
+        /*
+         * Add view to the validate list iff it was not created using this
+         * command batch.
+         */
+        return vmw_view_res_val_add(sw_context, view);
+}
+
+/**
+ * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
+                                    struct vmw_sw_context *sw_context,
+                                    SVGA3dCmdHeader *header)
+{
+        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+        struct vmw_resource *res;
+        struct {
+                SVGA3dCmdHeader header;
+                SVGA3dCmdDXDefineShader body;
+        } *cmd = container_of(header, typeof(*cmd), header);
+        int ret;
+
+        if (!ctx_node) {
+                DRM_ERROR("DX Context not set.\n");
+                return -EINVAL;
+        }
+
+        res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
+        ret = vmw_cotable_notify(res, cmd->body.shaderId);
+        vmw_resource_unreference(&res);
+        if (ret)
+                return ret;
+
+        return vmw_dx_shader_add(sw_context->man, ctx_node->res,
+                                 cmd->body.shaderId, cmd->body.type,
+                                 &sw_context->staged_cmd_res);
+}
+
+/**
+ * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
+                                     struct vmw_sw_context *sw_context,
+                                     SVGA3dCmdHeader *header)
+{
+        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+        struct {
+                SVGA3dCmdHeader header;
+                SVGA3dCmdDXDestroyShader body;
+        } *cmd = container_of(header, typeof(*cmd), header);
+        int ret;
+
+        if (!ctx_node) {
+                DRM_ERROR("DX Context not set.\n");
+                return -EINVAL;
+        }
+
+        ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
+                                &sw_context->staged_cmd_res);
+        if (ret)
+                DRM_ERROR("Could not find shader to remove.\n");
+
+        return ret;
+}
+
+/**
+ * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
+                                  struct vmw_sw_context *sw_context,
+                                  SVGA3dCmdHeader *header)
+{
+        struct vmw_resource_val_node *ctx_node;
+        struct vmw_resource_val_node *res_node;
+        struct vmw_resource *res;
+        struct {
+                SVGA3dCmdHeader header;
+                SVGA3dCmdDXBindShader body;
+        } *cmd = container_of(header, typeof(*cmd), header);
+        int ret;
+
+        if (cmd->body.cid != SVGA3D_INVALID_ID) {
+                ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+                                        user_context_converter,
+                                        &cmd->body.cid, &ctx_node);
+                if (ret)
+                        return ret;
+        } else {
+                ctx_node = sw_context->dx_ctx_node;
+                if (!ctx_node) {
+                        DRM_ERROR("DX Context not set.\n");
+                        return -EINVAL;
+                }
+        }
+
+        res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
+                                cmd->body.shid, 0);
+        if (IS_ERR(res)) {
+                DRM_ERROR("Could not find shader to bind.\n");
+                return PTR_ERR(res);
+        }
+
+        ret = vmw_resource_val_add(sw_context, res, &res_node);
+        if (ret) {
+                DRM_ERROR("Error creating resource validation node.\n");
+                goto out_unref;
+        }
+
+
+        ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
+                                        &cmd->body.mobid,
+                                        cmd->body.offsetInBytes);
+out_unref:
+        vmw_resource_unreference(&res);
+
+        return ret;
+}
+
+static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
+                                struct vmw_sw_context *sw_context,
+                                void *buf, uint32_t *size)
+{
+        uint32_t size_remaining = *size;
+        uint32_t cmd_id;
+
+        cmd_id = ((uint32_t *)buf)[0];
+        switch (cmd_id) {
+        case SVGA_CMD_UPDATE:
+                *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
+                break;
+        case SVGA_CMD_DEFINE_GMRFB:
+                *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
+                break;
+        case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
+                *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
+                break;
+        case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
+                *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
+                break;
+        default:
+                DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
+                return -EINVAL;
+        }
+
+        if (*size > size_remaining) {
+                DRM_ERROR("Invalid SVGA command (size mismatch):"
+                          " %u.\n", cmd_id);
+                return -EINVAL;
+        }
+
+        if (unlikely(!sw_context->kernel)) {
+                DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
+                return -EPERM;
+        }
+
+        if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
+                return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
+
+        return 0;
+}
+
+static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
+        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
+                    false, false, false),
+        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
+                    false, false, false),
+        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
+                    true, false, false),
+        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
+                    true, false, false),
+        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
+                    true, false, false),
+        VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
+                    false, false, false),
+        VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
+                    false, false, false),
+        VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
+                    true, false, false),
+        VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
+                    true, false, false),
+        VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
+                    true, false, false),
+        VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
+                    &vmw_cmd_set_render_target_check, true, false, false),
+        VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
+                    true, false, false),
+        VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
+                    true, false, false),
+        VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
+                    true, false, false),
         VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
                     true, false, false),
         VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
@@ -2050,7 +2942,136 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
|
|
|
VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
|
|
|
false, false, true),
|
|
|
VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
|
|
|
- true, false, true)
|
|
|
+ true, false, true),
|
|
|
+ VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
|
|
|
+ false, false, true),
|
|
|
+ VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
|
|
|
+ false, false, true),
|
|
|
+ VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
|
|
|
+ false, false, true),
|
|
|
+ VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
|
|
|
+ false, false, true),
|
|
|
+
|
|
|
+ /*
|
|
|
+ * DX commands
|
|
|
+ */
|
|
|
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
|
|
|
+ false, false, true),
|
|
|
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
|
|
|
+ false, false, true),
|
|
|
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
|
|
|
+ false, false, true),
|
|
|
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
|
|
|
+ false, false, true),
|
|
|
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
|
|
|
+ false, false, true),
|
|
|
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
|
|
|
+ &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
|
|
|
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
|
|
|
+ &vmw_cmd_dx_set_shader_res, true, false, true),
|
|
|
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_invalid,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED, &vmw_cmd_invalid,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_invalid,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
+		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
+		    &vmw_cmd_dx_set_index_buffer, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
+		    &vmw_cmd_dx_set_rendertargets, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
+		    &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_invalid,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_invalid,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_invalid,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_invalid,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_invalid,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_invalid,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
+		    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
+		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION, &vmw_cmd_invalid,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_invalid,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
+		    &vmw_cmd_dx_check_subresource, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
+		    &vmw_cmd_dx_check_subresource, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
+		    &vmw_cmd_dx_check_subresource, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
+		    &vmw_cmd_dx_view_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
+		    &vmw_cmd_dx_view_remove, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
+		    &vmw_cmd_dx_view_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
+		    &vmw_cmd_dx_view_remove, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
+		    &vmw_cmd_dx_view_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
+		    &vmw_cmd_dx_view_remove, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
+		    &vmw_cmd_dx_so_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
+		    &vmw_cmd_dx_so_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
+		    &vmw_cmd_dx_so_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
+		    &vmw_cmd_dx_so_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
+		    &vmw_cmd_dx_so_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
+		    &vmw_cmd_dx_define_shader, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
+		    &vmw_cmd_dx_destroy_shader, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
+		    &vmw_cmd_dx_bind_shader, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
+		    &vmw_cmd_dx_so_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_invalid,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
+		    &vmw_cmd_dx_cid_check, true, false, true),
 };
 
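The table above relies on the VMW_CMD_DEF wrapper to build a sparse dispatch table with C99 designated array initializers, indexed by (command id - SVGA_3D_CMD_BASE). A minimal standalone sketch of the same technique; every id and handler below is invented for illustration, nothing is from the driver:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical command ids; the real SVGA3D ids live in device headers. */
enum { CMD_BASE = 1000, CMD_DRAW = 1001, CMD_CLEAR = 1002 };

typedef int (*cmd_func)(const void *body);

struct cmd_entry {
	cmd_func func;
	bool user_allow;   /* may unprivileged user space submit this? */
	bool gb_disable;   /* valid when guest-backed objects are off */
	bool gb_enable;    /* valid when guest-backed objects are on */
};

static int cmd_draw(const void *body)  { (void)body; return 0; }
static int cmd_clear(const void *body) { (void)body; return 0; }

/* Designated initializers index the table by (id - CMD_BASE), so entries
 * can be listed in any order and gaps default to all-zero (invalid). */
static const struct cmd_entry cmd_table[] = {
	[CMD_DRAW  - CMD_BASE] = { cmd_draw,  true, false, true },
	[CMD_CLEAR - CMD_BASE] = { cmd_clear, true, false, true },
};

static int dispatch(unsigned int id, const void *body)
{
	if (id < CMD_BASE ||
	    id - CMD_BASE >= sizeof(cmd_table) / sizeof(cmd_table[0]))
		return -1;                       /* id out of table range */
	if (!cmd_table[id - CMD_BASE].func)
		return -1;                       /* gap: unknown command */
	return cmd_table[id - CMD_BASE].func(body);
}

int main(void)
{
	printf("%d\n", dispatch(CMD_DRAW, NULL)); /* 0: handled */
	printf("%d\n", dispatch(999, NULL));      /* -1: rejected */
	return 0;
}
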
 static int vmw_cmd_check(struct vmw_private *dev_priv,
@@ -2183,7 +3204,8 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
  *
  * @list: The resource list.
  */
-static void vmw_resource_list_unreference(struct list_head *list)
+static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
+					  struct list_head *list)
 {
 	struct vmw_resource_val_node *val, *val_next;
 
@@ -2194,8 +3216,15 @@ static void vmw_resource_list_unreference(struct list_head *list)
 	list_for_each_entry_safe(val, val_next, list, head) {
 		list_del_init(&val->head);
 		vmw_resource_unreference(&val->res);
-		if (unlikely(val->staged_bindings))
-			kfree(val->staged_bindings);
+
+		if (val->staged_bindings) {
+			if (val->staged_bindings != sw_context->staged_bindings)
+				vmw_binding_state_free(val->staged_bindings);
+			else
+				sw_context->staged_bindings_inuse = false;
+			val->staged_bindings = NULL;
+		}
+
 		kfree(val);
 	}
 }
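The teardown above distinguishes a node that owns a private staged-bindings copy from one that borrowed the sw_context's shared scratch object: only owned copies are freed, the shared one is merely marked unused. A compilable sketch of that ownership rule, with invented struct and function names:

#include <stdbool.h>
#include <stdlib.h>

struct bindings { int dummy; };       /* stand-in staged-bindings object */

struct scratch {
	struct bindings *shared;      /* one preallocated scratch object */
	bool shared_inuse;
};

static void node_drop_bindings(struct scratch *s, struct bindings **node_b)
{
	if (!*node_b)
		return;
	if (*node_b != s->shared)
		free(*node_b);            /* node owned a private copy */
	else
		s->shared_inuse = false;  /* just release the shared scratch */
	*node_b = NULL;
}
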
@@ -2431,8 +3460,13 @@ static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
 				   u32 command_size,
 				   struct vmw_sw_context *sw_context)
 {
-	void *cmd = vmw_fifo_reserve(dev_priv, command_size);
+	void *cmd;
+
+	if (sw_context->dx_ctx_node)
+		cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
+					  sw_context->dx_ctx_node->res->id);
+	else
+		cmd = vmw_fifo_reserve(dev_priv, command_size);
 	if (!cmd) {
 		DRM_ERROR("Failed reserving fifo space for commands.\n");
 		return -ENOMEM;
@@ -2464,8 +3498,10 @@ static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
 				     u32 command_size,
 				     struct vmw_sw_context *sw_context)
 {
+	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
+		  SVGA3D_INVALID_ID);
 	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
-				       SVGA3D_INVALID_ID, false, header);
+				       id, false, header);
 
 	vmw_apply_relocations(sw_context);
 	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
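Both submission paths above tag a DX command stream with the bound context's device id and fall back to an invalid-id sentinel when no DX context is tied to the submission. A tiny sketch of that selection; the sentinel value and field names mirror the shape of the hunks, not exact driver definitions:

#define INVALID_CTX_ID ((unsigned int)-1)   /* illustrative sentinel */

struct ctx_node { unsigned int id; };
struct sw_state { const struct ctx_node *dx_ctx_node; };

/* DX streams must name the context they execute in; pre-DX streams
 * are submitted with the invalid-id sentinel instead. */
static unsigned int submit_ctx_id(const struct sw_state *sw)
{
	return sw->dx_ctx_node ? sw->dx_ctx_node->id : INVALID_CTX_ID;
}
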
@@ -2535,12 +3571,44 @@ static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
 	return kernel_commands;
 }
 
+static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
+				   struct vmw_sw_context *sw_context,
+				   uint32_t handle)
+{
+	struct vmw_resource_val_node *ctx_node;
+	struct vmw_resource *res;
+	int ret;
+
+	if (handle == SVGA3D_INVALID_ID)
+		return 0;
+
+	ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
+					      handle, user_context_converter,
+					      &res);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Could not find or use DX context 0x%08x.\n",
+			  (unsigned) handle);
+		return ret;
+	}
+
+	ret = vmw_resource_val_add(sw_context, res, &ctx_node);
+	if (unlikely(ret != 0))
+		goto out_err;
+
+	sw_context->dx_ctx_node = ctx_node;
+	sw_context->man = vmw_context_res_man(res);
+out_err:
+	vmw_resource_unreference(&res);
+	return ret;
+}
+
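vmw_execbuf_tie_context follows a common refcounting shape: the handle lookup returns one reference, the validation list takes its own, and the lookup reference is dropped on every exit path. A self-contained sketch under those assumptions; every helper below is invented for illustration:

#include <errno.h>

struct res { int refcount; };

static struct res handle_table[4];      /* stand-in for the handle lookup */

static struct res *lookup_handle(unsigned int handle)
{
	if (handle >= 4)
		return NULL;
	handle_table[handle].refcount++;    /* lookup returns a reference */
	return &handle_table[handle];
}

static void res_unref(struct res **r)
{
	(*r)->refcount--;
	*r = NULL;
}

struct sw_state { struct res *dx_ctx; };

static void val_list_add(struct sw_state *sw, struct res *r)
{
	r->refcount++;          /* the validation list holds its own ref */
	sw->dx_ctx = r;
}

static int tie_context(struct sw_state *sw, unsigned int handle,
		       unsigned int invalid_id)
{
	struct res *r;

	if (handle == invalid_id)
		return 0;       /* no DX context bound: nothing to do */

	r = lookup_handle(handle);
	if (!r)
		return -EINVAL;

	val_list_add(sw, r);
	/* Drop the lookup reference on every exit path; the validation
	 * list's reference is what keeps the context alive from here. */
	res_unref(&r);
	return 0;
}
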
 int vmw_execbuf_process(struct drm_file *file_priv,
 			struct vmw_private *dev_priv,
 			void __user *user_commands,
 			void *kernel_commands,
 			uint32_t command_size,
 			uint64_t throttle_us,
+			uint32_t dx_context_handle,
 			struct drm_vmw_fence_rep __user *user_fence_rep,
 			struct vmw_fence_obj **out_fence)
 {
@@ -2596,12 +3664,17 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	sw_context->cur_reloc = 0;
 	sw_context->cur_val_buf = 0;
 	INIT_LIST_HEAD(&sw_context->resource_list);
+	INIT_LIST_HEAD(&sw_context->ctx_resource_list);
 	sw_context->cur_query_bo = dev_priv->pinned_bo;
 	sw_context->last_query_ctx = NULL;
 	sw_context->needs_post_query_barrier = false;
+	sw_context->dx_ctx_node = NULL;
 	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
 	INIT_LIST_HEAD(&sw_context->validate_nodes);
 	INIT_LIST_HEAD(&sw_context->res_relocations);
+	if (sw_context->staged_bindings)
+		vmw_binding_state_reset(sw_context->staged_bindings);
+
 	if (!sw_context->res_ht_initialized) {
 		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
 		if (unlikely(ret != 0))
@@ -2610,11 +3683,20 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	}
 	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
 	INIT_LIST_HEAD(&resource_list);
+	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
+	if (unlikely(ret != 0)) {
+		list_splice_init(&sw_context->ctx_resource_list,
+				 &sw_context->resource_list);
+		goto out_err_nores;
+	}
+
 	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
 				command_size);
 	if (unlikely(ret != 0))
 		goto out_err_nores;
 
+	list_splice_init(&sw_context->ctx_resource_list,
+			 &sw_context->resource_list);
 	ret = vmw_resources_reserve(sw_context);
 	if (unlikely(ret != 0))
 		goto out_err_nores;
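The splices above move the separately collected context resources onto the main resource list so that a single unreserve/unreference pass covers them on both the success and error paths. A kernel-style fragment of the idiom, assuming <linux/list.h>; the struct and function names are invented:

#include <linux/list.h>

struct cleanup_lists {
	struct list_head resource_list;      /* everything to unreference */
	struct list_head ctx_resource_list;  /* contexts added up front */
};

static void merge_for_cleanup(struct cleanup_lists *l)
{
	/* After this, ctx_resource_list is empty and may be reused;
	 * all of its nodes now hang off resource_list. */
	list_splice_init(&l->ctx_resource_list, &l->resource_list);
}
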
@@ -2622,7 +3704,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
 				     true, NULL);
 	if (unlikely(ret != 0))
-		goto out_err;
+		goto out_err_nores;
 
 	ret = vmw_validate_buffers(dev_priv, sw_context);
 	if (unlikely(ret != 0))
@@ -2652,8 +3734,9 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 						   sw_context);
 		header = NULL;
 	}
+	mutex_unlock(&dev_priv->binding_mutex);
 	if (ret)
-		goto out_unlock_binding;
+		goto out_err;
 
 	vmw_query_bo_switch_commit(dev_priv, sw_context);
 	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
@@ -2668,8 +3751,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	if (ret != 0)
 		DRM_ERROR("Fence submission error. Syncing.\n");
 
-	vmw_resource_list_unreserve(&sw_context->resource_list, false);
-	mutex_unlock(&dev_priv->binding_mutex);
+	vmw_resource_list_unreserve(sw_context, &sw_context->resource_list,
+				    false);
 
 	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
 				    (void *) fence);
@@ -2698,7 +3781,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	 * Unreference resources outside of the cmdbuf_mutex to
 	 * avoid deadlocks in resource destruction paths.
 	 */
-	vmw_resource_list_unreference(&resource_list);
+	vmw_resource_list_unreference(sw_context, &resource_list);
 
 	return 0;
 
@@ -2707,7 +3790,8 @@ out_unlock_binding:
 out_err:
 	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
 out_err_nores:
-	vmw_resource_list_unreserve(&sw_context->resource_list, true);
+	vmw_resource_list_unreserve(sw_context, &sw_context->resource_list,
+				    true);
 	vmw_resource_relocations_free(&sw_context->res_relocations);
 	vmw_free_relocations(sw_context);
 	vmw_clear_validations(sw_context);
@@ -2725,7 +3809,7 @@ out_unlock:
 	 * Unreference resources outside of the cmdbuf_mutex to
 	 * avoid deadlocks in resource destruction paths.
 	 */
-	vmw_resource_list_unreference(&resource_list);
+	vmw_resource_list_unreference(sw_context, &resource_list);
 	if (unlikely(error_resource != NULL))
 		vmw_resource_unreference(&error_resource);
 out_free_header:
@@ -2877,36 +3961,68 @@ void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
 	mutex_unlock(&dev_priv->cmdbuf_mutex);
 }
 
-
-int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
-		      struct drm_file *file_priv)
+int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
+		      struct drm_file *file_priv, size_t size)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
-	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
+	struct drm_vmw_execbuf_arg arg;
 	int ret;
+	static const size_t copy_offset[] = {
+		offsetof(struct drm_vmw_execbuf_arg, context_handle),
+		sizeof(struct drm_vmw_execbuf_arg)};
+
+	if (unlikely(size < copy_offset[0])) {
+		DRM_ERROR("Invalid command size, ioctl %d\n",
+			  DRM_VMW_EXECBUF);
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
+		return -EFAULT;
 
 	/*
-	 * This will allow us to extend the ioctl argument while
+	 * Extend the ioctl argument while
 	 * maintaining backwards compatibility:
 	 * We take different code paths depending on the value of
-	 * arg->version.
+	 * arg.version.
 	 */
-
-	if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
+	if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
+		     arg.version == 0)) {
 		DRM_ERROR("Incorrect execbuf version.\n");
-		DRM_ERROR("You're running outdated experimental "
-			  "vmwgfx user-space drivers.");
 		return -EINVAL;
 	}
 
+	if (arg.version > 1 &&
+	    copy_from_user(&arg.context_handle,
+			   (void __user *) (data + copy_offset[0]),
+			   copy_offset[arg.version - 1] -
+			   copy_offset[0]) != 0)
+		return -EFAULT;
+
+	switch (arg.version) {
+	case 1:
+		arg.context_handle = (uint32_t) -1;
+		break;
+	case 2:
+		if (arg.pad64 != 0) {
+			DRM_ERROR("Unused IOCTL data not set to zero.\n");
+			return -EINVAL;
+		}
+		break;
+	default:
+		break;
+	}
+
 	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 	if (unlikely(ret != 0))
 		return ret;
 
 	ret = vmw_execbuf_process(file_priv, dev_priv,
-				  (void __user *)(unsigned long)arg->commands,
-				  NULL, arg->command_size, arg->throttle_us,
-				  (void __user *)(unsigned long)arg->fence_rep,
+				  (void __user *)(unsigned long)arg.commands,
+				  NULL, arg.command_size, arg.throttle_us,
+				  arg.context_handle,
+				  (void __user *)(unsigned long)arg.fence_rep,
 				  NULL);
 	ttm_read_unlock(&dev_priv->reservation_sem);
 	if (unlikely(ret != 0))
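The rewritten ioctl copies the version-independent prefix of the argument first, validates arg.version, and only then copies the bytes that version actually defines, so an older binary passing a shorter struct keeps working. A userspace sketch of the same offset-table technique, with memcpy standing in for copy_from_user and an invented struct layout:

#include <errno.h>
#include <stddef.h>
#include <string.h>

struct execbuf_arg {
	unsigned int version;        /* v1 fields ... */
	unsigned int command_size;
	/* --- end of v1 layout --- */
	unsigned int context_handle; /* added in v2 */
	unsigned int pad64;
};

#define MAX_VERSION 2

/* copy_offset[v - 1] = how many bytes an ABI version v argument carries. */
static const size_t copy_offset[] = {
	offsetof(struct execbuf_arg, context_handle), /* bytes in a v1 arg */
	sizeof(struct execbuf_arg),                   /* bytes in a v2 arg */
};

static int fetch_arg(struct execbuf_arg *arg, const void *user, size_t size)
{
	if (size < copy_offset[0])
		return -EINVAL;                  /* not even a v1 struct */

	memcpy(arg, user, copy_offset[0]);       /* version-independent part */

	if (arg->version == 0 || arg->version > MAX_VERSION)
		return -EINVAL;

	if (arg->version > 1)                    /* copy the versioned tail */
		memcpy((char *)arg + copy_offset[0],
		       (const char *)user + copy_offset[0],
		       copy_offset[arg->version - 1] - copy_offset[0]);
	else
		arg->context_handle = (unsigned int)-1;  /* v1 default */

	return 0;
}
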