@@ -1415,7 +1415,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	struct i915_address_space *vm;
 	struct i915_execbuffer_params params_master; /* XXX: will be removed later */
 	struct i915_execbuffer_params *params = &params_master;
-	struct drm_i915_gem_request *request;
 	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
 	u32 dispatch_flags;
 	int ret;
@@ -1615,7 +1614,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);

 	/* Allocate a request for this batch buffer nice and early. */
-	ret = i915_gem_request_alloc(ring, ctx, &request);
+	ret = i915_gem_request_alloc(ring, ctx, &params->request);
 	if (ret)
 		goto err_batch_unpin;
@@ -1649,6 +1648,16 @@ err:
 	i915_gem_context_unreference(ctx);
 	eb_destroy(eb);

+	/*
+	 * If the request was created but not successfully submitted then it
+	 * must be freed again. If it was submitted then it is being tracked
+	 * on the active request list and no clean up is required here.
+	 */
+	if (ret && params->request) {
+		i915_gem_request_cancel(params->request);
+		ring->outstanding_lazy_request = NULL;
+	}
+
 	mutex_unlock(&dev->struct_mutex);

 pre_mutex_err: