@@ -489,12 +489,9 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
 }
 
 /**
- * i915_guc_submit() - Submit commands through GuC
+ * __i915_guc_submit() - Submit commands through GuC
  * @rq: request associated with the commands
  *
- * Return: 0 on success, otherwise an errno.
- * (Note: nonzero really shouldn't happen!)
- *
  * The caller must have already called i915_guc_wq_reserve() above with
  * a result of 0 (success), guaranteeing that there is space in the work
  * queue for the new request, so enqueuing the item cannot fail.
@@ -506,7 +503,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
  * The only error here arises if the doorbell hardware isn't functioning
  * as expected, which really shouln't happen.
  */
-static void i915_guc_submit(struct drm_i915_gem_request *rq)
+static void __i915_guc_submit(struct drm_i915_gem_request *rq)
 {
 	struct drm_i915_private *dev_priv = rq->i915;
 	struct intel_engine_cs *engine = rq->engine;
@@ -515,17 +512,6 @@ static void i915_guc_submit(struct drm_i915_gem_request *rq)
 	struct i915_guc_client *client = guc->execbuf_client;
 	int b_ret;
 
-	/* We keep the previous context alive until we retire the following
-	 * request. This ensures that any the context object is still pinned
-	 * for any residual writes the HW makes into it on the context switch
-	 * into the next object following the breadcrumb. Otherwise, we may
-	 * retire the context too early.
-	 */
-	rq->previous_context = engine->last_context;
-	engine->last_context = rq->ctx;
-
-	i915_gem_request_submit(rq);
-
 	spin_lock(&client->wq_lock);
 	guc_wq_item_append(client, rq);
 
@@ -545,6 +531,23 @@ static void i915_guc_submit(struct drm_i915_gem_request *rq)
 	spin_unlock(&client->wq_lock);
 }
 
+static void i915_guc_submit(struct drm_i915_gem_request *rq)
+{
+	struct intel_engine_cs *engine = rq->engine;
+
+	/* We keep the previous context alive until we retire the following
+	 * request. This ensures that any the context object is still pinned
+	 * for any residual writes the HW makes into it on the context switch
+	 * into the next object following the breadcrumb. Otherwise, we may
+	 * retire the context too early.
+	 */
+	rq->previous_context = engine->last_context;
+	engine->last_context = rq->ctx;
+
+	i915_gem_request_submit(rq);
+	__i915_guc_submit(rq);
+}
+
 /*
  * Everything below here is concerned with setup & teardown, and is
  * therefore not part of the somewhat time-critical batch-submission
@@ -1443,7 +1446,7 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
 	/* Replay the current set of previously submitted requests */
 	list_for_each_entry(rq, &engine->timeline->requests, link) {
 		client->wq_rsvd += sizeof(struct guc_wq_item);
-		i915_guc_submit(rq);
+		__i915_guc_submit(rq);
 	}
 }
 
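The patch above splits submission into a wrapper and a hot path: __i915_guc_submit() now does only the GuC work-queue append and doorbell ring, while the new i915_guc_submit() wrapper performs the per-request bookkeeping (handing over engine->last_context and calling i915_gem_request_submit()) before invoking it. That is what lets the replay loop in i915_guc_submission_enable() call __i915_guc_submit() directly: already-submitted requests only need to be re-enqueued to the GuC, not re-run through bookkeeping that happened on first submission. A minimal standalone sketch of that wrapper/worker split is below; every name in it (fake_request, submit_to_fw(), etc.) is a hypothetical stand-in for illustration, not part of the i915/GuC API.

/* Illustrative only: a hypothetical model of the wrapper/hot-path split
 * used in the patch above. None of these names are real i915 symbols.
 */
#include <stdio.h>

struct fake_request {
	int id;
	int bookkeeping_done;	/* set by the wrapper, exactly once */
};

/* Hot path: hand the request to the firmware work queue. Assumes all
 * per-request bookkeeping has already happened (cf. __i915_guc_submit()).
 */
static void submit_to_fw(struct fake_request *rq)
{
	printf("enqueue request %d to firmware work queue\n", rq->id);
}

/* Wrapper: do the one-time bookkeeping, then take the hot path
 * (cf. i915_guc_submit()).
 */
static void submit(struct fake_request *rq)
{
	rq->bookkeeping_done = 1;
	submit_to_fw(rq);
}

int main(void)
{
	struct fake_request rq = { .id = 1 };

	/* Normal submission goes through the wrapper... */
	submit(&rq);

	/* ...but a replay after re-enabling submission calls the hot path
	 * directly, since the bookkeeping was already done the first time
	 * (cf. the __i915_guc_submit() call in i915_guc_submission_enable()).
	 */
	submit_to_fw(&rq);
	return 0;
}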