@@ -184,42 +184,27 @@ static int shadow_context_status_change(struct notifier_block *nb,
 	return NOTIFY_OK;
 }
 
-static int dispatch_workload(struct intel_vgpu_workload *workload)
+/**
+ * intel_gvt_scan_and_shadow_workload - audit the workload by scanning it and
+ * shadow it as well, including the ring buffer, wa_ctx and context.
+ * @workload: an abstract entity for each execlist submission.
+ *
+ * This function is called before the workload is submitted to i915, to make
+ * sure the content of the workload is valid.
+ */
+int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 {
 	int ring_id = workload->ring_id;
 	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
 	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
-	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
 	struct drm_i915_gem_request *rq;
 	struct intel_vgpu *vgpu = workload->vgpu;
-	struct intel_ring *ring;
 	int ret;
 
-	gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
-		ring_id, workload);
-
 	shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
 	shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
 				    GEN8_CTX_ADDRESSING_MODE_SHIFT;
 
-	mutex_lock(&dev_priv->drm.struct_mutex);
-
-	/* pin shadow context by gvt even the shadow context will be pinned
-	 * when i915 alloc request. That is because gvt will update the guest
-	 * context from shadow context when workload is completed, and at that
-	 * moment, i915 may already unpined the shadow context to make the
-	 * shadow_ctx pages invalid. So gvt need to pin itself. After update
-	 * the guest context, gvt can unpin the shadow_ctx safely.
-	 */
-	ring = engine->context_pin(engine, shadow_ctx);
-	if (IS_ERR(ring)) {
-		ret = PTR_ERR(ring);
-		gvt_vgpu_err("fail to pin shadow context\n");
-		workload->status = ret;
-		mutex_unlock(&dev_priv->drm.struct_mutex);
-		return ret;
-	}
-
 	rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
 	if (IS_ERR(rq)) {
 		gvt_vgpu_err("fail to allocate gem request\n");
@@ -231,7 +216,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 
 	workload->req = i915_gem_request_get(rq);
 
-	ret = intel_gvt_scan_and_shadow_workload(workload);
+	ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
 	if (ret)
 		goto out;
 
@@ -243,6 +228,27 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 	}
 
 	ret = populate_shadow_context(workload);
+
+out:
+	return ret;
+}
+
+static int dispatch_workload(struct intel_vgpu_workload *workload)
+{
+	int ring_id = workload->ring_id;
+	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
+	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
+	struct intel_vgpu *vgpu = workload->vgpu;
+	struct intel_ring *ring;
+	int ret = 0;
+
+	gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
+		ring_id, workload);
+
+	mutex_lock(&dev_priv->drm.struct_mutex);
+
+	ret = intel_gvt_scan_and_shadow_workload(workload);
 	if (ret)
 		goto out;
 
@@ -252,19 +258,30 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 		goto out;
 	}
 
-	gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
-		ring_id, workload->req);
+	/* Pin the shadow context by GVT even though it will also be pinned
+	 * when i915 allocates a request. GVT updates the guest context from
+	 * the shadow context once the workload completes, and by then i915
+	 * may already have unpinned the shadow context, making the shadow_ctx
+	 * pages invalid. So GVT needs to take its own pin and can only unpin
+	 * the shadow_ctx safely after the guest context has been updated.
+	 */
+	ring = engine->context_pin(engine, shadow_ctx);
+	if (IS_ERR(ring)) {
+		ret = PTR_ERR(ring);
+		gvt_vgpu_err("fail to pin shadow context\n");
+		goto out;
+	}
 
-	ret = 0;
-	workload->dispatched = true;
 out:
 	if (ret)
 		workload->status = ret;
 
-	if (!IS_ERR_OR_NULL(rq))
-		i915_add_request(rq);
-	else
-		engine->context_unpin(engine, shadow_ctx);
+	if (!IS_ERR_OR_NULL(workload->req)) {
+		gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
+				ring_id, workload->req);
+		i915_add_request(workload->req);
+		workload->dispatched = true;
+	}
 
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 	return ret;
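
For readers following the refactor: after this patch, dispatch_workload() takes struct_mutex, calls intel_gvt_scan_and_shadow_workload() (which allocates the request and shadows the ring buffer, wa_ctx and context), then pins the shadow context, and on the out path submits the request whenever one was allocated, even if a later step failed. The stand-alone sketch below only models that ordering; the struct, helper names and stub bodies are illustrative stand-ins, not the real i915/GVT API.

/*
 * Minimal userspace model of the reworked dispatch flow; the types and
 * helpers are stand-ins, not the i915/GVT code.
 */
#include <pthread.h>
#include <stdio.h>

struct workload {
	int req_allocated;	/* models workload->req being valid */
	int dispatched;
	int status;
};

static pthread_mutex_t struct_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Stands in for intel_gvt_scan_and_shadow_workload(): allocate the
 * request, then scan and shadow the ring buffer, wa_ctx and context. */
static int scan_and_shadow(struct workload *w)
{
	w->req_allocated = 1;
	return 0;
}

/* Stands in for engine->context_pin() on the shadow context. */
static int pin_shadow_context(struct workload *w)
{
	(void)w;
	return 0;
}

static int dispatch(struct workload *w)
{
	int ret;

	pthread_mutex_lock(&struct_mutex);

	ret = scan_and_shadow(w);
	if (ret)
		goto out;

	ret = pin_shadow_context(w);
out:
	if (ret)
		w->status = ret;

	/* As in the patch, a request that was allocated is submitted on
	 * the out path even when a later step failed. */
	if (w->req_allocated)
		w->dispatched = 1;	/* i915_add_request() would go here */

	pthread_mutex_unlock(&struct_mutex);
	return ret;
}

int main(void)
{
	struct workload w = { 0, 0, 0 };
	int ret = dispatch(&w);

	printf("ret=%d dispatched=%d status=%d\n", ret, w.dispatched, w.status);
	return 0;
}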