@@ -201,6 +201,34 @@ static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
 	ce->lrc_desc = desc;
 }
 
+static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
+{
+	struct intel_vgpu *vgpu = workload->vgpu;
+	void *shadow_ring_buffer_va;
+	u32 *cs;
+
+	/* allocate shadow ring buffer */
+	cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
+	if (IS_ERR(cs)) {
+		gvt_vgpu_err("fail to alloc size =%ld shadow ring buffer\n",
+			workload->rb_len);
+		return PTR_ERR(cs);
+	}
+
+	shadow_ring_buffer_va = workload->shadow_ring_buffer_va;
+
+	/* get shadow ring buffer va */
+	workload->shadow_ring_buffer_va = cs;
+
+	memcpy(cs, shadow_ring_buffer_va,
+			workload->rb_len);
+
+	cs += workload->rb_len / sizeof(u32);
+	intel_ring_advance(workload->req, cs);
+
+	return 0;
+}
+
 /**
  * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
  * shadow it as well, include ringbuffer,wa_ctx and ctx.
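copy_workload_to_ring_buffer() above follows the usual i915 ring-emission pattern: intel_ring_begin() reserves a number of dwords on the request's ring and returns a cursor into it, and intel_ring_advance() then sanity-checks that the cursor was advanced by exactly the reserved amount. A minimal sketch of that pairing, outside the patch, assuming the request-based intel_ring_begin()/intel_ring_advance() helpers and MI_NOOP of this kernel version (the helper name is illustrative):

	static int emit_two_noops(struct drm_i915_gem_request *rq)
	{
		u32 *cs;

		/* reserve two dwords on rq's ring buffer */
		cs = intel_ring_begin(rq, 2);
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		*cs++ = MI_NOOP;
		*cs++ = MI_NOOP;

		/* cs must now point exactly past the reserved space */
		intel_ring_advance(rq, cs);
		return 0;
	}

copy_workload_to_ring_buffer() does the same thing at workload->rb_len scale: it reserves rb_len bytes worth of dwords, memcpy()s the previously scanned shadow ring buffer contents into the reservation, and advances the cursor past them.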
@@ -214,8 +242,10 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	int ring_id = workload->ring_id;
 	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
 	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
 	struct drm_i915_gem_request *rq;
 	struct intel_vgpu *vgpu = workload->vgpu;
+	struct intel_ring *ring;
 	int ret;
 
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
@@ -231,17 +261,6 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	shadow_context_descriptor_update(shadow_ctx,
 					 dev_priv->engine[ring_id]);
 
-	rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
-	if (IS_ERR(rq)) {
-		gvt_vgpu_err("fail to allocate gem request\n");
-		ret = PTR_ERR(rq);
-		goto out;
-	}
-
-	gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
-
-	workload->req = i915_gem_request_get(rq);
-
 	ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
 	if (ret)
 		goto out;
@@ -253,10 +272,37 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 			goto out;
 	}
 
+	/* Pin the shadow context by GVT even though it will also be pinned
+	 * when i915 allocates the request. GVT updates the guest context from
+	 * the shadow context when the workload completes, and by that time
+	 * i915 may already have unpinned the shadow context, leaving the
+	 * shadow_ctx pages invalid. So GVT needs its own pin here; after the
+	 * guest context is updated, GVT can unpin shadow_ctx safely.
+	 */
+	ring = engine->context_pin(engine, shadow_ctx);
+	if (IS_ERR(ring)) {
+		ret = PTR_ERR(ring);
+		gvt_vgpu_err("fail to pin shadow context\n");
+		goto out;
+	}
+
 	ret = populate_shadow_context(workload);
 	if (ret)
 		goto out;
 
+	rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
+	if (IS_ERR(rq)) {
+		gvt_vgpu_err("fail to allocate gem request\n");
+		ret = PTR_ERR(rq);
+		goto out;
+	}
+
+	gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
+
+	workload->req = i915_gem_request_get(rq);
+	ret = copy_workload_to_ring_buffer(workload);
+	if (ret)
+		goto out;
 	workload->shadowed = true;
 
 out:
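The extra pin taken in this hunk has to be dropped once the guest context has been written back from the shadow context. As a rough sketch of the matching unpin on the completion side (the helper name and placement are illustrative; in this driver version the write-back happens on the workload-completion path, around complete_current_workload(), and engine->context_unpin() is the counterpart of the context_pin() above):

	static void gvt_unpin_shadow_ctx(struct intel_vgpu_workload *workload)
	{
		struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
		struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
		struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;

		/* drop the GVT pin only after the shadow context has been
		 * copied back into the guest context; struct_mutex protects
		 * the context pin count
		 */
		mutex_lock(&dev_priv->drm.struct_mutex);
		engine->context_unpin(engine, shadow_ctx);
		mutex_unlock(&dev_priv->drm.struct_mutex);
	}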
@@ -269,8 +315,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
 	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
 	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
-	struct intel_vgpu *vgpu = workload->vgpu;
-	struct intel_ring *ring;
 	int ret = 0;
 
 	gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
@@ -288,20 +332,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 			goto out;
 	}
 
-	/* pin shadow context by gvt even the shadow context will be pinned
-	 * when i915 alloc request. That is because gvt will update the guest
-	 * context from shadow context when workload is completed, and at that
-	 * moment, i915 may already unpined the shadow context to make the
-	 * shadow_ctx pages invalid. So gvt need to pin itself. After update
-	 * the guest context, gvt can unpin the shadow_ctx safely.
-	 */
-	ring = engine->context_pin(engine, shadow_ctx);
-	if (IS_ERR(ring)) {
-		ret = PTR_ERR(ring);
-		gvt_vgpu_err("fail to pin shadow context\n");
-		goto out;
-	}
-
 out:
 	if (ret)
 		workload->status = ret;
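Taken together, these hunks reorder intel_gvt_scan_and_shadow_workload() so that command scanning, shadow-context pinning and population all happen before any i915 request is allocated, and dispatch_workload() no longer pins the context itself. A condensed sketch of the resulting sequence (error unwinding, the shadowed/descriptor bookkeeping and wa_ctx handling are omitted; the function name is illustrative):

	static int scan_and_shadow_sketch(struct intel_vgpu_workload *workload)
	{
		struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
		struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
		struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
		struct drm_i915_gem_request *rq;
		struct intel_ring *ring;
		int ret;

		/* 1. audit and shadow the guest ring buffer first */
		ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
		if (ret)
			return ret;

		/* 2. take GVT's own pin so the shadow context outlives the request */
		ring = engine->context_pin(engine, shadow_ctx);
		if (IS_ERR(ring))
			return PTR_ERR(ring);

		/* 3. fill the shadow context from the guest context */
		ret = populate_shadow_context(workload);
		if (ret)
			return ret;

		/* 4. only now allocate the i915 request ... */
		rq = i915_gem_request_alloc(engine, shadow_ctx);
		if (IS_ERR(rq))
			return PTR_ERR(rq);
		workload->req = i915_gem_request_get(rq);

		/* 5. ... and copy the scanned commands onto the request's ring */
		return copy_workload_to_ring_buffer(workload);
	}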