@@ -605,6 +605,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
 	struct list_head *q = workload_q_head(vgpu, ring_id);
 	struct intel_vgpu_workload *last_workload = get_last_workload(q);
 	struct intel_vgpu_workload *workload = NULL;
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	u64 ring_context_gpa;
 	u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
 	int ret;
@@ -668,6 +669,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
 	workload->complete = complete_execlist_workload;
 	workload->status = -EINPROGRESS;
 	workload->emulate_schedule_in = emulate_schedule_in;
+	workload->shadowed = false;
 
 	if (ring_id == RCS) {
 		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
@@ -701,6 +703,15 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
 		return ret;
 	}
 
+	/* Only scan and shadow the first workload in the queue
+	 * as there is only one pre-allocated buf-obj for shadow.
+	 */
+	if (list_empty(workload_q_head(vgpu, ring_id))) {
+		mutex_lock(&dev_priv->drm.struct_mutex);
+		intel_gvt_scan_and_shadow_workload(workload);
+		mutex_unlock(&dev_priv->drm.struct_mutex);
+	}
+
 	queue_workload(workload);
 	return 0;
 }