@@ -2609,6 +2609,9 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	unsigned long gma_head, gma_tail, gma_bottom, ring_size, ring_tail;
 	struct parser_exec_state s;
 	int ret = 0;
+	struct intel_vgpu_workload *workload = container_of(wa_ctx,
+				struct intel_vgpu_workload,
+				wa_ctx);
 
 	/* ring base is page aligned */
 	if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma, GTT_PAGE_SIZE)))
@@ -2623,14 +2626,14 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 
 	s.buf_type = RING_BUFFER_INSTRUCTION;
 	s.buf_addr_type = GTT_BUFFER;
-	s.vgpu = wa_ctx->workload->vgpu;
-	s.ring_id = wa_ctx->workload->ring_id;
+	s.vgpu = workload->vgpu;
+	s.ring_id = workload->ring_id;
 	s.ring_start = wa_ctx->indirect_ctx.guest_gma;
 	s.ring_size = ring_size;
 	s.ring_head = gma_head;
 	s.ring_tail = gma_tail;
 	s.rb_va = wa_ctx->indirect_ctx.shadow_va;
-	s.workload = wa_ctx->workload;
+	s.workload = workload;
 
 	ret = ip_gma_set(&s, gma_head);
 	if (ret)
@@ -2713,12 +2716,15 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
 	int ctx_size = wa_ctx->indirect_ctx.size;
 	unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
-	struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
+	struct intel_vgpu_workload *workload = container_of(wa_ctx,
+				struct intel_vgpu_workload,
+				wa_ctx);
+	struct intel_vgpu *vgpu = workload->vgpu;
 	struct drm_i915_gem_object *obj;
 	int ret = 0;
 	void *map;
 
-	obj = i915_gem_object_create(wa_ctx->workload->vgpu->gvt->dev_priv,
+	obj = i915_gem_object_create(workload->vgpu->gvt->dev_priv,
 			roundup(ctx_size + CACHELINE_BYTES,
 				PAGE_SIZE));
 	if (IS_ERR(obj))
@@ -2738,8 +2744,8 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 		goto unmap_src;
 	}
 
-	ret = copy_gma_to_hva(wa_ctx->workload->vgpu,
-			wa_ctx->workload->vgpu->gtt.ggtt_mm,
+	ret = copy_gma_to_hva(workload->vgpu,
+			workload->vgpu->gtt.ggtt_mm,
 			guest_gma, guest_gma + ctx_size,
 			map);
 	if (ret < 0) {
@@ -2777,7 +2783,10 @@ static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
 	int ret;
-	struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
+	struct intel_vgpu_workload *workload = container_of(wa_ctx,
+				struct intel_vgpu_workload,
+				wa_ctx);
+	struct intel_vgpu *vgpu = workload->vgpu;
 
 	if (wa_ctx->indirect_ctx.size == 0)
 		return 0;
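
The hunks above all apply the same pattern: instead of reading a workload back-pointer out of the wa_ctx, the enclosing struct intel_vgpu_workload is recovered with container_of from the embedded wa_ctx member. Below is a minimal standalone sketch (not part of the patch) of that pattern; the struct layouts are simplified stand-ins for illustration, not the real GVT definitions, and the container_of macro is a userspace-buildable equivalent of the kernel's.

/*
 * Standalone illustration of recovering the enclosing structure from a
 * pointer to one of its embedded members. Struct contents are hypothetical
 * stand-ins; only the container_of usage mirrors the patch.
 */
#include <stddef.h>
#include <stdio.h>

/* Simplified equivalent of the kernel's container_of (linux/kernel.h). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct intel_shadow_wa_ctx {			/* simplified stand-in */
	unsigned long indirect_ctx_gma;
};

struct intel_vgpu_workload {			/* simplified stand-in */
	int ring_id;
	struct intel_shadow_wa_ctx wa_ctx;	/* embedded, not a pointer */
};

int main(void)
{
	struct intel_vgpu_workload workload = { .ring_id = 2 };
	struct intel_shadow_wa_ctx *wa_ctx = &workload.wa_ctx;

	/* Recover the enclosing workload from the embedded member. */
	struct intel_vgpu_workload *w =
		container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx);

	printf("ring_id = %d\n", w->ring_id);	/* prints 2 */
	return 0;
}

Because wa_ctx is embedded directly inside intel_vgpu_workload, the offset arithmetic in container_of is enough to get back to the workload, which is why the patch can drop every wa_ctx->workload dereference in favor of a locally derived workload pointer.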