@@ -54,11 +54,8 @@ static void set_context_pdp_root_pointer(
 
 static void update_shadow_pdps(struct intel_vgpu_workload *workload)
 {
-	struct intel_vgpu *vgpu = workload->vgpu;
-	int ring_id = workload->ring_id;
-	struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
 	struct drm_i915_gem_object *ctx_obj =
-		shadow_ctx->__engine[ring_id].state->obj;
+		workload->req->hw_context->state->obj;
 	struct execlist_ring_context *shadow_ring_context;
 	struct page *page;
 
@@ -128,9 +125,8 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct intel_gvt *gvt = vgpu->gvt;
 	int ring_id = workload->ring_id;
-	struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
 	struct drm_i915_gem_object *ctx_obj =
-		shadow_ctx->__engine[ring_id].state->obj;
+		workload->req->hw_context->state->obj;
 	struct execlist_ring_context *shadow_ring_context;
 	struct page *page;
 	void *dst;
@@ -280,10 +276,8 @@ static int shadow_context_status_change(struct notifier_block *nb,
 	return NOTIFY_OK;
 }
 
-static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
-		struct intel_engine_cs *engine)
+static void shadow_context_descriptor_update(struct intel_context *ce)
 {
-	struct intel_context *ce = to_intel_context(ctx, engine);
 	u64 desc = 0;
 
 	desc = ce->lrc_desc;
@@ -292,7 +286,7 @@ static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
 	 * like GEN8_CTX_* cached in desc_template
 	 */
 	desc &= U64_MAX << 12;
-	desc |= ctx->desc_template & ((1ULL << 12) - 1);
+	desc |= ce->gem_context->desc_template & ((1ULL << 12) - 1);
 
 	ce->lrc_desc = desc;
 }
@@ -300,12 +294,11 @@ static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
 static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
+	struct i915_request *req = workload->req;
 	void *shadow_ring_buffer_va;
 	u32 *cs;
-	struct i915_request *req = workload->req;
 
-	if (IS_KABYLAKE(req->i915) &&
-	    is_inhibit_context(req->gem_context, req->engine->id))
+	if (IS_KABYLAKE(req->i915) && is_inhibit_context(req->hw_context))
 		intel_vgpu_restore_inhibit_context(vgpu, req);
 
 	/* allocate shadow ring buffer */
@@ -353,60 +346,56 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	struct intel_vgpu_submission *s = &vgpu->submission;
 	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	int ring_id = workload->ring_id;
-	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
-	struct intel_ring *ring;
+	struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
+	struct intel_context *ce;
 	int ret;
 
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
-	if (workload->shadowed)
+	if (workload->req)
 		return 0;
 
+	/* pin shadow context by gvt even the shadow context will be pinned
+	 * when i915 alloc request. That is because gvt will update the guest
+	 * context from shadow context when workload is completed, and at that
+	 * moment, i915 may already unpined the shadow context to make the
+	 * shadow_ctx pages invalid. So gvt need to pin itself. After update
+	 * the guest context, gvt can unpin the shadow_ctx safely.
+	 */
+	ce = intel_context_pin(shadow_ctx, engine);
+	if (IS_ERR(ce)) {
+		gvt_vgpu_err("fail to pin shadow context\n");
+		return PTR_ERR(ce);
+	}
+
 	shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
 	shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
 				    GEN8_CTX_ADDRESSING_MODE_SHIFT;
 
-	if (!test_and_set_bit(ring_id, s->shadow_ctx_desc_updated))
-		shadow_context_descriptor_update(shadow_ctx,
-						 dev_priv->engine[ring_id]);
+	if (!test_and_set_bit(workload->ring_id, s->shadow_ctx_desc_updated))
+		shadow_context_descriptor_update(ce);
 
 	ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
 	if (ret)
-		goto err_scan;
+		goto err_unpin;
 
 	if ((workload->ring_id == RCS) &&
 	    (workload->wa_ctx.indirect_ctx.size != 0)) {
 		ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
 		if (ret)
-			goto err_scan;
-	}
-
-	/* pin shadow context by gvt even the shadow context will be pinned
-	 * when i915 alloc request. That is because gvt will update the guest
-	 * context from shadow context when workload is completed, and at that
-	 * moment, i915 may already unpined the shadow context to make the
-	 * shadow_ctx pages invalid. So gvt need to pin itself. After update
-	 * the guest context, gvt can unpin the shadow_ctx safely.
-	 */
-	ring = intel_context_pin(shadow_ctx, engine);
-	if (IS_ERR(ring)) {
-		ret = PTR_ERR(ring);
-		gvt_vgpu_err("fail to pin shadow context\n");
-		goto err_shadow;
+			goto err_shadow;
 	}
 
 	ret = populate_shadow_context(workload);
 	if (ret)
-		goto err_unpin;
-	workload->shadowed = true;
+		goto err_shadow;
+
 	return 0;
 
-err_unpin:
-	intel_context_unpin(shadow_ctx, engine);
 err_shadow:
 	release_shadow_wa_ctx(&workload->wa_ctx);
-err_scan:
+err_unpin:
+	intel_context_unpin(ce);
 	return ret;
 }
 
@@ -414,7 +403,6 @@ static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
 {
 	int ring_id = workload->ring_id;
 	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
-	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
 	struct i915_request *rq;
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct intel_vgpu_submission *s = &vgpu->submission;
@@ -437,7 +425,6 @@ static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
 	return 0;
 
 err_unpin:
-	intel_context_unpin(shadow_ctx, engine);
 	release_shadow_wa_ctx(&workload->wa_ctx);
 	return ret;
 }
@@ -517,21 +504,13 @@ err:
 	return ret;
 }
 
-static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+static void update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
-	struct intel_vgpu_workload *workload = container_of(wa_ctx,
-					struct intel_vgpu_workload,
-					wa_ctx);
-	int ring_id = workload->ring_id;
-	struct intel_vgpu_submission *s = &workload->vgpu->submission;
-	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
-	struct drm_i915_gem_object *ctx_obj =
-		shadow_ctx->__engine[ring_id].state->obj;
-	struct execlist_ring_context *shadow_ring_context;
-	struct page *page;
-
-	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
-	shadow_ring_context = kmap_atomic(page);
+	struct intel_vgpu_workload *workload =
+		container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx);
+	struct i915_request *rq = workload->req;
+	struct execlist_ring_context *shadow_ring_context =
+		(struct execlist_ring_context *)rq->hw_context->lrc_reg_state;
 
 	shadow_ring_context->bb_per_ctx_ptr.val =
 		(shadow_ring_context->bb_per_ctx_ptr.val &
@@ -539,9 +518,6 @@ static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	shadow_ring_context->rcs_indirect_ctx.val =
 		(shadow_ring_context->rcs_indirect_ctx.val &
 		(~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
-
-	kunmap_atomic(shadow_ring_context);
-	return 0;
 }
 
 static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
@@ -670,12 +646,9 @@ err_unpin_mm:
 static int dispatch_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
-	struct intel_vgpu_submission *s = &vgpu->submission;
-	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	int ring_id = workload->ring_id;
-	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
-	int ret = 0;
+	int ret;
 
 	gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
 		ring_id, workload);
@@ -687,10 +660,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 		goto out;
 
 	ret = prepare_workload(workload);
-	if (ret) {
-		intel_context_unpin(shadow_ctx, engine);
-		goto out;
-	}
 
 out:
 	if (ret)
@@ -765,27 +734,23 @@ out:
 
 static void update_guest_context(struct intel_vgpu_workload *workload)
 {
+	struct i915_request *rq = workload->req;
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct intel_gvt *gvt = vgpu->gvt;
-	struct intel_vgpu_submission *s = &vgpu->submission;
-	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
-	int ring_id = workload->ring_id;
-	struct drm_i915_gem_object *ctx_obj =
-		shadow_ctx->__engine[ring_id].state->obj;
+	struct drm_i915_gem_object *ctx_obj = rq->hw_context->state->obj;
 	struct execlist_ring_context *shadow_ring_context;
 	struct page *page;
 	void *src;
 	unsigned long context_gpa, context_page_num;
 	int i;
 
-	gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
-			workload->ctx_desc.lrca);
-
-	context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
+	gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
+		      workload->ctx_desc.lrca);
 
+	context_page_num = rq->engine->context_size;
 	context_page_num = context_page_num >> PAGE_SHIFT;
 
-	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
+	if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS)
 		context_page_num = 19;
 
 	i = 2;
@@ -858,6 +823,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 		scheduler->current_workload[ring_id];
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct intel_vgpu_submission *s = &vgpu->submission;
+	struct i915_request *rq;
 	int event;
 
 	mutex_lock(&gvt->lock);
@@ -866,11 +832,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 	 * switch to make sure request is completed.
 	 * For the workload w/o request, directly complete the workload.
 	 */
-	if (workload->req) {
-		struct drm_i915_private *dev_priv =
-			workload->vgpu->gvt->dev_priv;
-		struct intel_engine_cs *engine =
-			dev_priv->engine[workload->ring_id];
+	rq = fetch_and_zero(&workload->req);
+	if (rq) {
 		wait_event(workload->shadow_ctx_status_wq,
 			   !atomic_read(&workload->shadow_ctx_active));
 
@@ -886,8 +849,6 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 				workload->status = 0;
 		}
 
-		i915_request_put(fetch_and_zero(&workload->req));
-
 		if (!workload->status && !(vgpu->resetting_eng &
 					  ENGINE_MASK(ring_id))) {
 			update_guest_context(workload);
@@ -896,10 +857,13 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 				INTEL_GVT_EVENT_MAX)
 				intel_vgpu_trigger_virtual_event(vgpu, event);
 		}
-		mutex_lock(&dev_priv->drm.struct_mutex);
+
 		/* unpin shadow ctx as the shadow_ctx update is done */
-		intel_context_unpin(s->shadow_ctx, engine);
-		mutex_unlock(&dev_priv->drm.struct_mutex);
+		mutex_lock(&rq->i915->drm.struct_mutex);
+		intel_context_unpin(rq->hw_context);
+		mutex_unlock(&rq->i915->drm.struct_mutex);
+
+		i915_request_put(rq);
 	}
 
 	gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -1270,7 +1234,6 @@ alloc_workload(struct intel_vgpu *vgpu)
 	atomic_set(&workload->shadow_ctx_active, 0);
 
 	workload->status = -EINPROGRESS;
-	workload->shadowed = false;
 	workload->vgpu = vgpu;
 
 	return workload;