@@ -431,27 +431,6 @@ void i915_gem_context_fini(struct drm_device *dev)
 	dev_priv->kernel_context = NULL;
 }
 
-int i915_gem_context_enable(struct drm_i915_gem_request *req)
-{
-	struct intel_engine_cs *engine = req->engine;
-	int ret;
-
-	if (i915.enable_execlists) {
-		if (engine->init_context == NULL)
-			return 0;
-
-		ret = engine->init_context(req);
-	} else
-		ret = i915_switch_context(req);
-
-	if (ret) {
-		DRM_ERROR("ring init context: %d\n", ret);
-		return ret;
-	}
-
-	return 0;
-}
-
 static int context_idr_cleanup(int id, void *p, void *data)
 {
 	struct intel_context *ctx = p;
@@ -630,7 +609,8 @@ static int remap_l3(struct drm_i915_gem_request *req, int slice)
 	return 0;
 }
 
-static inline bool skip_rcs_switch(struct intel_engine_cs *engine,
+static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt,
+				   struct intel_engine_cs *engine,
 				   struct intel_context *to)
 {
 	if (to->remap_slice)
@@ -639,21 +619,27 @@ static inline bool skip_rcs_switch(struct intel_engine_cs *engine,
 	if (!to->legacy_hw_ctx.initialized)
 		return false;
 
-	if (to->ppgtt &&
-	    !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings))
+	if (ppgtt && (intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
 		return false;
 
 	return to == engine->last_context;
 }
 
 static bool
-needs_pd_load_pre(struct intel_engine_cs *engine, struct intel_context *to)
+needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt,
+		  struct intel_engine_cs *engine,
+		  struct intel_context *to)
 {
-	if (!to->ppgtt)
+	if (!ppgtt)
 		return false;
 
+	/* Always load the ppgtt on first use */
+	if (!engine->last_context)
+		return true;
+
+	/* Same context without new entries, skip */
 	if (engine->last_context == to &&
-	    !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings))
+	    !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
 		return false;
 
 	if (engine->id != RCS)
@@ -666,9 +652,11 @@ needs_pd_load_pre(struct intel_engine_cs *engine, struct intel_context *to)
 }
 
 static bool
-needs_pd_load_post(struct intel_context *to, u32 hw_flags)
+needs_pd_load_post(struct i915_hw_ppgtt *ppgtt,
+		   struct intel_context *to,
+		   u32 hw_flags)
 {
-	if (!to->ppgtt)
+	if (!ppgtt)
 		return false;
 
 	if (!IS_GEN8(to->i915))
@@ -684,11 +672,12 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 {
 	struct intel_context *to = req->ctx;
 	struct intel_engine_cs *engine = req->engine;
+	struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
 	struct intel_context *from;
 	u32 hw_flags;
 	int ret, i;
 
-	if (skip_rcs_switch(engine, to))
+	if (skip_rcs_switch(ppgtt, engine, to))
 		return 0;
 
 	/* Trying to pin first makes error handling easier. */
@@ -719,13 +708,13 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 	if (ret)
 		goto unpin_out;
 
-	if (needs_pd_load_pre(engine, to)) {
+	if (needs_pd_load_pre(ppgtt, engine, to)) {
 		/* Older GENs and non render rings still want the load first,
 		 * "PP_DCLV followed by PP_DIR_BASE register through Load
 		 * Register Immediate commands in Ring Buffer before submitting
 		 * a context."*/
 		trace_switch_mm(engine, to);
-		ret = to->ppgtt->switch_mm(to->ppgtt, req);
+		ret = ppgtt->switch_mm(ppgtt, req);
 		if (ret)
 			goto unpin_out;
 	}
@@ -736,16 +725,11 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 		 * space. This means we must enforce that a page table load
 		 * occur when this occurs. */
 		hw_flags = MI_RESTORE_INHIBIT;
-	else if (to->ppgtt &&
-		 intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)
+	else if (ppgtt && intel_engine_flag(engine) & ppgtt->pd_dirty_rings)
 		hw_flags = MI_FORCE_RESTORE;
 	else
 		hw_flags = 0;
 
-	/* We should never emit switch_mm more than once */
-	WARN_ON(needs_pd_load_pre(engine, to) &&
-		needs_pd_load_post(to, hw_flags));
-
 	if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
 		ret = mi_set_context(req, hw_flags);
 		if (ret)
@@ -780,9 +764,9 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 	/* GEN8 does *not* require an explicit reload if the PDPs have been
 	 * setup, and we do not wish to move them.
 	 */
-	if (needs_pd_load_post(to, hw_flags)) {
+	if (needs_pd_load_post(ppgtt, to, hw_flags)) {
 		trace_switch_mm(engine, to);
-		ret = to->ppgtt->switch_mm(to->ppgtt, req);
+		ret = ppgtt->switch_mm(ppgtt, req);
 		/* The hardware context switch is emitted, but we haven't
 		 * actually changed the state - so it's probably safe to bail
 		 * here. Still, let the user know something dangerous has
@@ -792,8 +776,8 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 			return ret;
 	}
 
-	if (to->ppgtt)
-		to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
+	if (ppgtt)
+		ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
 
 	for (i = 0; i < MAX_L3_SLICES; i++) {
 		if (!(to->remap_slice & (1<<i)))
@@ -846,17 +830,18 @@ int i915_switch_context(struct drm_i915_gem_request *req)
 	if (engine->id != RCS ||
 	    req->ctx->legacy_hw_ctx.rcs_state == NULL) {
 		struct intel_context *to = req->ctx;
+		struct i915_hw_ppgtt *ppgtt =
+			to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
 
-		if (needs_pd_load_pre(engine, to)) {
+		if (needs_pd_load_pre(ppgtt, engine, to)) {
 			int ret;
 
 			trace_switch_mm(engine, to);
-			ret = to->ppgtt->switch_mm(to->ppgtt, req);
+			ret = ppgtt->switch_mm(ppgtt, req);
 			if (ret)
 				return ret;
 
-			/* Doing a PD load always reloads the page dirs */
-			to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
+			ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
 		}
 
 		if (to != engine->last_context) {