@@ -155,7 +155,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
 		if (ce->ring)
 			intel_ring_free(ce->ring);
 
-		i915_gem_object_put(ce->state);
+		i915_vma_put(ce->state);
 	}
 
 	list_del(&ctx->link);
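
Here ce->state changes type from struct drm_i915_gem_object * to struct
i915_vma *, so teardown now releases the context image through its vma.
The i915_vma_put() helper itself is outside these hunks; a minimal sketch,
assuming it is a thin wrapper that drops the backing object's reference:

	static inline void i915_vma_put(struct i915_vma *vma)
	{
		/* releasing the vma handle drops the backing object's ref
		 * (assumed; the helper is not part of this diff)
		 */
		i915_gem_object_put(vma->obj);
	}
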
@@ -281,13 +281,24 @@ __create_hw_context(struct drm_device *dev,
 	ctx->ggtt_alignment = get_context_alignment(dev_priv);
 
 	if (dev_priv->hw_context_size) {
-		struct drm_i915_gem_object *obj =
-				i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
+		struct drm_i915_gem_object *obj;
+		struct i915_vma *vma;
+
+		obj = i915_gem_alloc_context_obj(dev,
+						 dev_priv->hw_context_size);
 		if (IS_ERR(obj)) {
 			ret = PTR_ERR(obj);
 			goto err_out;
 		}
-		ctx->engine[RCS].state = obj;
+
+		vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
+		if (IS_ERR(vma)) {
+			i915_gem_object_put(obj);
+			ret = PTR_ERR(vma);
+			goto err_out;
+		}
+
+		ctx->engine[RCS].state = vma;
 	}
 
 	/* Default context will never have a file_priv */
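
The vma for the context image is now created eagerly at context creation,
rather than looked up from the object later. Note the unwind order on
failure: the reference taken by i915_gem_alloc_context_obj() must be
dropped by hand, since the vma never came into existence to own it. The
declaration of i915_vma_create() is not part of this diff; its signature,
as assumed from the call site (a backing object, a target address space,
and an optional GGTT view):

	struct i915_vma *
	i915_vma_create(struct drm_i915_gem_object *obj,
			struct i915_address_space *vm,
			const struct i915_ggtt_view *view);
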
@@ -399,7 +410,7 @@ static void i915_gem_context_unpin(struct i915_gem_context *ctx,
 	struct intel_context *ce = &ctx->engine[engine->id];
 
 	if (ce->state)
-		i915_gem_object_ggtt_unpin(ce->state);
+		i915_vma_unpin(ce->state);
 
 	i915_gem_context_put(ctx);
 }
@@ -620,9 +631,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_emit(ring, MI_SET_CONTEXT);
-	intel_ring_emit(ring,
-			i915_gem_obj_ggtt_offset(req->ctx->engine[RCS].state) |
-			flags);
+	intel_ring_emit(ring, req->ctx->engine[RCS].state->node.start | flags);
 	/*
 	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
 	 * WaMiSetContext_Hang:snb,ivb,vlv
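
With the vma in hand, the GGTT offset for MI_SET_CONTEXT is read straight
out of the vma's drm_mm node instead of going through the per-object
lookup that i915_gem_obj_ggtt_offset() performed. A hypothetical helper
(not in this diff) to make the new access pattern explicit:

	static u64 context_ggtt_offset(const struct intel_context *ce)
	{
		/* the tracked vma already knows its placement; node.start
		 * is stable here because the context vma is pinned across
		 * the switch
		 */
		return ce->state->node.start;
	}
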
@@ -755,6 +764,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 	struct i915_gem_context *to = req->ctx;
 	struct intel_engine_cs *engine = req->engine;
 	struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
+	struct i915_vma *vma = to->engine[RCS].state;
 	struct i915_gem_context *from;
 	u32 hw_flags;
 	int ret, i;
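
do_rcs_switch() now caches the context vma in a local, which the later
hunks use for pinning, for domain flushing (via vma->obj) and for the
error path. The struct i915_vma fields this patch relies on, sketched and
trimmed (only as assumed here, not taken from this diff):

	struct i915_vma {
		struct drm_mm_node node;	/* placement: node.start is the GGTT offset */
		struct drm_i915_gem_object *obj; /* backing storage, domains, refcount */
		/* ... remaining members trimmed ... */
	};
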
@@ -763,8 +773,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 		return 0;
 
 	/* Trying to pin first makes error handling easier. */
-	ret = i915_gem_object_ggtt_pin(to->engine[RCS].state, NULL, 0,
-				       to->ggtt_alignment, 0);
+	ret = i915_vma_pin(vma, 0, to->ggtt_alignment, PIN_GLOBAL);
 	if (ret)
 		return ret;
 
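
Pinning moves from the object-level i915_gem_object_ggtt_pin() to the vma
itself, with the GGTT selected by the PIN_GLOBAL flag rather than implied
by the entry point. The signature assumed from the call site:

	int __must_check
	i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags);

Passing size == 0 presumably means "the whole object", mirroring the
NULL/0 arguments of the old call.
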
@@ -785,9 +794,9 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 	 *
 	 * XXX: We need a real interface to do this instead of trickery.
 	 */
-	ret = i915_gem_object_set_to_gtt_domain(to->engine[RCS].state, false);
+	ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
 	if (ret)
-		goto unpin_out;
+		goto err;
 
 	if (needs_pd_load_pre(ppgtt, engine, to)) {
 		/* Older GENs and non render rings still want the load first,
@@ -797,7 +806,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 		trace_switch_mm(engine, to);
 		ret = ppgtt->switch_mm(ppgtt, req);
 		if (ret)
-			goto unpin_out;
+			goto err;
 	}
 
 	if (!to->engine[RCS].initialised || i915_gem_context_is_default(to))
@@ -814,7 +823,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 	if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
 		ret = mi_set_context(req, hw_flags);
 		if (ret)
-			goto unpin_out;
+			goto err;
 	}
 
 	/* The backing object for the context is done after switching to the
@@ -824,8 +833,6 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 	 * MI_SET_CONTEXT instead of when the next seqno has completed.
 	 */
 	if (from != NULL) {
-		struct drm_i915_gem_object *obj = from->engine[RCS].state;
-
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
 		 * whole damn pipeline, we don't need to explicitly mark the
 		 * object dirty. The only exception is that the context must be
@@ -833,11 +840,9 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 		 * able to defer doing this until we know the object would be
 		 * swapped, but there is no way to do that yet.
 		 */
-		obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-		i915_vma_move_to_active(i915_gem_obj_to_ggtt(obj), req, 0);
-
-		/* obj is kept alive until the next request by its active ref */
-		i915_gem_object_ggtt_unpin(obj);
+		i915_vma_move_to_active(from->engine[RCS].state, req, 0);
+		/* state is kept alive until the next request */
+		i915_vma_unpin(from->engine[RCS].state);
 		i915_gem_context_put(from);
 	}
 	engine->last_context = i915_gem_context_get(to);
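
The outgoing context image no longer needs the obj lookup dance:
i915_vma_move_to_active() ties the vma to the request directly, so it can
be unpinned immediately and is only reaped once the MI_SET_CONTEXT away
from it has completed. The explicit read_domains assignment is dropped
here, presumably subsumed by the vma activity tracking introduced
elsewhere in this series. The signature assumed from the call site:

	void i915_vma_move_to_active(struct i915_vma *vma,
				     struct drm_i915_gem_request *req,
				     unsigned int flags);
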
@@ -882,8 +887,8 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 
 	return 0;
 
-unpin_out:
-	i915_gem_object_ggtt_unpin(to->engine[RCS].state);
+err:
+	i915_vma_unpin(vma);
 	return ret;
 }
 