@@ -571,9 +571,6 @@ static int execlists_context_queue(struct drm_i915_gem_request *request)
 	struct drm_i915_gem_request *cursor;
 	int num_elements = 0;
 
-	if (request->ctx != ring->default_context)
-		intel_lr_context_pin(request);
-
 	i915_gem_request_reference(request);
 
 	spin_lock_irq(&ring->execlist_lock);
@@ -737,6 +734,13 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 	if (intel_ring_stopped(ring))
 		return;
 
+	if (request->ctx != ring->default_context) {
+		if (!request->ctx->engine[ring->id].dirty) {
+			intel_lr_context_pin(request);
+			request->ctx->engine[ring->id].dirty = true;
+		}
+	}
+
 	if (dev_priv->guc.execbuf_client)
 		i915_guc_submit(dev_priv->guc.execbuf_client, request);
 	else
@@ -963,12 +967,6 @@ void intel_execlists_retire_requests(struct intel_engine_cs *ring)
 	spin_unlock_irq(&ring->execlist_lock);
 
 	list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
-		struct intel_context *ctx = req->ctx;
-		struct drm_i915_gem_object *ctx_obj =
-				ctx->engine[ring->id].state;
-
-		if (ctx_obj && (ctx != ring->default_context))
-			intel_lr_context_unpin(req);
 		list_del(&req->execlist_link);
 		i915_gem_request_unreference(req);
 	}
@@ -1063,21 +1061,39 @@ reset_pin_count:
 	return ret;
 }
 
-void intel_lr_context_unpin(struct drm_i915_gem_request *rq)
+static void __intel_lr_context_unpin(struct intel_engine_cs *ring,
+				     struct intel_context *ctx)
 {
-	struct intel_engine_cs *ring = rq->ring;
-	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
-	struct intel_ringbuffer *ringbuf = rq->ringbuf;
-
+	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
+	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
 	if (ctx_obj) {
 		WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
-		if (--rq->ctx->engine[ring->id].pin_count == 0) {
+		if (--ctx->engine[ring->id].pin_count == 0) {
 			intel_unpin_ringbuffer_obj(ringbuf);
 			i915_gem_object_ggtt_unpin(ctx_obj);
 		}
 	}
 }
 
+void intel_lr_context_unpin(struct drm_i915_gem_request *rq)
+{
+	__intel_lr_context_unpin(rq->ring, rq->ctx);
+}
+
+void intel_lr_context_complete_check(struct drm_i915_gem_request *req)
+{
+	struct intel_engine_cs *ring = req->ring;
+
+	if (ring->last_context && ring->last_context != req->ctx &&
+	    ring->last_context->engine[ring->id].dirty) {
+		__intel_lr_context_unpin(
+				ring,
+				ring->last_context);
+		ring->last_context->engine[ring->id].dirty = false;
+	}
+	ring->last_context = req->ctx;
+}
+
 static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
 	int ret, i;
@@ -2351,6 +2367,76 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
 	return 0;
 }
 
+/**
+ * intel_lr_context_clean_ring() - clean the ring specific parts of an LRC
+ * @ctx: the LR context being freed.
+ * @ring: the engine being cleaned
+ * @ctx_obj: the hw context being unreferenced
+ * @ringbuf: the ringbuf being freed
+ *
+ * Take care of cleaning up the per-engine backing
+ * objects and the logical ringbuffer.
+ */
+static void
+intel_lr_context_clean_ring(struct intel_context *ctx,
+			    struct intel_engine_cs *ring,
+			    struct drm_i915_gem_object *ctx_obj,
+			    struct intel_ringbuffer *ringbuf)
+{
+	int ret;
+
+	WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+
+	if (ctx == ring->default_context) {
+		intel_unpin_ringbuffer_obj(ringbuf);
+		i915_gem_object_ggtt_unpin(ctx_obj);
+	}
+
+	if (ctx->engine[ring->id].dirty) {
+		struct drm_i915_gem_request *req = NULL;
+
+		/*
+		 * If there is already a request pending on
+		 * this ring, wait for that to complete,
+		 * otherwise create a switch to idle request.
+		 */
+		if (list_empty(&ring->request_list)) {
+			int ret;
+
+			ret = i915_gem_request_alloc(
+					ring,
+					ring->default_context,
+					&req);
+			if (!ret)
+				i915_add_request(req);
+			else
+				DRM_DEBUG("Failed to ensure context saved");
+		} else {
+			req = list_first_entry(
+					&ring->request_list,
+					typeof(*req), list);
+		}
+		if (req) {
+			ret = i915_wait_request(req);
+			if (ret != 0) {
+				/*
+				 * If we get here, there's probably been a
+				 * ring reset, so we just clean up the dirty
+				 * flag & pin count.
+				 */
+				ctx->engine[ring->id].dirty = false;
+				__intel_lr_context_unpin(
+						ring,
+						ctx);
+			}
+		}
+	}
+
+	WARN_ON(ctx->engine[ring->id].pin_count);
+	intel_ringbuffer_free(ringbuf);
+	drm_gem_object_unreference(&ctx_obj->base);
+}
+
 /**
  * intel_lr_context_free() - free the LRC specific bits of a context
  * @ctx: the LR context to free.
@@ -2363,7 +2449,7 @@ void intel_lr_context_free(struct intel_context *ctx)
 {
 	int i;
 
-	for (i = 0; i < I915_NUM_RINGS; i++) {
+	for (i = 0; i < I915_NUM_RINGS; ++i) {
 		struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
 
 		if (ctx_obj) {
@@ -2371,13 +2457,10 @@ void intel_lr_context_free(struct intel_context *ctx)
 					ctx->engine[i].ringbuf;
 			struct intel_engine_cs *ring = ringbuf->ring;
 
-			if (ctx == ring->default_context) {
-				intel_unpin_ringbuffer_obj(ringbuf);
-				i915_gem_object_ggtt_unpin(ctx_obj);
-			}
-			WARN_ON(ctx->engine[ring->id].pin_count);
-			intel_ringbuffer_free(ringbuf);
-			drm_gem_object_unreference(&ctx_obj->base);
+			intel_lr_context_clean_ring(ctx,
+						    ring,
+						    ctx_obj,
+						    ringbuf);
 		}
 	}
 }
@@ -2539,5 +2622,12 @@ void intel_lr_context_reset(struct drm_device *dev,
 
 		ringbuf->head = 0;
 		ringbuf->tail = 0;
+
+		if (ctx->engine[ring->id].dirty) {
+			__intel_lr_context_unpin(
+					ring,
+					ctx);
+			ctx->engine[ring->id].dirty = false;
+		}
 	}
 }
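
For context: the net effect of this patch is to replace per-request pin/unpin
with a per-engine "dirty" flag. A context is pinned the first time it is
submitted to an engine (intel_logical_ring_advance_and_submit) and unpinned
only once a request from a *different* context completes
(intel_lr_context_complete_check), or on ring reset / context free. That keeps
the context's backing objects resident until the hardware has saved the
context state out. The standalone C sketch below models just this lifecycle;
struct ctx, struct engine, submit() and complete_check() are illustrative
stand-ins, not driver code.

/* Minimal user-space model of the pin/dirty lifecycle (illustrative only). */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct ctx {
	int pin_count;
	bool dirty;
};

struct engine {
	struct ctx *last_context;
};

static void ctx_pin(struct ctx *c)   { c->pin_count++; }
static void ctx_unpin(struct ctx *c) { assert(c->pin_count > 0); c->pin_count--; }

/* Models intel_logical_ring_advance_and_submit(): pin and mark dirty on
 * first submission only. */
static void submit(struct engine *e, struct ctx *c)
{
	(void)e;
	if (!c->dirty) {
		ctx_pin(c);
		c->dirty = true;
	}
}

/* Models intel_lr_context_complete_check(): when a request from a different
 * context completes, the previous context has been saved by the hardware
 * and may finally be unpinned. */
static void complete_check(struct engine *e, struct ctx *c)
{
	if (e->last_context && e->last_context != c &&
	    e->last_context->dirty) {
		ctx_unpin(e->last_context);
		e->last_context->dirty = false;
	}
	e->last_context = c;
}

int main(void)
{
	struct engine e = { NULL };
	struct ctx a = { 0, false }, b = { 0, false };

	submit(&e, &a);         /* a: pinned and dirty */
	complete_check(&e, &a); /* a stays pinned: still live on the engine */
	submit(&e, &b);         /* b: pinned and dirty */
	complete_check(&e, &b); /* a has been saved -> unpinned */

	assert(a.pin_count == 0 && !a.dirty);
	assert(b.pin_count == 1 && b.dirty);
	printf("lifecycle ok: a unpinned after context switch, b still pinned\n");
	return 0;
}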