@@ -390,14 +390,7 @@ static int execlists_update_context(struct drm_i915_gem_request *rq)
 {
 	struct intel_engine_cs *ring = rq->ring;
 	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
-	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
-	struct page *page;
-	uint32_t *reg_state;
-
-	BUG_ON(!ctx_obj);
-
-	page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
-	reg_state = kmap_atomic(page);
+	uint32_t *reg_state = rq->ctx->engine[ring->id].lrc_reg_state;
 
 	reg_state[CTX_RING_TAIL+1] = rq->tail;
 	reg_state[CTX_RING_BUFFER_START+1] = rq->ringbuf->vma->node.start;
@@ -414,8 +407,6 @@ static int execlists_update_context(struct drm_i915_gem_request *rq)
 		ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
 	}
 
-	kunmap_atomic(reg_state);
-
 	return 0;
 }
 
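The hunks above convert execlists_update_context() from mapping the LRC state page on every submission (i915_gem_object_get_dirty_page() plus kmap_atomic()) to reading a pointer, lrc_reg_state, that is mapped once while the context is pinned. A minimal sketch of that map-once/use-many pattern, with hypothetical stand-in types and helpers (ctx_state, ctx_pin, ctx_unpin) rather than the real i915 structures:

#include <linux/highmem.h>
#include <linux/types.h>

/* Hypothetical stand-in for the per-engine context state; a sketch only. */
struct ctx_state {
	struct page *state_page;	/* page backing the register state */
	u32 *reg_state;			/* cached mapping, valid while pinned */
	int pin_count;
};

static u32 *ctx_pin(struct ctx_state *cs)
{
	if (cs->pin_count++ == 0)
		cs->reg_state = kmap(cs->state_page);	/* kmap() may sleep: fine at pin time */
	return cs->reg_state;	/* submission paths just dereference this */
}

static void ctx_unpin(struct ctx_state *cs)
{
	if (--cs->pin_count == 0) {
		kunmap(cs->state_page);	/* drop the long-lived mapping */
		cs->reg_state = NULL;
	}
}

Because the mapping now lives for the whole pin, the submission path sheds the no-sleep, no-preemption constraints that kmap_atomic() imposes, and drops a map/unmap pair from every request.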
@@ -1067,6 +1058,7 @@ static int intel_lr_context_do_pin(struct intel_engine_cs *ring,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
 	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+	struct page *lrc_state_page;
 	int ret;
 
 	WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
@@ -1076,12 +1068,19 @@ static int intel_lr_context_do_pin(struct intel_engine_cs *ring,
 	if (ret)
 		return ret;
 
+	lrc_state_page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
+	if (WARN_ON(!lrc_state_page)) {
+		ret = -ENODEV;
+		goto unpin_ctx_obj;
+	}
+
 	ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
 	if (ret)
 		goto unpin_ctx_obj;
 
 	ctx->engine[ring->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
 	intel_lr_context_descriptor_update(ctx, ring);
+	ctx->engine[ring->id].lrc_reg_state = kmap(lrc_state_page);
 	ctx_obj->dirty = true;
 
 	/* Invalidate GuC TLB. */
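In the pin path, the dirty-page lookup happens immediately after the context object is pinned, so a failed lookup has exactly one step to unwind through the existing unpin_ctx_obj label. A hedged sketch of that goto-based unwind ordering, with hypothetical helpers (pin_ctx, get_state_page, map_ring, unpin_ctx) standing in for the real i915 calls:

static int do_pin_sketch(struct ctx_state *cs)
{
	int ret;

	ret = pin_ctx(cs);			/* step 1: pin the context object */
	if (ret)
		return ret;

	cs->state_page = get_state_page(cs);	/* step 2: resolve the state page */
	if (WARN_ON(!cs->state_page)) {
		ret = -ENODEV;			/* warn once, fail gracefully */
		goto unpin;			/* undo step 1 only */
	}

	ret = map_ring(cs);			/* step 3: map the ringbuffer */
	if (ret)
		goto unpin;

	cs->reg_state = kmap(cs->state_page);	/* step 4: cache the mapping */
	return 0;

unpin:
	unpin_ctx(cs);
	return ret;
}

Note the idiom of testing WARN_ON() in the condition: it keeps the failure visible in the logs while returning an error, in contrast to the BUG_ON() the first hunk removes.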
@@ -1119,14 +1118,18 @@ void intel_lr_context_unpin(struct drm_i915_gem_request *rq)
 	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
 	struct intel_ringbuffer *ringbuf = rq->ringbuf;
 
-	if (ctx_obj) {
-		WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
-		if (--rq->ctx->engine[ring->id].pin_count == 0) {
-			intel_unpin_ringbuffer_obj(ringbuf);
-			i915_gem_object_ggtt_unpin(ctx_obj);
-			rq->ctx->engine[ring->id].lrc_vma = NULL;
-			rq->ctx->engine[ring->id].lrc_desc = 0;
-		}
+	WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+
+	if (!ctx_obj)
+		return;
+
+	if (--rq->ctx->engine[ring->id].pin_count == 0) {
+		kunmap(kmap_to_page(rq->ctx->engine[ring->id].lrc_reg_state));
+		intel_unpin_ringbuffer_obj(ringbuf);
+		i915_gem_object_ggtt_unpin(ctx_obj);
+		rq->ctx->engine[ring->id].lrc_vma = NULL;
+		rq->ctx->engine[ring->id].lrc_desc = 0;
+		rq->ctx->engine[ring->id].lrc_reg_state = NULL;
 	}
 }
 
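Only the kmap()ed virtual address is cached in lrc_reg_state; the backing page is not stored separately. The unpin path therefore recovers the struct page from the address with kmap_to_page() before calling kunmap(). Illustrated as a sketch, not the driver's code:

void *vaddr = kmap(page);	/* pin time: cache vaddr, forget the page */
/* ... many submissions write the register state through vaddr ... */
kunmap(kmap_to_page(vaddr));	/* unpin time: equivalent to kunmap(page) */

The hunk also flattens the old if (ctx_obj) nesting into an early return, so the struct_mutex lock assertion now runs unconditionally.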