@@ -599,10 +599,62 @@ out:
 static void reset_ring_common(struct intel_engine_cs *engine,
 			      struct drm_i915_gem_request *request)
 {
-	struct intel_ring *ring = request->ring;
+	/* Try to restore the logical GPU state to match the continuation
+	 * of the request queue. If we skip the context/PD restore, then
+	 * the next request may try to execute assuming that its context
+	 * is valid and loaded on the GPU and so may try to access invalid
+	 * memory, prompting repeated GPU hangs.
+	 *
+	 * If the request was guilty, we still restore the logical state
+	 * in case the next request requires it (e.g. the aliasing ppgtt),
+	 * but skip over the hung batch.
+	 *
+	 * If the request was innocent, we try to replay the request with
+	 * the restored context.
+	 */
+	if (request) {
+		struct drm_i915_private *dev_priv = request->i915;
+		struct intel_context *ce = &request->ctx->engine[engine->id];
+		struct i915_hw_ppgtt *ppgtt;
+
+		/* FIXME consider gen8 reset */
+
+		if (ce->state) {
+			I915_WRITE(CCID,
+				   i915_ggtt_offset(ce->state) |
+				   BIT(8) /* must be set! */ |
+				   CCID_EXTENDED_STATE_SAVE |
+				   CCID_EXTENDED_STATE_RESTORE |
+				   CCID_EN);
+		}
 
-	ring->head = request->postfix;
-	ring->last_retired_head = -1;
+		ppgtt = request->ctx->ppgtt ?: engine->i915->mm.aliasing_ppgtt;
+		if (ppgtt) {
+			u32 pd_offset = ppgtt->pd.base.ggtt_offset << 10;
+
+			I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
+			I915_WRITE(RING_PP_DIR_BASE(engine), pd_offset);
+
+			/* Wait for the PD reload to complete */
+			if (intel_wait_for_register(dev_priv,
+						    RING_PP_DIR_BASE(engine),
+						    BIT(0), 0,
+						    10))
+				DRM_ERROR("Wait for reload of ppgtt page-directory timed out\n");
+
+			ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
+		}
+
+		/* If the rq hung, jump to its breadcrumb and skip the batch */
+		if (request->fence.error == -EIO) {
+			struct intel_ring *ring = request->ring;
+
+			ring->head = request->postfix;
+			ring->last_retired_head = -1;
+		}
+	} else {
+		engine->legacy_active_context = NULL;
+	}
 }
 
 static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)