@@ -2552,14 +2552,25 @@ void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
 {
	struct drm_i915_private *dev_priv = to_i915(engine->dev);

+	/* Our semaphore implementation is strictly monotonic (i.e. we proceed
+	 * so long as the semaphore value in the register/page is greater
+	 * than the sync value), so whenever we reset the seqno we must also
+	 * reset the tracking semaphore value to 0; it will then always be
+	 * before the next request's seqno. If we don't reset the semaphore
+	 * value, then when the seqno moves backwards all future waits will
+	 * complete instantly (causing rendering corruption).
+	 */
	if (INTEL_INFO(dev_priv)->gen == 6 || INTEL_INFO(dev_priv)->gen == 7) {
		I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
		I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
		if (HAS_VEBOX(dev_priv))
			I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
	}
+	memset(engine->semaphore.sync_seqno, 0,
+	       sizeof(engine->semaphore.sync_seqno));

	engine->set_seqno(engine, seqno);
+
	engine->hangcheck.seqno = seqno;
 }
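
To make the reasoning in the added comment concrete, here is a minimal standalone sketch (hypothetical names such as engine_sketch and need_semaphore_wait; this is not the driver's actual structure or code) of a strictly monotonic "proceed once the signalled value has reached the wanted value" check, and of why a stale tracked value makes every later wait complete instantly once seqnos are reset backwards, unless the tracked values are cleared to 0 first:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NUM_ENGINES 3

struct engine_sketch {
	/* Last seqno each other engine is known to have signalled to us. */
	uint32_t sync_seqno[NUM_ENGINES];
};

/*
 * Strictly monotonic check: a semaphore wait is only needed while the
 * wanted seqno is still ahead of the value already seen signalled.
 */
static int need_semaphore_wait(const struct engine_sketch *e,
			       int signaller, uint32_t wanted_seqno)
{
	return wanted_seqno > e->sync_seqno[signaller];
}

int main(void)
{
	struct engine_sketch e = { .sync_seqno = { 0, 900, 0 } };

	/* While seqnos only grow, the check behaves as intended. */
	printf("wait needed for 950? %d\n", need_semaphore_wait(&e, 1, 950));

	/*
	 * Now pretend seqnos were reset back to a small value (say 5) without
	 * clearing the tracked value: the stale 900 makes every new wait look
	 * already satisfied, so no wait is ever emitted -- the "completes
	 * instantly" failure the comment warns about.
	 */
	printf("wait needed for 5 (stale)? %d\n", need_semaphore_wait(&e, 1, 5));

	/*
	 * What the patch does conceptually: clear the tracked values so they
	 * are guaranteed to be below the next request's seqno.
	 */
	memset(e.sync_seqno, 0, sizeof(e.sync_seqno));
	printf("wait needed for 5 (reset)? %d\n", need_semaphore_wait(&e, 1, 5));
	return 0;
}

The MMIO writes in the hunk do the analogous reset on the hardware side for gen6/7: clearing the RING_SYNC_* mailbox registers puts the stored semaphore value back below any seqno a future request can use, while the memset of engine->semaphore.sync_seqno does the same for the software-tracked values.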