@@ -2599,6 +2599,46 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
 	return ret;
 }
 
+struct wedge_me {
+	struct delayed_work work;
+	struct drm_i915_private *i915;
+	const char *name;
+};
+
+static void wedge_me(struct work_struct *work)
+{
+	struct wedge_me *w = container_of(work, typeof(*w), work.work);
+
+	dev_err(w->i915->drm.dev,
+		"%s timed out, cancelling all in-flight rendering.\n",
+		w->name);
+	i915_gem_set_wedged(w->i915);
+}
+
+static void __init_wedge(struct wedge_me *w,
+			 struct drm_i915_private *i915,
+			 long timeout,
+			 const char *name)
+{
+	w->i915 = i915;
+	w->name = name;
+
+	INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me);
+	schedule_delayed_work(&w->work, timeout);
+}
+
+static void __fini_wedge(struct wedge_me *w)
+{
+	cancel_delayed_work_sync(&w->work);
+	destroy_delayed_work_on_stack(&w->work);
+	w->i915 = NULL;
+}
+
+#define i915_wedge_on_timeout(W, DEV, TIMEOUT) \
+	for (__init_wedge((W), (DEV), (TIMEOUT), __func__); \
+	     (W)->i915; \
+	     __fini_wedge((W)))
+
 /**
  * i915_reset_device - do process context error handling work
  * @dev_priv: i915 device private
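The i915_wedge_on_timeout() macro above relies on the for-loop-as-scoped-guard idiom: the init expression arms the on-stack watchdog, the loop body runs exactly once while (W)->i915 is non-NULL, and the "increment" expression cancels the watchdog on the way out. The following is a minimal userspace sketch of that idiom only, not part of this patch; the guard/guard_init/guard_fini/scoped_guard names are invented for illustration.

#include <stdio.h>

struct guard {
	const char *name;
	int armed;
};

static void guard_init(struct guard *g, const char *name)
{
	g->name = name;
	g->armed = 1;	/* keeps the for-loop condition true for one pass */
	printf("arm %s\n", g->name);
}

static void guard_fini(struct guard *g)
{
	printf("disarm %s\n", g->name);
	g->armed = 0;	/* condition goes false, loop ends after one pass */
}

/* Run the block that follows exactly once, bracketed by init/fini. */
#define scoped_guard(G, NAME) \
	for (guard_init((G), (NAME)); (G)->armed; guard_fini((G)))

int main(void)
{
	struct guard g;

	scoped_guard(&g, "reset") {
		/* critical section; guard_fini() runs when we fall out */
		printf("doing the guarded work\n");
	}

	return 0;
}

As with the kernel macro, a break inside the guarded block would skip the fini step, so the body must leave the block normally.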
@@ -2612,36 +2652,36 @@ static void i915_reset_device(struct drm_i915_private *dev_priv)
 	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
 	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
 	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
+	struct wedge_me w;
 
 	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
 
 	DRM_DEBUG_DRIVER("resetting chip\n");
 	kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
 
-	intel_prepare_reset(dev_priv);
+	/* Use a watchdog to ensure that our reset completes */
+	i915_wedge_on_timeout(&w, dev_priv, 5*HZ) {
+		intel_prepare_reset(dev_priv);
 
-	set_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags);
-	wake_up_all(&dev_priv->gpu_error.wait_queue);
+		/* Signal that locked waiters should reset the GPU */
+		set_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags);
+		wake_up_all(&dev_priv->gpu_error.wait_queue);
 
-	do {
-		/*
-		 * All state reset _must_ be completed before we update the
-		 * reset counter, for otherwise waiters might miss the reset
-		 * pending state and not properly drop locks, resulting in
-		 * deadlocks with the reset work.
+		/* Wait for anyone holding the lock to wakeup, without
+		 * blocking indefinitely on struct_mutex.
 		 */
-		if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
-			i915_reset(dev_priv);
-			mutex_unlock(&dev_priv->drm.struct_mutex);
-		}
-
-		/* We need to wait for anyone holding the lock to wakeup */
-	} while (wait_on_bit_timeout(&dev_priv->gpu_error.flags,
-				     I915_RESET_HANDOFF,
-				     TASK_UNINTERRUPTIBLE,
-				     HZ));
+		do {
+			if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
+				i915_reset(dev_priv);
+				mutex_unlock(&dev_priv->drm.struct_mutex);
+			}
+		} while (wait_on_bit_timeout(&dev_priv->gpu_error.flags,
+					     I915_RESET_HANDOFF,
+					     TASK_UNINTERRUPTIBLE,
+					     1));
 
-	intel_finish_reset(dev_priv);
+		intel_finish_reset(dev_priv);
+	}
 
 	if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
 		kobject_uevent_env(kobj,