@@ -2974,7 +2974,7 @@ ring_stuck(struct intel_engine_cs *ring, u64 acthd)
	return HANGCHECK_HUNG;
 }
 
-/**
+/*
  * This is called when the chip hasn't reported back with completed
  * batchbuffers in a long time. We keep track per ring seqno progress and
  * if there are no progress, hangcheck score for that ring is increased.
@@ -2982,10 +2982,12 @@ ring_stuck(struct intel_engine_cs *ring, u64 acthd)
  * we kick the ring. If we see no progress on three subsequent calls
  * we assume chip is wedged and try to fix it by resetting the chip.
  */
-static void i915_hangcheck_elapsed(unsigned long data)
+static void i915_hangcheck_elapsed(struct work_struct *work)
 {
-	struct drm_device *dev = (struct drm_device *)data;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv =
+		container_of(work, typeof(*dev_priv),
+			     gpu_error.hangcheck_work.work);
+	struct drm_device *dev = dev_priv->dev;
	struct intel_engine_cs *ring;
	int i;
	int busy_count = 0, rings_hung = 0;
@@ -3099,17 +3101,18 @@ static void i915_hangcheck_elapsed(unsigned long data)
 
 void i915_queue_hangcheck(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct timer_list *timer = &dev_priv->gpu_error.hangcheck_timer;
+	struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
 
	if (!i915.enable_hangcheck)
		return;
 
-	/* Don't continually defer the hangcheck, but make sure it is active */
-	if (timer_pending(timer))
-		return;
-	mod_timer(timer,
-		  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
+	/* Don't continually defer the hangcheck so that it is always run at
+	 * least once after work has been scheduled on any ring. Otherwise,
+	 * we will ignore a hung ring if a second ring is kept busy.
+	 */
+
+	queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
+			   round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
 }
 
 static void ibx_irq_reset(struct drm_device *dev)
@@ -4353,9 +4356,8 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
 
-	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
-		    i915_hangcheck_elapsed,
-		    (unsigned long) dev);
+	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
+			  i915_hangcheck_elapsed);
	INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
			  intel_hpd_irq_reenable_work);