@@ -139,6 +139,8 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
 
 static u32 __i915_gem_park(struct drm_i915_private *i915)
 {
+	GEM_TRACE("\n");
+
 	lockdep_assert_held(&i915->drm.struct_mutex);
 	GEM_BUG_ON(i915->gt.active_requests);
 	GEM_BUG_ON(!list_empty(&i915->gt.active_rings));
@@ -181,6 +183,8 @@ static u32 __i915_gem_park(struct drm_i915_private *i915)
 
 void i915_gem_park(struct drm_i915_private *i915)
 {
+	GEM_TRACE("\n");
+
 	lockdep_assert_held(&i915->drm.struct_mutex);
 	GEM_BUG_ON(i915->gt.active_requests);
 
@@ -193,6 +197,8 @@ void i915_gem_park(struct drm_i915_private *i915)
 
 void i915_gem_unpark(struct drm_i915_private *i915)
 {
+	GEM_TRACE("\n");
+
 	lockdep_assert_held(&i915->drm.struct_mutex);
 	GEM_BUG_ON(!i915->gt.active_requests);
 
@@ -3503,6 +3509,24 @@ i915_gem_idle_work_handler(struct work_struct *work)
 	if (!READ_ONCE(dev_priv->gt.awake))
 		return;
 
+	if (READ_ONCE(dev_priv->gt.active_requests))
+		return;
+
+	/*
+	 * Flush out the last user context, leaving only the pinned
+	 * kernel context resident. When we are idling on the kernel_context,
+	 * no more new requests (with a context switch) are emitted and we
+	 * can finally rest. A consequence is that the idle work handler is
+	 * always called at least twice before idling (and if the system is
+	 * idle that implies a round trip through the retire worker).
+	 */
+	mutex_lock(&dev_priv->drm.struct_mutex);
+	i915_gem_switch_to_kernel_context(dev_priv);
+	mutex_unlock(&dev_priv->drm.struct_mutex);
+
+	GEM_TRACE("active_requests=%d (after switch-to-kernel-context)\n",
+		  READ_ONCE(dev_priv->gt.active_requests));
+
 	/*
 	 * Wait for last execlists context complete, but bail out in case a
 	 * new request is submitted. As we don't trust the hardware, we
@@ -4913,11 +4937,9 @@ static void assert_kernel_context_is_current(struct drm_i915_private *i915)
 
 void i915_gem_sanitize(struct drm_i915_private *i915)
 {
-	if (i915_terminally_wedged(&i915->gpu_error)) {
-		mutex_lock(&i915->drm.struct_mutex);
+	mutex_lock(&i915->drm.struct_mutex);
+	if (i915_terminally_wedged(&i915->gpu_error))
 		i915_gem_unset_wedged(i915);
-		mutex_unlock(&i915->drm.struct_mutex);
-	}
 
 	/*
 	 * If we inherit context state from the BIOS or earlier occupants
@@ -4929,6 +4951,9 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
 	 */
 	if (INTEL_GEN(i915) >= 5 && intel_has_gpu_reset(i915))
 		WARN_ON(intel_gpu_reset(i915, ALL_ENGINES));
+
+	i915_gem_contexts_lost(i915);
+	mutex_unlock(&i915->drm.struct_mutex);
 }
 
 int i915_gem_suspend(struct drm_i915_private *dev_priv)
@@ -4964,7 +4989,6 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
 
 		assert_kernel_context_is_current(dev_priv);
 	}
-	i915_gem_contexts_lost(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
 
 	intel_uc_suspend(dev_priv);
@@ -5017,6 +5041,8 @@ err_unlock:
 
 void i915_gem_resume(struct drm_i915_private *i915)
 {
+	GEM_TRACE("\n");
+
 	WARN_ON(i915->gt.awake);
 
 	mutex_lock(&i915->drm.struct_mutex);
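
The comment added in the idle-worker hunk implies the handler must run at
least twice before the GPU parks: the switch to the kernel context is itself
a request, so the pass that emits it can never be the pass that parks. Below
is a minimal standalone sketch of that two-pass shape, with entirely made-up
toy_* names (this is not i915 code, just an illustration of the scheme):

/* Toy model of the two-pass idle described above; all names invented. */
#include <stdbool.h>
#include <stdio.h>

struct toy_gt {
	int active_requests;	/* in-flight requests, incl. the flush */
	bool awake;
	bool on_kernel_context;
};

/* Emitting the switch is itself a request that must retire. */
static void toy_switch_to_kernel_context(struct toy_gt *gt)
{
	gt->active_requests++;
	gt->on_kernel_context = true;
}

/* Stand-in for the retire worker completing everything outstanding. */
static void toy_retire_requests(struct toy_gt *gt)
{
	gt->active_requests = 0;
}

/* One pass of the idle worker; returns true once parked. */
static bool toy_idle_worker(struct toy_gt *gt)
{
	if (!gt->awake)
		return true;

	if (gt->active_requests)
		return false;		/* busy: try again later */

	if (!gt->on_kernel_context) {
		/* Pass 1: flush the last user context, stay awake. */
		toy_switch_to_kernel_context(gt);
		return false;
	}

	/* Pass 2: idling on the kernel context, safe to park. */
	gt->awake = false;
	return true;
}

int main(void)
{
	struct toy_gt gt = { .awake = true };

	printf("pass 1 parked? %d\n", toy_idle_worker(&gt));	/* 0 */
	toy_retire_requests(&gt);	/* round trip via retire worker */
	printf("pass 2 parked? %d\n", toy_idle_worker(&gt));	/* 1 */
	return 0;
}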