@@ -35,9 +35,9 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 
-static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
+static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
 {
-	switch (mutex_trylock_recursive(&dev->struct_mutex)) {
+	switch (mutex_trylock_recursive(&dev_priv->drm.struct_mutex)) {
 	case MUTEX_TRYLOCK_FAILED:
 		return false;
 
@@ -53,12 +53,12 @@ static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
 	BUG();
 }
 
-static void i915_gem_shrinker_unlock(struct drm_device *dev, bool unlock)
+static void shrinker_unlock(struct drm_i915_private *dev_priv, bool unlock)
 {
 	if (!unlock)
 		return;
 
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev_priv->drm.struct_mutex);
 
 	/* expedite the RCU grace period to free some request slabs */
 	synchronize_rcu_expedited();
@@ -156,7 +156,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 	unsigned long count = 0;
 	bool unlock;
 
-	if (!i915_gem_shrinker_lock(&dev_priv->drm, &unlock))
+	if (!shrinker_lock(dev_priv, &unlock))
 		return 0;
 
 	trace_i915_gem_shrink(dev_priv, target, flags);
@@ -244,7 +244,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 
 	i915_gem_retire_requests(dev_priv);
 
-	i915_gem_shrinker_unlock(&dev_priv->drm, unlock);
+	shrinker_unlock(dev_priv, unlock);
 
 	return count;
 }
@@ -284,12 +284,11 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 {
 	struct drm_i915_private *dev_priv =
 		container_of(shrinker, struct drm_i915_private, mm.shrinker);
-	struct drm_device *dev = &dev_priv->drm;
 	struct drm_i915_gem_object *obj;
 	unsigned long count;
 	bool unlock;
 
-	if (!i915_gem_shrinker_lock(dev, &unlock))
+	if (!shrinker_lock(dev_priv, &unlock))
 		return 0;
 
 	i915_gem_retire_requests(dev_priv);
@@ -304,7 +303,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 			count += obj->base.size >> PAGE_SHIFT;
 	}
 
-	i915_gem_shrinker_unlock(dev, unlock);
+	shrinker_unlock(dev_priv, unlock);
 
 	return count;
 }
@@ -314,11 +313,10 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 {
 	struct drm_i915_private *dev_priv =
 		container_of(shrinker, struct drm_i915_private, mm.shrinker);
-	struct drm_device *dev = &dev_priv->drm;
 	unsigned long freed;
 	bool unlock;
 
-	if (!i915_gem_shrinker_lock(dev, &unlock))
+	if (!shrinker_lock(dev_priv, &unlock))
 		return SHRINK_STOP;
 
 	freed = i915_gem_shrink(dev_priv,
@@ -332,26 +330,20 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 					 I915_SHRINK_BOUND |
 					 I915_SHRINK_UNBOUND);
 
-	i915_gem_shrinker_unlock(dev, unlock);
+	shrinker_unlock(dev_priv, unlock);
 
 	return freed;
 }
 
-struct shrinker_lock_uninterruptible {
-	bool was_interruptible;
-	bool unlock;
-};
-
 static bool
-i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv,
-				       struct shrinker_lock_uninterruptible *slu,
-				       int timeout_ms)
+shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv, bool *unlock,
+			      int timeout_ms)
 {
 	unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);
 
 	do {
 		if (i915_gem_wait_for_idle(dev_priv, 0) == 0 &&
-		    i915_gem_shrinker_lock(&dev_priv->drm, &slu->unlock))
+		    shrinker_lock(dev_priv, unlock))
 			break;
 
 		schedule_timeout_killable(1);
@@ -364,29 +356,19 @@ i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv,
 		}
 	} while (1);
 
-	slu->was_interruptible = dev_priv->mm.interruptible;
-	dev_priv->mm.interruptible = false;
 	return true;
 }
 
-static void
-i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv,
-					 struct shrinker_lock_uninterruptible *slu)
-{
-	dev_priv->mm.interruptible = slu->was_interruptible;
-	i915_gem_shrinker_unlock(&dev_priv->drm, slu->unlock);
-}
-
 static int
 i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 {
 	struct drm_i915_private *dev_priv =
 		container_of(nb, struct drm_i915_private, mm.oom_notifier);
-	struct shrinker_lock_uninterruptible slu;
 	struct drm_i915_gem_object *obj;
 	unsigned long unevictable, bound, unbound, freed_pages;
+	bool unlock;
 
-	if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
+	if (!shrinker_lock_uninterruptible(dev_priv, &unlock, 5000))
 		return NOTIFY_DONE;
 
 	freed_pages = i915_gem_shrink_all(dev_priv);
@@ -415,7 +397,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 			bound += obj->base.size >> PAGE_SHIFT;
 	}
 
-	i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);
+	shrinker_unlock(dev_priv, unlock);
 
 	if (freed_pages || unbound || bound)
 		pr_info("Purging GPU memory, %lu pages freed, "
@@ -435,12 +417,12 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
 {
 	struct drm_i915_private *dev_priv =
 		container_of(nb, struct drm_i915_private, mm.vmap_notifier);
-	struct shrinker_lock_uninterruptible slu;
 	struct i915_vma *vma, *next;
 	unsigned long freed_pages = 0;
+	bool unlock;
 	int ret;
 
-	if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
+	if (!shrinker_lock_uninterruptible(dev_priv, &unlock, 5000))
 		return NOTIFY_DONE;
 
 	/* Force everything onto the inactive lists */
@@ -465,7 +447,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
 	}
 
 out:
-	i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);
+	shrinker_unlock(dev_priv, unlock);
 
 	*(unsigned long *)ptr += freed_pages;
 	return NOTIFY_DONE;