@@ -35,9 +35,9 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 
-static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
+static bool shrinker_lock(struct drm_i915_private *i915, bool *unlock)
 {
-	switch (mutex_trylock_recursive(&dev_priv->drm.struct_mutex)) {
+	switch (mutex_trylock_recursive(&i915->drm.struct_mutex)) {
 	case MUTEX_TRYLOCK_RECURSIVE:
 		*unlock = false;
 		return true;
@@ -47,7 +47,7 @@ static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
 		preempt_disable();
 		do {
 			cpu_relax();
-			if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
+			if (mutex_trylock(&i915->drm.struct_mutex)) {
 				*unlock = true;
 				break;
 			}
@@ -63,12 +63,12 @@ static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
 	BUG();
 }
 
-static void shrinker_unlock(struct drm_i915_private *dev_priv, bool unlock)
+static void shrinker_unlock(struct drm_i915_private *i915, bool unlock)
 {
 	if (!unlock)
 		return;
 
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	mutex_unlock(&i915->drm.struct_mutex);
 }
 
 static bool swap_available(void)
@@ -118,7 +118,7 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
 
 /**
  * i915_gem_shrink - Shrink buffer object caches
- * @dev_priv: i915 device
+ * @i915: i915 device
  * @target: amount of memory to make available, in pages
  * @nr_scanned: optional output for number of pages scanned (incremental)
  * @flags: control flags for selecting cache types
@@ -142,7 +142,7 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
  * The number of pages of backing storage actually released.
  */
 unsigned long
-i915_gem_shrink(struct drm_i915_private *dev_priv,
+i915_gem_shrink(struct drm_i915_private *i915,
 		unsigned long target,
 		unsigned long *nr_scanned,
 		unsigned flags)
@@ -151,15 +151,15 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 		struct list_head *list;
 		unsigned int bit;
 	} phases[] = {
-		{ &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
-		{ &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
+		{ &i915->mm.unbound_list, I915_SHRINK_UNBOUND },
+		{ &i915->mm.bound_list, I915_SHRINK_BOUND },
 		{ NULL, 0 },
 	}, *phase;
 	unsigned long count = 0;
 	unsigned long scanned = 0;
 	bool unlock;
 
-	if (!shrinker_lock(dev_priv, &unlock))
+	if (!shrinker_lock(i915, &unlock))
 		return 0;
 
 	/*
@@ -172,10 +172,10 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 	 * we will free as much as we can and hope to get a second chance.
 	 */
 	if (flags & I915_SHRINK_ACTIVE)
-		i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
+		i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
 
-	trace_i915_gem_shrink(dev_priv, target, flags);
-	i915_gem_retire_requests(dev_priv);
+	trace_i915_gem_shrink(i915, target, flags);
+	i915_gem_retire_requests(i915);
 
 	/*
 	 * Unbinding of objects will require HW access; Let us not wake the
@@ -183,7 +183,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 	 * we will force the wake during oom-notifier.
 	 */
 	if ((flags & I915_SHRINK_BOUND) &&
-	    !intel_runtime_pm_get_if_in_use(dev_priv))
+	    !intel_runtime_pm_get_if_in_use(i915))
 		flags &= ~I915_SHRINK_BOUND;
 
 	/*
@@ -221,7 +221,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 		 * to be able to shrink their pages, so they remain on
 		 * the unbound/bound list until actually freed.
 		 */
-		spin_lock(&dev_priv->mm.obj_lock);
+		spin_lock(&i915->mm.obj_lock);
 		while (count < target &&
 		       (obj = list_first_entry_or_null(phase->list,
 						       typeof(*obj),
@@ -244,7 +244,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 			if (!can_release_pages(obj))
 				continue;
 
-			spin_unlock(&dev_priv->mm.obj_lock);
+			spin_unlock(&i915->mm.obj_lock);
 
 			if (unsafe_drop_pages(obj)) {
 				/* May arrive from get_pages on another bo */
@@ -258,18 +258,18 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 			}
 			scanned += obj->base.size >> PAGE_SHIFT;
 
-			spin_lock(&dev_priv->mm.obj_lock);
+			spin_lock(&i915->mm.obj_lock);
 		}
 		list_splice_tail(&still_in_list, phase->list);
-		spin_unlock(&dev_priv->mm.obj_lock);
+		spin_unlock(&i915->mm.obj_lock);
 	}
 
 	if (flags & I915_SHRINK_BOUND)
-		intel_runtime_pm_put(dev_priv);
+		intel_runtime_pm_put(i915);
 
-	i915_gem_retire_requests(dev_priv);
+	i915_gem_retire_requests(i915);
 
-	shrinker_unlock(dev_priv, unlock);
+	shrinker_unlock(i915, unlock);
 
 	if (nr_scanned)
 		*nr_scanned += scanned;
@@ -278,7 +278,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 
 /**
  * i915_gem_shrink_all - Shrink buffer object caches completely
- * @dev_priv: i915 device
+ * @i915: i915 device
  *
  * This is a simple wraper around i915_gem_shrink() to aggressively shrink all
  * caches completely. It also first waits for and retires all outstanding
@@ -290,16 +290,16 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 * Returns:
 * The number of pages of backing storage actually released.
 */
-unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
+unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
 {
 	unsigned long freed;
 
-	intel_runtime_pm_get(dev_priv);
-	freed = i915_gem_shrink(dev_priv, -1UL, NULL,
+	intel_runtime_pm_get(i915);
+	freed = i915_gem_shrink(i915, -1UL, NULL,
 				I915_SHRINK_BOUND |
 				I915_SHRINK_UNBOUND |
 				I915_SHRINK_ACTIVE);
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put(i915);
 
 	return freed;
 }
@@ -347,53 +347,53 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 static unsigned long
 i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 {
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
 		container_of(shrinker, struct drm_i915_private, mm.shrinker);
 	unsigned long freed;
 	bool unlock;
 
 	sc->nr_scanned = 0;
 
-	if (!shrinker_lock(dev_priv, &unlock))
+	if (!shrinker_lock(i915, &unlock))
 		return SHRINK_STOP;
 
-	freed = i915_gem_shrink(dev_priv,
+	freed = i915_gem_shrink(i915,
 				sc->nr_to_scan,
 				&sc->nr_scanned,
 				I915_SHRINK_BOUND |
 				I915_SHRINK_UNBOUND |
 				I915_SHRINK_PURGEABLE);
 	if (freed < sc->nr_to_scan)
-		freed += i915_gem_shrink(dev_priv,
+		freed += i915_gem_shrink(i915,
 					 sc->nr_to_scan - sc->nr_scanned,
 					 &sc->nr_scanned,
 					 I915_SHRINK_BOUND |
 					 I915_SHRINK_UNBOUND);
 	if (freed < sc->nr_to_scan && current_is_kswapd()) {
-		intel_runtime_pm_get(dev_priv);
-		freed += i915_gem_shrink(dev_priv,
+		intel_runtime_pm_get(i915);
+		freed += i915_gem_shrink(i915,
 					 sc->nr_to_scan - sc->nr_scanned,
 					 &sc->nr_scanned,
 					 I915_SHRINK_ACTIVE |
 					 I915_SHRINK_BOUND |
 					 I915_SHRINK_UNBOUND);
-		intel_runtime_pm_put(dev_priv);
+		intel_runtime_pm_put(i915);
 	}
 
-	shrinker_unlock(dev_priv, unlock);
+	shrinker_unlock(i915, unlock);
 
 	return sc->nr_scanned ? freed : SHRINK_STOP;
 }
 
 static bool
-shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv, bool *unlock,
+shrinker_lock_uninterruptible(struct drm_i915_private *i915, bool *unlock,
 			      int timeout_ms)
 {
 	unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);
 
 	do {
-		if (i915_gem_wait_for_idle(dev_priv, 0) == 0 &&
-		    shrinker_lock(dev_priv, unlock))
+		if (i915_gem_wait_for_idle(i915, 0) == 0 &&
+		    shrinker_lock(i915, unlock))
 			break;
 
 		schedule_timeout_killable(1);
@@ -412,32 +412,32 @@ shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv, bool *unlock,
 static int
 i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 {
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
 		container_of(nb, struct drm_i915_private, mm.oom_notifier);
 	struct drm_i915_gem_object *obj;
 	unsigned long unevictable, bound, unbound, freed_pages;
 
-	freed_pages = i915_gem_shrink_all(dev_priv);
+	freed_pages = i915_gem_shrink_all(i915);
 
 	/* Because we may be allocating inside our own driver, we cannot
 	 * assert that there are no objects with pinned pages that are not
 	 * being pointed to by hardware.
 	 */
 	unbound = bound = unevictable = 0;
-	spin_lock(&dev_priv->mm.obj_lock);
-	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
+	spin_lock(&i915->mm.obj_lock);
+	list_for_each_entry(obj, &i915->mm.unbound_list, mm.link) {
 		if (!can_release_pages(obj))
 			unevictable += obj->base.size >> PAGE_SHIFT;
 		else
 			unbound += obj->base.size >> PAGE_SHIFT;
 	}
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
+	list_for_each_entry(obj, &i915->mm.bound_list, mm.link) {
 		if (!can_release_pages(obj))
 			unevictable += obj->base.size >> PAGE_SHIFT;
 		else
 			bound += obj->base.size >> PAGE_SHIFT;
 	}
-	spin_unlock(&dev_priv->mm.obj_lock);
+	spin_unlock(&i915->mm.obj_lock);
 
 	if (freed_pages || unbound || bound)
 		pr_info("Purging GPU memory, %lu pages freed, "
@@ -455,39 +455,39 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 static int
 i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
 {
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
 		container_of(nb, struct drm_i915_private, mm.vmap_notifier);
 	struct i915_vma *vma, *next;
 	unsigned long freed_pages = 0;
 	bool unlock;
 	int ret;
 
-	if (!shrinker_lock_uninterruptible(dev_priv, &unlock, 5000))
+	if (!shrinker_lock_uninterruptible(i915, &unlock, 5000))
 		return NOTIFY_DONE;
 
 	/* Force everything onto the inactive lists */
-	ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
+	ret = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
 	if (ret)
 		goto out;
 
-	intel_runtime_pm_get(dev_priv);
-	freed_pages += i915_gem_shrink(dev_priv, -1UL, NULL,
+	intel_runtime_pm_get(i915);
+	freed_pages += i915_gem_shrink(i915, -1UL, NULL,
 				       I915_SHRINK_BOUND |
 				       I915_SHRINK_UNBOUND |
 				       I915_SHRINK_ACTIVE |
 				       I915_SHRINK_VMAPS);
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put(i915);
 
 	/* We also want to clear any cached iomaps as they wrap vmap */
 	list_for_each_entry_safe(vma, next,
-				 &dev_priv->ggtt.base.inactive_list, vm_link) {
+				 &i915->ggtt.base.inactive_list, vm_link) {
 		unsigned long count = vma->node.size >> PAGE_SHIFT;
 		if (vma->iomap && i915_vma_unbind(vma) == 0)
 			freed_pages += count;
 	}
 
 out:
-	shrinker_unlock(dev_priv, unlock);
+	shrinker_unlock(i915, unlock);
 
 	*(unsigned long *)ptr += freed_pages;
 	return NOTIFY_DONE;
@@ -495,34 +495,34 @@ out:
 
 /**
  * i915_gem_shrinker_register - Register the i915 shrinker
- * @dev_priv: i915 device
+ * @i915: i915 device
  *
  * This function registers and sets up the i915 shrinker and OOM handler.
  */
-void i915_gem_shrinker_register(struct drm_i915_private *dev_priv)
+void i915_gem_shrinker_register(struct drm_i915_private *i915)
 {
-	dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
-	dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
-	dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
-	dev_priv->mm.shrinker.batch = 4096;
-	WARN_ON(register_shrinker(&dev_priv->mm.shrinker));
+	i915->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
+	i915->mm.shrinker.count_objects = i915_gem_shrinker_count;
+	i915->mm.shrinker.seeks = DEFAULT_SEEKS;
+	i915->mm.shrinker.batch = 4096;
+	WARN_ON(register_shrinker(&i915->mm.shrinker));
 
-	dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
-	WARN_ON(register_oom_notifier(&dev_priv->mm.oom_notifier));
+	i915->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
+	WARN_ON(register_oom_notifier(&i915->mm.oom_notifier));
 
-	dev_priv->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
-	WARN_ON(register_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
+	i915->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
+	WARN_ON(register_vmap_purge_notifier(&i915->mm.vmap_notifier));
 }
 
 /**
  * i915_gem_shrinker_unregister - Unregisters the i915 shrinker
- * @dev_priv: i915 device
+ * @i915: i915 device
  *
  * This function unregisters the i915 shrinker and OOM handler.
  */
-void i915_gem_shrinker_unregister(struct drm_i915_private *dev_priv)
+void i915_gem_shrinker_unregister(struct drm_i915_private *i915)
 {
-	WARN_ON(unregister_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
-	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
-	unregister_shrinker(&dev_priv->mm.shrinker);
+	WARN_ON(unregister_vmap_purge_notifier(&i915->mm.vmap_notifier));
+	WARN_ON(unregister_oom_notifier(&i915->mm.oom_notifier));
+	unregister_shrinker(&i915->mm.shrinker);
}
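---
For reference, below is a minimal sketch (illustration only, not part of the
patch above) of the count_objects/scan_objects shrinker pattern that this
rename touches: a struct shrinker embedded in a driver-private struct and
recovered via container_of(), as i915 does with i915->mm.shrinker. The
"my_cache" struct, its nr_pages field, and the my_* functions are
hypothetical stand-ins; register_shrinker(), SHRINK_STOP, and DEFAULT_SEEKS
are the real kernel API of this era.

#include <linux/mm.h>
#include <linux/shrinker.h>

struct my_cache {
	unsigned long nr_pages;	/* hypothetical stand-in for i915's object lists */
	struct shrinker shrinker;
};

static unsigned long my_count(struct shrinker *shrinker,
			      struct shrink_control *sc)
{
	struct my_cache *cache =
		container_of(shrinker, struct my_cache, shrinker);

	/* Report how many objects could be freed; 0 means nothing to do. */
	return READ_ONCE(cache->nr_pages);
}

static unsigned long my_scan(struct shrinker *shrinker,
			     struct shrink_control *sc)
{
	struct my_cache *cache =
		container_of(shrinker, struct my_cache, shrinker);
	unsigned long freed = min(READ_ONCE(cache->nr_pages), sc->nr_to_scan);

	/* A real implementation would release backing storage here,
	 * under the appropriate locks, as i915_gem_shrink() does. */
	cache->nr_pages -= freed;
	sc->nr_scanned = freed;

	return freed ? freed : SHRINK_STOP;
}

static int my_cache_register(struct my_cache *cache)
{
	cache->shrinker.count_objects = my_count;
	cache->shrinker.scan_objects = my_scan;
	cache->shrinker.seeks = DEFAULT_SEEKS;
	cache->shrinker.batch = 4096;
	return register_shrinker(&cache->shrinker);
}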