@@ -310,9 +310,7 @@ EXPORT_SYMBOL(unregister_shrinker);
 #define SHRINK_BATCH 128
 
 static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
-				    struct shrinker *shrinker,
-				    unsigned long nr_scanned,
-				    unsigned long nr_eligible)
+				    struct shrinker *shrinker, int priority)
 {
 	unsigned long freed = 0;
 	unsigned long long delta;
@@ -337,9 +335,9 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
 	nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
 
 	total_scan = nr;
-	delta = (4 * nr_scanned) / shrinker->seeks;
-	delta *= freeable;
-	do_div(delta, nr_eligible + 1);
+	delta = freeable >> priority;
+	delta *= 4;
+	do_div(delta, shrinker->seeks);
 	total_scan += delta;
 	if (total_scan < 0) {
 		pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
@@ -373,8 +371,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
 		total_scan = freeable * 2;
 
 	trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
-				   nr_scanned, nr_eligible,
-				   freeable, delta, total_scan);
+				   freeable, delta, total_scan, priority);
 
 	/*
 	 * Normally, we should not scan less than batch_size objects in one
@@ -434,8 +431,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
  * @gfp_mask: allocation context
  * @nid: node whose slab caches to target
  * @memcg: memory cgroup whose slab caches to target
- * @nr_scanned: pressure numerator
- * @nr_eligible: pressure denominator
+ * @priority: the reclaim priority
  *
  * Call the shrink functions to age shrinkable caches.
  *
@@ -447,20 +443,14 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
  * objects from the memory cgroup specified. Otherwise, only unaware
  * shrinkers are called.
  *
- * @nr_scanned and @nr_eligible form a ratio that indicate how much of
- * the available objects should be scanned.  Page reclaim for example
- * passes the number of pages scanned and the number of pages on the
- * LRU lists that it considered on @nid, plus a bias in @nr_scanned
- * when it encountered mapped pages.  The ratio is further biased by
- * the ->seeks setting of the shrink function, which indicates the
- * cost to recreate an object relative to that of an LRU page.
+ * @priority is sc->priority, we take the number of objects and >> by priority
+ * in order to get the scan target.
  *
  * Returns the number of reclaimed slab objects.
  */
 static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
 				 struct mem_cgroup *memcg,
-				 unsigned long nr_scanned,
-				 unsigned long nr_eligible)
+				 int priority)
 {
 	struct shrinker *shrinker;
 	unsigned long freed = 0;
@@ -468,9 +458,6 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
 	if (memcg && (!memcg_kmem_enabled() || !mem_cgroup_online(memcg)))
 		return 0;
 
-	if (nr_scanned == 0)
-		nr_scanned = SWAP_CLUSTER_MAX;
-
 	if (!down_read_trylock(&shrinker_rwsem)) {
 		/*
 		 * If we would return 0, our callers would understand that we
@@ -501,7 +488,7 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
 		if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
 			sc.nid = 0;
 
-		freed += do_shrink_slab(&sc, shrinker, nr_scanned, nr_eligible);
+		freed += do_shrink_slab(&sc, shrinker, priority);
 	}
 
 	up_read(&shrinker_rwsem);
@@ -519,8 +506,7 @@ void drop_slab_node(int nid)
 
 		freed = 0;
 		do {
-			freed += shrink_slab(GFP_KERNEL, nid, memcg,
-					     1000, 1000);
+			freed += shrink_slab(GFP_KERNEL, nid, memcg, 0);
 		} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
 	} while (freed > 10);
 }
@@ -2615,14 +2601,12 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 
 			reclaimed = sc->nr_reclaimed;
 			scanned = sc->nr_scanned;
-
 			shrink_node_memcg(pgdat, memcg, sc, &lru_pages);
 			node_lru_pages += lru_pages;
 
 			if (memcg)
 				shrink_slab(sc->gfp_mask, pgdat->node_id,
-					    memcg, sc->nr_scanned - scanned,
-					    lru_pages);
+					    memcg, sc->priority);
 
 			/* Record the group's reclaim efficiency */
 			vmpressure(sc->gfp_mask, memcg, false,
@@ -2646,14 +2630,9 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 		}
 	} while ((memcg = mem_cgroup_iter(root, memcg, &reclaim)));
 
-	/*
-	 * Shrink the slab caches in the same proportion that
-	 * the eligible LRU pages were scanned.
-	 */
 	if (global_reclaim(sc))
 		shrink_slab(sc->gfp_mask, pgdat->node_id, NULL,
-			    sc->nr_scanned - nr_scanned,
-			    node_lru_pages);
+			    sc->priority);
 
 	if (reclaim_state) {
 		sc->nr_reclaimed += reclaim_state->reclaimed_slab;