@@ -364,6 +364,21 @@ int prealloc_shrinker(struct shrinker *shrinker)
 	if (!shrinker->nr_deferred)
 		return -ENOMEM;
 
+	/*
+	 * There is a window between prealloc_shrinker()
+	 * and register_shrinker_prepared(). We don't want
+	 * to clear the bit of a shrinker in such a state
+	 * in shrink_slab_memcg(), since this would impose
+	 * restrictions on code registering a shrinker
+	 * (it would have to guarantee its LRU lists are
+	 * empty until the shrinker is completely registered).
+	 * So we differentiate between the cases where 1) a
+	 * shrinker is semi-registered (id assigned, but not
+	 * yet linked to shrinker_list) and 2) a shrinker is
+	 * not registered at all (no id assigned).
+	 */
+	INIT_LIST_HEAD(&shrinker->list);
+
 	if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
 		if (prealloc_memcg_shrinker(shrinker))
 			goto free_deferred;
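
The INIT_LIST_HEAD() above is what makes the list_empty() test in the new shrink_slab_memcg() below a reliable "semi-registered" check. A minimal caller-side sketch of the window being described, assuming the two-phase registration API already in the tree (prealloc_shrinker() assigns the id, register_shrinker_prepared() links the shrinker into shrinker_list); the structure and function names here are hypothetical:

	int my_cache_init(struct my_cache *c)             /* hypothetical caller */
	{
		int err;

		err = prealloc_shrinker(&c->shrinker);    /* id assigned, ->list still empty */
		if (err)
			return err;
		/*
		 * The window the comment above describes: shrink_slab_memcg()
		 * may find this shrinker's bit set, but skips it because
		 * list_empty(&c->shrinker.list) is still true.
		 */
		register_shrinker_prepared(&c->shrinker); /* now on shrinker_list */
		return 0;
	}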
@@ -543,6 +558,63 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
 	return freed;
 }
 
+#ifdef CONFIG_MEMCG_KMEM
+static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
+			struct mem_cgroup *memcg, int priority)
+{
+	struct memcg_shrinker_map *map;
+	unsigned long freed = 0;
+	int ret, i;
+
+	if (!memcg_kmem_enabled() || !mem_cgroup_online(memcg))
+		return 0;
+
+	if (!down_read_trylock(&shrinker_rwsem))
+		return 0;
+
+	map = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_map,
+					true);
+	if (unlikely(!map))
+		goto unlock;
+
+	for_each_set_bit(i, map->map, shrinker_nr_max) {
+		struct shrink_control sc = {
+			.gfp_mask = gfp_mask,
+			.nid = nid,
+			.memcg = memcg,
+		};
+		struct shrinker *shrinker;
+
+		shrinker = idr_find(&shrinker_idr, i);
+		if (unlikely(!shrinker)) {
+			clear_bit(i, map->map);
+			continue;
+		}
+
+		/* See comment in prealloc_shrinker() */
+		if (unlikely(list_empty(&shrinker->list)))
+			continue;
+
+		ret = do_shrink_slab(&sc, shrinker, priority);
+		freed += ret;
+
+		if (rwsem_is_contended(&shrinker_rwsem)) {
+			freed = freed ? : 1;
+			break;
+		}
+	}
+unlock:
+	up_read(&shrinker_rwsem);
+	return freed;
+}
+#else /* CONFIG_MEMCG_KMEM */
+static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
+			struct mem_cgroup *memcg, int priority)
+{
+	return 0;
+}
+#endif /* CONFIG_MEMCG_KMEM */
+
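
A few notes on the loop above. A set bit whose id no longer resolves via idr_find() is stale, so it is cleared and skipped. The list_empty() check is the "semi-registered" test described in prealloc_shrinker(). On shrinker_rwsem contention the walk bails out early, and "freed = freed ? : 1;" reports at least one freed object so the caller does not mistake the early exit for "nothing reclaimable". The bits consumed here are expected to be set on the producer side, roughly as in the sketch below; memcg_set_shrinker_bit() and shrinker->id come from other patches in this series, and the guarding condition is hypothetical:

	/* e.g. when the first object for this memcg/node goes onto an LRU list */
	if (first_item_for_this_memcg_node)               /* hypothetical condition */
		memcg_set_shrinker_bit(memcg, nid, shrinker->id);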
 /**
  * shrink_slab - shrink slab caches
  * @gfp_mask: allocation context
@@ -572,8 +644,8 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
 	struct shrinker *shrinker;
 	unsigned long freed = 0;
 
-	if (memcg && (!memcg_kmem_enabled() || !mem_cgroup_online(memcg)))
-		return 0;
+	if (memcg && !mem_cgroup_is_root(memcg))
+		return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
 
 	if (!down_read_trylock(&shrinker_rwsem))
 		goto out;
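
With this change, reclaim targeted at a non-root memcg is dispatched straight to shrink_slab_memcg(), which walks only the shrinkers whose bits are set in that memcg's per-node map; global reclaim and the root memcg still walk the full shrinker_list below. The memcg_kmem_enabled() and mem_cgroup_online() checks have not disappeared, they moved into shrink_slab_memcg(). Informally:

	/*
	 * shrink_slab(memcg != NULL, non-root)  ->  shrink_slab_memcg(): per-memcg bitmap walk
	 * shrink_slab(memcg == NULL or root)    ->  global shrinker_list walk below
	 */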
@@ -585,13 +657,7 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
 			.memcg = memcg,
 		};
 
-		/*
-		 * If kernel memory accounting is disabled, we ignore
-		 * SHRINKER_MEMCG_AWARE flag and call all shrinkers
-		 * passing NULL for memcg.
-		 */
-		if (memcg_kmem_enabled() &&
-		    !!memcg != !!(shrinker->flags & SHRINKER_MEMCG_AWARE))
+		if (!!memcg != !!(shrinker->flags & SHRINKER_MEMCG_AWARE))
 			continue;
 
 		if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
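
The dropped special case used to ignore SHRINKER_MEMCG_AWARE entirely when kernel memory accounting was disabled. Since non-root memcgs are now diverted to shrink_slab_memcg() above (which itself returns 0 when accounting is off), the remaining check simply pairs each shrinker with the kind of call it can handle:

	/*
	 * memcg == NULL : only !SHRINKER_MEMCG_AWARE shrinkers are invoked
	 * memcg != NULL : only  SHRINKER_MEMCG_AWARE shrinkers are invoked
	 */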