@@ -320,6 +320,143 @@ EXPORT_SYMBOL(memcg_kmem_enabled_key);
 
 struct workqueue_struct *memcg_kmem_cache_wq;
 
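+/* Current size, in bytes, of each memcg's per-node shrinker bitmap. */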
+static int memcg_shrinker_map_size;
+static DEFINE_MUTEX(memcg_shrinker_map_mutex);
+
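+/*
+ * struct memcg_shrinker_map (declared in memcontrol.h elsewhere in
+ * this series) is assumed to be an rcu_head followed by a flexible
+ * unsigned long map[] holding one bit per registered shrinker.
+ */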
+static void memcg_free_shrinker_map_rcu(struct rcu_head *head)
+{
+	kvfree(container_of(head, struct memcg_shrinker_map, rcu));
+}
+
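+/*
+ * Reallocate @memcg's per-node maps to @size bytes. All bits in the
+ * old range are set rather than copied (a spuriously set bit only
+ * costs an extra shrinker call); old maps are freed via RCU.
+ */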
+static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg,
+					 int size, int old_size)
+{
+	struct memcg_shrinker_map *new, *old;
+	int nid;
+
+	lockdep_assert_held(&memcg_shrinker_map_mutex);
+
+	for_each_node(nid) {
+		old = rcu_dereference_protected(
+			mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true);
+		/* Not yet online memcg */
+		if (!old)
+			return 0;
+
+		new = kvmalloc(sizeof(*new) + size, GFP_KERNEL);
+		if (!new)
+			return -ENOMEM;
+
+		/* Set all old bits, clear all new bits */
+		memset(new->map, (int)0xff, old_size);
+		memset((void *)new->map + old_size, 0, size - old_size);
+
+		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new);
+		call_rcu(&old->rcu, memcg_free_shrinker_map_rcu);
+	}
+
+	return 0;
+}
+
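+/*
+ * Free the per-node maps directly; callers must guarantee that no
+ * lockless readers of the maps remain.
+ */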
+static void memcg_free_shrinker_maps(struct mem_cgroup *memcg)
+{
+	struct mem_cgroup_per_node *pn;
+	struct memcg_shrinker_map *map;
+	int nid;
+
+	if (mem_cgroup_is_root(memcg))
+		return;
+
+	for_each_node(nid) {
+		pn = mem_cgroup_nodeinfo(memcg, nid);
+		map = rcu_dereference_protected(pn->shrinker_map, true);
+		if (map)
+			kvfree(map);
+		rcu_assign_pointer(pn->shrinker_map, NULL);
+	}
+}
+
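+/*
+ * Allocate zeroed per-node maps at the current global size; on
+ * failure, any maps already installed are freed again.
+ */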
+static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
+{
+	struct memcg_shrinker_map *map;
+	int nid, size, ret = 0;
+
+	if (mem_cgroup_is_root(memcg))
+		return 0;
+
+	mutex_lock(&memcg_shrinker_map_mutex);
+	size = memcg_shrinker_map_size;
+	for_each_node(nid) {
+		map = kvzalloc(sizeof(*map) + size, GFP_KERNEL);
+		if (!map) {
+			memcg_free_shrinker_maps(memcg);
+			ret = -ENOMEM;
+			break;
+		}
+		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map);
+	}
+	mutex_unlock(&memcg_shrinker_map_mutex);
+
+	return ret;
+}
+
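+/*
+ * Grow every memcg's maps so that bit @new_id fits. A no-op when
+ * the maps are already large enough; on success the new size is
+ * recorded in memcg_shrinker_map_size for future allocations.
+ */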
+int memcg_expand_shrinker_maps(int new_id)
+{
+	int size, old_size, ret = 0;
+	struct mem_cgroup *memcg;
+
+	size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
+	old_size = memcg_shrinker_map_size;
+	if (size <= old_size)
+		return 0;
+
+	mutex_lock(&memcg_shrinker_map_mutex);
+	if (!root_mem_cgroup)
+		goto unlock;
+
+	for_each_mem_cgroup(memcg) {
+		if (mem_cgroup_is_root(memcg))
+			continue;
+		ret = memcg_expand_one_shrinker_map(memcg, size, old_size);
+		if (ret)
+			goto unlock;
+	}
+unlock:
+	if (!ret)
+		memcg_shrinker_map_size = size;
+	mutex_unlock(&memcg_shrinker_map_mutex);
+	return ret;
+}
+#else /* CONFIG_MEMCG_KMEM */
+static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
+{
+	return 0;
+}
+static void memcg_free_shrinker_maps(struct mem_cgroup *memcg) { }
 #endif /* CONFIG_MEMCG_KMEM */
 
 /**
@@ -4356,6 +4469,16 @@ static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
 
+	/*
+	 * A memcg must be visible for memcg_expand_shrinker_maps()
+	 * by the time the maps are allocated. So, we allocate maps
+	 * here, when for_each_mem_cgroup() can't skip it.
+	 */
+	if (memcg_alloc_shrinker_maps(memcg)) {
+		mem_cgroup_id_remove(memcg);
+		return -ENOMEM;
+	}
+
 	/* Online state pins memcg ID, memcg ID pins CSS */
 	atomic_set(&memcg->id.ref, 1);
 	css_get(css);
@@ -4408,6 +4531,7 @@ static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
 	vmpressure_cleanup(&memcg->vmpressure);
 	cancel_work_sync(&memcg->high_work);
 	mem_cgroup_remove_from_trees(memcg);
+	memcg_free_shrinker_maps(memcg);
 	memcg_free_kmem(memcg);
 	mem_cgroup_free(memcg);
 }