@@ -334,6 +334,7 @@ struct mem_cgroup {
 #if defined(CONFIG_MEMCG_KMEM)
 	/* Index in the kmem_cache->memcg_params.memcg_caches array */
 	int kmemcg_id;
+	bool kmem_acct_active;
 #endif
 
 	int last_scanned_node;
@@ -354,7 +355,7 @@ struct mem_cgroup {
 #ifdef CONFIG_MEMCG_KMEM
 bool memcg_kmem_is_active(struct mem_cgroup *memcg)
 {
-	return memcg->kmemcg_id >= 0;
+	return memcg->kmem_acct_active;
 }
 #endif
 
@@ -585,7 +586,7 @@ static void memcg_free_cache_id(int id);
 
 static void disarm_kmem_keys(struct mem_cgroup *memcg)
 {
-	if (memcg_kmem_is_active(memcg)) {
+	if (memcg->kmemcg_id >= 0) {
 		static_key_slow_dec(&memcg_kmem_enabled_key);
 		memcg_free_cache_id(memcg->kmemcg_id);
 	}
@@ -2666,6 +2667,7 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep)
 {
 	struct mem_cgroup *memcg;
 	struct kmem_cache *memcg_cachep;
+	int kmemcg_id;
 
 	VM_BUG_ON(!is_root_cache(cachep));
 
@@ -2673,10 +2675,11 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep)
 		return cachep;
 
 	memcg = get_mem_cgroup_from_mm(current->mm);
-	if (!memcg_kmem_is_active(memcg))
+	kmemcg_id = ACCESS_ONCE(memcg->kmemcg_id);
+	if (kmemcg_id < 0)
 		goto out;
 
-	memcg_cachep = cache_from_memcg_idx(cachep, memcg_cache_id(memcg));
+	memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id);
 	if (likely(memcg_cachep))
 		return memcg_cachep;
 
@@ -3318,8 +3321,8 @@ static int memcg_activate_kmem(struct mem_cgroup *memcg,
 	int err = 0;
 	int memcg_id;
 
-	if (memcg_kmem_is_active(memcg))
-		return 0;
+	BUG_ON(memcg->kmemcg_id >= 0);
+	BUG_ON(memcg->kmem_acct_active);
 
 	/*
 	 * For simplicity, we won't allow this to be disabled.  It also can't
@@ -3362,6 +3365,7 @@ static int memcg_activate_kmem(struct mem_cgroup *memcg,
 	 * patched.
 	 */
 	memcg->kmemcg_id = memcg_id;
+	memcg->kmem_acct_active = true;
 out:
 	return err;
 }
@@ -4041,6 +4045,22 @@ static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
 	return mem_cgroup_sockets_init(memcg, ss);
 }
 
+static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
+{
+	if (!memcg->kmem_acct_active)
+		return;
+
+	/*
+	 * Clear the 'active' flag before clearing memcg_caches arrays entries.
+	 * Since we take the slab_mutex in memcg_deactivate_kmem_caches(), it
+	 * guarantees no cache will be created for this cgroup after we are
+	 * done (see memcg_create_kmem_cache()).
+	 */
+	memcg->kmem_acct_active = false;
+
+	memcg_deactivate_kmem_caches(memcg);
+}
+
 static void memcg_destroy_kmem(struct mem_cgroup *memcg)
 {
 	memcg_destroy_kmem_caches(memcg);
@@ -4052,6 +4072,10 @@ static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
 	return 0;
 }
 
+static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
+{
+}
+
 static void memcg_destroy_kmem(struct mem_cgroup *memcg)
 {
 }
@@ -4608,6 +4632,8 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
 	spin_unlock(&memcg->event_list_lock);
 
 	vmpressure_cleanup(&memcg->vmpressure);
+
+	memcg_deactivate_kmem(memcg);
 }
 
 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)