@@ -334,6 +334,7 @@ struct mem_cgroup {
 #if defined(CONFIG_MEMCG_KMEM)
	/* Index in the kmem_cache->memcg_params.memcg_caches array */
	int kmemcg_id;
+	bool kmem_acct_activated;
	bool kmem_acct_active;
 #endif
 
@@ -582,14 +583,10 @@ void memcg_put_cache_ids(void)
 struct static_key memcg_kmem_enabled_key;
 EXPORT_SYMBOL(memcg_kmem_enabled_key);
 
-static void memcg_free_cache_id(int id);
-
 static void disarm_kmem_keys(struct mem_cgroup *memcg)
 {
-	if (memcg->kmemcg_id >= 0) {
+	if (memcg->kmem_acct_activated)
 		static_key_slow_dec(&memcg_kmem_enabled_key);
-		memcg_free_cache_id(memcg->kmemcg_id);
-	}
	/*
	 * This check can't live in kmem destruction function,
	 * since the charges will outlive the cgroup
@@ -3322,6 +3319,7 @@ static int memcg_activate_kmem(struct mem_cgroup *memcg,
	int memcg_id;
 
	BUG_ON(memcg->kmemcg_id >= 0);
+	BUG_ON(memcg->kmem_acct_activated);
	BUG_ON(memcg->kmem_acct_active);
 
	/*
@@ -3365,6 +3363,7 @@ static int memcg_activate_kmem(struct mem_cgroup *memcg,
	 * patched.
	 */
	memcg->kmemcg_id = memcg_id;
+	memcg->kmem_acct_activated = true;
	memcg->kmem_acct_active = true;
 out:
	return err;
@@ -4047,6 +4046,10 @@ static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
 
 static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
 {
+	struct cgroup_subsys_state *css;
+	struct mem_cgroup *parent, *child;
+	int kmemcg_id;
+
	if (!memcg->kmem_acct_active)
		return;
 
@@ -4059,6 +4062,32 @@ static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
	memcg->kmem_acct_active = false;
 
	memcg_deactivate_kmem_caches(memcg);
+
+	kmemcg_id = memcg->kmemcg_id;
+	BUG_ON(kmemcg_id < 0);
+
+	parent = parent_mem_cgroup(memcg);
+	if (!parent)
+		parent = root_mem_cgroup;
+
+	/*
+	 * Change kmemcg_id of this cgroup and all its descendants to the
+	 * parent's id, and then move all entries from this cgroup's list_lrus
+	 * to ones of the parent. After we have finished, all list_lrus
+	 * corresponding to this cgroup are guaranteed to remain empty. The
+	 * ordering is imposed by list_lru_node->lock taken by
+	 * memcg_drain_all_list_lrus().
+	 */
+	css_for_each_descendant_pre(css, &memcg->css) {
+		child = mem_cgroup_from_css(css);
+		BUG_ON(child->kmemcg_id != kmemcg_id);
+		child->kmemcg_id = parent->kmemcg_id;
+		if (!memcg->use_hierarchy)
+			break;
+	}
+	memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
+
+	memcg_free_cache_id(kmemcg_id);
 }
 
 static void memcg_destroy_kmem(struct mem_cgroup *memcg)