@@ -140,9 +140,9 @@ int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
 #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
 void slab_init_memcg_params(struct kmem_cache *s)
 {
-	s->memcg_params.is_root_cache = true;
-	INIT_LIST_HEAD(&s->memcg_params.list);
+	s->memcg_params.root_cache = NULL;
 	RCU_INIT_POINTER(s->memcg_params.memcg_caches, NULL);
+	INIT_LIST_HEAD(&s->memcg_params.children);
 }
 
 static int init_memcg_params(struct kmem_cache *s,
@@ -150,10 +150,10 @@ static int init_memcg_params(struct kmem_cache *s,
 {
 	struct memcg_cache_array *arr;
 
-	if (memcg) {
-		s->memcg_params.is_root_cache = false;
-		s->memcg_params.memcg = memcg;
+	if (root_cache) {
 		s->memcg_params.root_cache = root_cache;
+		s->memcg_params.memcg = memcg;
+		INIT_LIST_HEAD(&s->memcg_params.children_node);
 		return 0;
 	}
 
@@ -223,7 +223,7 @@ int memcg_update_all_caches(int num_memcgs)
 
 static void unlink_memcg_cache(struct kmem_cache *s)
 {
-	list_del(&s->memcg_params.list);
+	list_del(&s->memcg_params.children_node);
 }
 #else
 static inline int init_memcg_params(struct kmem_cache *s,
@@ -594,7 +594,8 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg,
 		goto out_unlock;
 	}
 
-	list_add(&s->memcg_params.list, &root_cache->memcg_params.list);
+	list_add(&s->memcg_params.children_node,
+		 &root_cache->memcg_params.children);
 
 	/*
 	 * Since readers won't lock (see cache_from_memcg_idx()), we need a
@@ -690,7 +691,7 @@ static int shutdown_memcg_caches(struct kmem_cache *s)
 			 * list so as not to try to destroy it for a second
 			 * time while iterating over inactive caches below.
 			 */
-			list_move(&c->memcg_params.list, &busy);
+			list_move(&c->memcg_params.children_node, &busy);
 		else
 			/*
 			 * The cache is empty and will be destroyed soon. Clear
@@ -705,17 +706,17 @@ static int shutdown_memcg_caches(struct kmem_cache *s)
 	 * Second, shutdown all caches left from memory cgroups that are now
 	 * offline.
 	 */
-	list_for_each_entry_safe(c, c2, &s->memcg_params.list,
-				 memcg_params.list)
+	list_for_each_entry_safe(c, c2, &s->memcg_params.children,
+				 memcg_params.children_node)
 		shutdown_cache(c);
 
-	list_splice(&busy, &s->memcg_params.list);
+	list_splice(&busy, &s->memcg_params.children);
 
 	/*
 	 * A cache being destroyed must be empty. In particular, this means
 	 * that all per memcg caches attached to it must be empty too.
 	 */
-	if (!list_empty(&s->memcg_params.list))
+	if (!list_empty(&s->memcg_params.children))
 		return -EBUSY;
 	return 0;
 }
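
For reference, the struct side of this rename lives in include/linux/slab.h and is not part of the hunks above. A rough sketch of the layout these initializations imply is below; the union grouping is an inference from which fields each path sets (root caches touch memcg_caches and children, child caches touch memcg and children_node), not a copy of the actual header change:

/*
 * Sketch only: field layout inferred from the init paths in this patch;
 * see include/linux/slab.h for the authoritative definition.
 */
struct memcg_cache_params {
	struct kmem_cache *root_cache;	/* NULL iff this is a root cache */
	union {
		struct {	/* valid for root caches */
			struct memcg_cache_array __rcu *memcg_caches;
			struct list_head children;	/* all per-memcg children */
		};
		struct {	/* valid for per-memcg child caches */
			struct mem_cgroup *memcg;
			struct list_head children_node;	/* on parent's ->children */
		};
	};
};

With this shape, the old is_root_cache flag becomes redundant: a cache is a root cache exactly when memcg_params.root_cache is NULL, and shutdown_memcg_caches() walks a root's ->children via each child's ->children_node, as in the list_for_each_entry_safe() hunk above.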