@@ -316,10 +316,10 @@ unsigned long calculate_alignment(unsigned long flags,
 	return ALIGN(align, sizeof(void *));
 }
 
-static struct kmem_cache *
-do_kmem_cache_create(const char *name, size_t object_size, size_t size,
-		     size_t align, unsigned long flags, void (*ctor)(void *),
-		     struct mem_cgroup *memcg, struct kmem_cache *root_cache)
+static struct kmem_cache *create_cache(const char *name,
+		size_t object_size, size_t size, size_t align,
+		unsigned long flags, void (*ctor)(void *),
+		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
 {
 	struct kmem_cache *s;
 	int err;
@@ -418,9 +418,9 @@ kmem_cache_create(const char *name, size_t size, size_t align,
 		goto out_unlock;
 	}
 
-	s = do_kmem_cache_create(cache_name, size, size,
-				 calculate_alignment(flags, align, size),
-				 flags, ctor, NULL, NULL);
+	s = create_cache(cache_name, size, size,
+			 calculate_alignment(flags, align, size),
+			 flags, ctor, NULL, NULL);
 	if (IS_ERR(s)) {
 		err = PTR_ERR(s);
 		kfree_const(cache_name);
@@ -448,7 +448,7 @@ out_unlock:
 }
 EXPORT_SYMBOL(kmem_cache_create);
 
-static int do_kmem_cache_shutdown(struct kmem_cache *s,
+static int shutdown_cache(struct kmem_cache *s,
 		struct list_head *release, bool *need_rcu_barrier)
 {
 	if (__kmem_cache_shutdown(s) != 0) {
@@ -469,8 +469,7 @@ static int do_kmem_cache_shutdown(struct kmem_cache *s,
 	return 0;
 }
 
-static void do_kmem_cache_release(struct list_head *release,
-				  bool need_rcu_barrier)
+static void release_caches(struct list_head *release, bool need_rcu_barrier)
 {
 	struct kmem_cache *s, *s2;
 
@@ -536,10 +535,10 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg,
 	if (!cache_name)
 		goto out_unlock;
 
-	s = do_kmem_cache_create(cache_name, root_cache->object_size,
-				 root_cache->size, root_cache->align,
-				 root_cache->flags, root_cache->ctor,
-				 memcg, root_cache);
+	s = create_cache(cache_name, root_cache->object_size,
+			 root_cache->size, root_cache->align,
+			 root_cache->flags, root_cache->ctor,
+			 memcg, root_cache);
 	/*
 	 * If we could not create a memcg cache, do not complain, because
 	 * that's not critical at all as we can always proceed with the root
@@ -615,14 +614,14 @@ void memcg_destroy_kmem_caches(struct mem_cgroup *memcg)
 		 * The cgroup is about to be freed and therefore has no charges
 		 * left. Hence, all its caches must be empty by now.
 		 */
-		BUG_ON(do_kmem_cache_shutdown(s, &release, &need_rcu_barrier));
+		BUG_ON(shutdown_cache(s, &release, &need_rcu_barrier));
 	}
 	mutex_unlock(&slab_mutex);
 
 	put_online_mems();
 	put_online_cpus();
 
-	do_kmem_cache_release(&release, need_rcu_barrier);
+	release_caches(&release, need_rcu_barrier);
 }
 #endif /* CONFIG_MEMCG_KMEM */
 
@@ -655,12 +654,12 @@ void kmem_cache_destroy(struct kmem_cache *s)
 		goto out_unlock;
 
 	for_each_memcg_cache_safe(c, c2, s) {
-		if (do_kmem_cache_shutdown(c, &release, &need_rcu_barrier))
+		if (shutdown_cache(c, &release, &need_rcu_barrier))
 			busy = true;
 	}
 
 	if (!busy)
-		do_kmem_cache_shutdown(s, &release, &need_rcu_barrier);
+		shutdown_cache(s, &release, &need_rcu_barrier);
 
 out_unlock:
 	mutex_unlock(&slab_mutex);
@@ -668,7 +667,7 @@ out_unlock:
 	put_online_mems();
 	put_online_cpus();
 
-	do_kmem_cache_release(&release, need_rcu_barrier);
+	release_caches(&release, need_rcu_barrier);
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
 