@@ -301,39 +301,64 @@ out_unlock:
 	mutex_unlock(&slab_mutex);
 	put_online_cpus();
 }
+
+static int kmem_cache_destroy_memcg_children(struct kmem_cache *s)
+{
+	int rc;
+
+	if (!s->memcg_params ||
+	    !s->memcg_params->is_root_cache)
+		return 0;
+
+	mutex_unlock(&slab_mutex);
+	rc = __kmem_cache_destroy_memcg_children(s);
+	mutex_lock(&slab_mutex);
+
+	return rc;
+}
+#else
+static int kmem_cache_destroy_memcg_children(struct kmem_cache *s)
+{
+	return 0;
+}
 #endif /* CONFIG_MEMCG_KMEM */
 
 void kmem_cache_destroy(struct kmem_cache *s)
 {
-	/* Destroy all the children caches if we aren't a memcg cache */
-	kmem_cache_destroy_memcg_children(s);
-
 	get_online_cpus();
 	mutex_lock(&slab_mutex);
+
 	s->refcount--;
-	if (!s->refcount) {
-		list_del(&s->list);
-		memcg_unregister_cache(s);
-
-		if (!__kmem_cache_shutdown(s)) {
-			mutex_unlock(&slab_mutex);
-			if (s->flags & SLAB_DESTROY_BY_RCU)
-				rcu_barrier();
-
-			memcg_free_cache_params(s);
-			kfree(s->name);
-			kmem_cache_free(kmem_cache, s);
-		} else {
-			list_add(&s->list, &slab_caches);
-			memcg_register_cache(s);
-			mutex_unlock(&slab_mutex);
-			printk(KERN_ERR "kmem_cache_destroy %s: Slab cache still has objects\n",
-			       s->name);
-			dump_stack();
-		}
-	} else {
-		mutex_unlock(&slab_mutex);
+	if (s->refcount)
+		goto out_unlock;
+
+	if (kmem_cache_destroy_memcg_children(s) != 0)
+		goto out_unlock;
+
+	list_del(&s->list);
+	memcg_unregister_cache(s);
+
+	if (__kmem_cache_shutdown(s) != 0) {
+		list_add(&s->list, &slab_caches);
+		memcg_register_cache(s);
+		printk(KERN_ERR "kmem_cache_destroy %s: "
+		       "Slab cache still has objects\n", s->name);
+		dump_stack();
+		goto out_unlock;
 	}
+
+	mutex_unlock(&slab_mutex);
+	if (s->flags & SLAB_DESTROY_BY_RCU)
+		rcu_barrier();
+
+	memcg_free_cache_params(s);
+	kfree(s->name);
+	kmem_cache_free(kmem_cache, s);
+	goto out_put_cpus;
+
+out_unlock:
+	mutex_unlock(&slab_mutex);
+out_put_cpus:
+	put_online_cpus();
+}
 EXPORT_SYMBOL(kmem_cache_destroy);