@@ -1592,18 +1592,12 @@ static inline void add_partial(struct kmem_cache_node *n,
 	__add_partial(n, page, tail);
 }
 
-static inline void
-__remove_partial(struct kmem_cache_node *n, struct page *page)
-{
-	list_del(&page->lru);
-	n->nr_partial--;
-}
-
 static inline void remove_partial(struct kmem_cache_node *n,
 					struct page *page)
 {
 	lockdep_assert_held(&n->list_lock);
-	__remove_partial(n, page);
+	list_del(&page->lru);
+	n->nr_partial--;
 }
 
 /*
@@ -3184,6 +3178,12 @@ static void free_kmem_cache_nodes(struct kmem_cache *s)
 	}
 }
 
+void __kmem_cache_release(struct kmem_cache *s)
+{
+	free_percpu(s->cpu_slab);
+	free_kmem_cache_nodes(s);
+}
+
 static int init_kmem_cache_nodes(struct kmem_cache *s)
 {
 	int node;
@@ -3443,28 +3443,31 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 
 /*
  * Attempt to free all partial slabs on a node.
- * This is called from kmem_cache_close(). We must be the last thread
- * using the cache and therefore we do not need to lock anymore.
+ * This is called from __kmem_cache_shutdown(). We must take list_lock
+ * because a sysfs file might still access the partial list after shutdown.
  */
 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 {
 	struct page *page, *h;
 
+	BUG_ON(irqs_disabled());
+	spin_lock_irq(&n->list_lock);
 	list_for_each_entry_safe(page, h, &n->partial, lru) {
 		if (!page->inuse) {
-			__remove_partial(n, page);
+			remove_partial(n, page);
 			discard_slab(s, page);
 		} else {
 			list_slab_objects(s, page,
-			"Objects remaining in %s on kmem_cache_close()");
+			"Objects remaining in %s on __kmem_cache_shutdown()");
 		}
 	}
+	spin_unlock_irq(&n->list_lock);
 }
 
 /*
  * Release all resources used by a slab cache.
  */
-static inline int kmem_cache_close(struct kmem_cache *s)
+int __kmem_cache_shutdown(struct kmem_cache *s)
 {
 	int node;
 	struct kmem_cache_node *n;
@@ -3476,16 +3479,9 @@ static inline int kmem_cache_close(struct kmem_cache *s)
 		if (n->nr_partial || slabs_node(s, node))
 			return 1;
 	}
-	free_percpu(s->cpu_slab);
-	free_kmem_cache_nodes(s);
 	return 0;
 }
 
-int __kmem_cache_shutdown(struct kmem_cache *s)
-{
-	return kmem_cache_close(s);
-}
-
 /********************************************************************
  *		Kmalloc subsystem
  *******************************************************************/
@@ -3980,7 +3976,7 @@ int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
 	memcg_propagate_slab_attrs(s);
 	err = sysfs_slab_add(s);
 	if (err)
-		kmem_cache_close(s);
+		__kmem_cache_release(s);
 
 	return err;
 }
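
The split above gives the common slab code two distinct entry points: __kmem_cache_shutdown() empties the cache and can fail if objects remain, while __kmem_cache_release() frees the per-cpu and per-node structures. A minimal caller sketch, purely illustrative and not part of this patch (the destroy path and the sysfs teardown step are assumptions here), would order them so the sysfs file is gone before the node data it reads is released:

	/* Hypothetical destroy path: shutdown, remove sysfs, then release. */
	static void example_cache_destroy(struct kmem_cache *s)
	{
		if (__kmem_cache_shutdown(s))
			return;			/* objects still in use, abort */

		/*
		 * ... destroy the cache's sysfs file here; readers that were
		 * walking the partial lists under list_lock are now gone ...
		 */

		__kmem_cache_release(s);	/* free cpu_slab and node data */
	}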