@@ -1000,23 +1000,22 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
 
 /*
  * Tracking of fully allocated slabs for debugging purposes.
- *
- * list_lock must be held.
  */
 static void add_full(struct kmem_cache *s,
 	struct kmem_cache_node *n, struct page *page)
 {
+	lockdep_assert_held(&n->list_lock);
+
 	if (!(s->flags & SLAB_STORE_USER))
 		return;
 
 	list_add(&page->lru, &n->full);
 }
 
-/*
- * list_lock must be held.
- */
-static void remove_full(struct kmem_cache *s, struct page *page)
+static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
 {
+	lockdep_assert_held(&n->list_lock);
+
 	if (!(s->flags & SLAB_STORE_USER))
 		return;
 
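
The hunk above carries the whole idea of the series: a locking rule that
previously lived only in a comment ("list_lock must be held") becomes an
executable assertion that fires when the rule is broken. As a rough
userspace sketch of the same pattern (hypothetical helper, pthreads
standing in for the kernel's lockdep):

    #include <assert.h>
    #include <errno.h>
    #include <pthread.h>

    /*
     * Hypothetical stand-in for lockdep_assert_held(). With an
     * error-checking mutex, pthread_mutex_lock() returns EDEADLK when
     * the calling thread already owns the lock, which is exactly the
     * condition being asserted.
     */
    static void assert_held(pthread_mutex_t *lock)
    {
            int ret = pthread_mutex_lock(lock);

            if (ret == EDEADLK)
                    return;         /* caller holds the lock: OK */
            if (ret == 0)           /* we took it: caller did NOT hold it */
                    pthread_mutex_unlock(lock);
            assert(!"caller must hold the lock");
    }

The mutex has to be created with PTHREAD_MUTEX_ERRORCHECK for EDEADLK to
be reported; lockdep does the equivalent ownership bookkeeping for every
kernel lock class without any per-call-site setup.
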
@@ -1265,7 +1264,8 @@ static inline int check_object(struct kmem_cache *s, struct page *page,
 					void *object, u8 val) { return 1; }
 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
 					struct page *page) {}
-static inline void remove_full(struct kmem_cache *s, struct page *page) {}
+static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
+					struct page *page) {}
 static inline unsigned long kmem_cache_flags(unsigned long object_size,
 	unsigned long flags, const char *name,
 	void (*ctor)(void *))
@@ -1519,12 +1519,12 @@ static void discard_slab(struct kmem_cache *s, struct page *page)
 
 /*
  * Management of partially allocated slabs.
- *
- * list_lock must be held.
  */
 static inline void add_partial(struct kmem_cache_node *n,
 				struct page *page, int tail)
 {
+	lockdep_assert_held(&n->list_lock);
+
 	n->nr_partial++;
 	if (tail == DEACTIVATE_TO_TAIL)
 		list_add_tail(&page->lru, &n->partial);
@@ -1532,12 +1532,11 @@ static inline void add_partial(struct kmem_cache_node *n,
 		list_add(&page->lru, &n->partial);
 }
 
-/*
- * list_lock must be held.
- */
 static inline void remove_partial(struct kmem_cache_node *n,
 					struct page *page)
 {
+	lockdep_assert_held(&n->list_lock);
+
 	list_del(&page->lru);
 	n->nr_partial--;
 }
@@ -1547,8 +1546,6 @@ static inline void remove_partial(struct kmem_cache_node *n,
  * return the pointer to the freelist.
  *
  * Returns a list of objects or NULL if it fails.
- *
- * Must hold list_lock since we modify the partial list.
  */
 static inline void *acquire_slab(struct kmem_cache *s,
 		struct kmem_cache_node *n, struct page *page,
@@ -1558,6 +1555,8 @@ static inline void *acquire_slab(struct kmem_cache *s,
 	unsigned long counters;
 	struct page new;
 
+	lockdep_assert_held(&n->list_lock);
+
 	/*
 	 * Zap the freelist and set the frozen bit.
 	 * The old freelist is the list of objects for the
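
None of the assertions added so far cost anything in production builds:
without lockdep configured, lockdep_assert_held() compiles to nothing.
Paraphrasing the definition in include/linux/lockdep.h of this era (the
exact spelling may differ between kernel versions):

    #ifdef CONFIG_LOCKDEP
    # define lockdep_assert_held(l)	WARN_ON(debug_locks && !lockdep_is_held(l))
    #else
    # define lockdep_assert_held(l)	do { } while (0)
    #endif
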
@@ -1902,7 +1901,7 @@ redo:
 
 		else if (l == M_FULL)
 
-			remove_full(s, page);
+			remove_full(s, n, page);
 
 		if (m == M_PARTIAL) {
@@ -2556,7 +2555,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		new.inuse--;
 		if ((!new.inuse || !prior) && !was_frozen) {
 
-			if (kmem_cache_has_cpu_partial(s) && !prior)
+			if (kmem_cache_has_cpu_partial(s) && !prior) {
 
 				/*
 				 * Slab was on no list before and will be
@@ -2566,7 +2565,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 				 */
 				new.frozen = 1;
 
-			else { /* Needs to be taken off a list */
+			} else { /* Needs to be taken off a list */
 
 				n = get_node(s, page_to_nid(page));
 				/*
@@ -2615,7 +2614,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		 */
 		if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
 			if (kmem_cache_debug(s))
-				remove_full(s, page);
+				remove_full(s, n, page);
 			add_partial(n, page, DEACTIVATE_TO_TAIL);
 			stat(s, FREE_ADD_PARTIAL);
 		}
@@ -2629,9 +2628,10 @@ slab_empty:
 		 */
 		remove_partial(n, page);
 		stat(s, FREE_REMOVE_PARTIAL);
-	} else
+	} else {
 		/* Slab must be on the full list */
-		remove_full(s, page);
+		remove_full(s, n, page);
+	}
 
 	spin_unlock_irqrestore(&n->list_lock, flags);
 	stat(s, FREE_SLAB);
@@ -2905,7 +2905,13 @@ static void early_kmem_cache_node_alloc(int node)
 	init_kmem_cache_node(n);
 	inc_slabs_node(kmem_cache_node, node, page->objects);
 
+	/*
+	 * the lock is for lockdep's sake, not for any actual
+	 * race protection
+	 */
+	spin_lock(&n->list_lock);
 	add_partial(n, page, DEACTIVATE_TO_HEAD);
+	spin_unlock(&n->list_lock);
 }
 
 static void free_kmem_cache_nodes(struct kmem_cache *s)
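
As the new comment says, early_kmem_cache_node_alloc() runs while boot is
still single-threaded, so the lock is taken only to satisfy the assertion
inside add_partial(), not to exclude a racing CPU. A minimal sketch of
that init-time pattern (all names hypothetical):

    /*
     * Hypothetical init-time caller of a helper that asserts lock
     * ownership. Nothing can race with us yet, but the assertion
     * cannot know that, so the uncontended lock is taken anyway.
     */
    static void __init widget_pool_bootstrap(struct widget_pool *pool)
    {
            spin_lock(&pool->lock);             /* for lockdep's sake only */
            widget_add(pool, &boot_widget);     /* asserts pool->lock held */
            spin_unlock(&pool->lock);
    }
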
@@ -4314,7 +4320,13 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 
 			page = ACCESS_ONCE(c->partial);
 			if (page) {
-				x = page->pobjects;
+				node = page_to_nid(page);
+				if (flags & SO_TOTAL)
+					WARN_ON_ONCE(1);
+				else if (flags & SO_OBJECTS)
+					WARN_ON_ONCE(1);
+				else
+					x = page->pages;
 				total += x;
 				nodes[node] += x;
 			}
@@ -5178,7 +5190,7 @@ static int sysfs_slab_add(struct kmem_cache *s)
 	}
 
 	s->kobj.kset = slab_kset;
-	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
+	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
 	if (err) {
 		kobject_put(&s->kobj);
 		return err;
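
The last hunk is a separate hardening fix: the cache name was being
passed as the format string itself, so a name containing '%' conversions
would be interpreted by the vsnprintf() machinery behind
kobject_init_and_add(). The bug class is easiest to see in standalone
userspace C (an illustration, not the kernel code path):

    #include <stdio.h>

    int main(void)
    {
            const char *name = "cache-%s-%n";   /* externally influenced */

            /*
             * Bad: the name itself is the format, so %s and %n are acted
             * on with garbage variadic arguments (undefined behaviour;
             * %n even writes through a pointer):
             *
             *         printf(name);
             *
             * Good: a constant format treats the name as plain data,
             * which is what the "%s", name change above does for
             * kobject_init_and_add().
             */
            printf("%s\n", name);
            return 0;
    }
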