@@ -1518,11 +1518,9 @@ static void discard_slab(struct kmem_cache *s, struct page *page)
 /*
  * Management of partially allocated slabs.
  */
-static inline void add_partial(struct kmem_cache_node *n,
-				struct page *page, int tail)
+static inline void
+__add_partial(struct kmem_cache_node *n, struct page *page, int tail)
 {
-	lockdep_assert_held(&n->list_lock);
-
 	n->nr_partial++;
 	if (tail == DEACTIVATE_TO_TAIL)
 		list_add_tail(&page->lru, &n->partial);
@@ -1530,15 +1528,27 @@ static inline void add_partial(struct kmem_cache_node *n,
 		list_add(&page->lru, &n->partial);
 }
 
-static inline void remove_partial(struct kmem_cache_node *n,
-					struct page *page)
+static inline void add_partial(struct kmem_cache_node *n,
+				struct page *page, int tail)
 {
 	lockdep_assert_held(&n->list_lock);
+	__add_partial(n, page, tail);
+}
 
+static inline void
+__remove_partial(struct kmem_cache_node *n, struct page *page)
+{
 	list_del(&page->lru);
 	n->nr_partial--;
 }
 
+static inline void remove_partial(struct kmem_cache_node *n,
+					struct page *page)
+{
+	lockdep_assert_held(&n->list_lock);
+	__remove_partial(n, page);
+}
+
 /*
  * Remove slab from the partial list, freeze it and
  * return the pointer to the freelist.
@@ -2904,12 +2914,10 @@ static void early_kmem_cache_node_alloc(int node)
 	inc_slabs_node(kmem_cache_node, node, page->objects);
 
 	/*
-	 * the lock is for lockdep's sake, not for any actual
-	 * race protection
+	 * No locks need to be taken here as it has just been
+	 * initialized and there is no concurrent access.
 	 */
-	spin_lock(&n->list_lock);
-	add_partial(n, page, DEACTIVATE_TO_HEAD);
-	spin_unlock(&n->list_lock);
+	__add_partial(n, page, DEACTIVATE_TO_HEAD);
 }
 
 static void free_kmem_cache_nodes(struct kmem_cache *s)
@@ -3195,7 +3203,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 
 	list_for_each_entry_safe(page, h, &n->partial, lru) {
 		if (!page->inuse) {
-			remove_partial(n, page);
+			__remove_partial(n, page);
 			discard_slab(s, page);
 		} else {
 			list_slab_objects(s, page,
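
For readers tracing the locking change: the patch splits each list helper into a bare "__"-prefixed function that only manipulates the partial list, plus a wrapper that keeps the lockdep_assert_held() check, so paths with provably no concurrent access (early node init, cache teardown) can skip both the spinlock and the assertion. Below is a minimal userspace sketch of that wrapper-plus-helper split; the fake_node struct, the pthread mutex, the locked flag, and assert() are illustrative stand-ins for n->list_lock and lockdep_assert_held(), not kernel code.

/* Illustrative userspace analogue of the __add_partial()/add_partial()
 * split: the checked wrapper asserts lock ownership, the bare helper only
 * touches the counter/list state and relies on the caller for exclusion. */
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_node {
	pthread_mutex_t list_lock;
	bool locked;		/* stand-in for lockdep's "lock is held" state */
	int nr_partial;
};

/* No locking checks: the caller either holds list_lock (via the wrapper
 * below) or guarantees nobody else can see 'n' yet. */
static void fake__add_partial(struct fake_node *n)
{
	n->nr_partial++;
}

/* Normal runtime path: insist list_lock is really held, then delegate. */
static void fake_add_partial(struct fake_node *n)
{
	assert(n->locked);	/* ~ lockdep_assert_held(&n->list_lock) */
	fake__add_partial(n);
}

int main(void)
{
	struct fake_node n = { .locked = false, .nr_partial = 0 };

	pthread_mutex_init(&n.list_lock, NULL);

	/* Early init: the node is not published yet, no lock needed. */
	fake__add_partial(&n);

	/* Runtime path: take the lock and go through the checked wrapper. */
	pthread_mutex_lock(&n.list_lock);
	n.locked = true;
	fake_add_partial(&n);
	n.locked = false;
	pthread_mutex_unlock(&n.list_lock);

	printf("nr_partial = %d\n", n.nr_partial);
	pthread_mutex_destroy(&n.list_lock);
	return 0;
}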