@@ -965,6 +965,15 @@ static int setup_kmem_cache_node(struct kmem_cache *cachep,
 	spin_unlock_irq(&n->list_lock);
 	slabs_destroy(cachep, &list);
 
+	/*
+	 * Protect lockless access to n->shared during irq-disabled context.
+	 * If n->shared isn't NULL in irq-disabled context, it is guaranteed
+	 * to stay valid until irqs are re-enabled, because it will only be
+	 * freed after a synchronize_sched().
+	 */
+	if (force_change)
+		synchronize_sched();
+
 fail:
 	kfree(old_shared);
 	kfree(new_shared);
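
The old shared array is freed only after synchronize_sched(), so a reader that sampled n->shared with irqs disabled can keep dereferencing it until it re-enables irqs. A minimal, self-contained sketch of that publish-then-wait pattern follows; the demo_* names are hypothetical stand-ins for the slab internals, only the kernel primitives are real:

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/compiler.h>

struct demo_cache {
	int avail;
};

static struct demo_cache *demo_shared;	/* read locklessly with irqs off */
static DEFINE_SPINLOCK(demo_lock);

/* Updater: publish a new cache, then wait before freeing the old one. */
static int demo_update(void)
{
	struct demo_cache *old, *new;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOMEM;
	new->avail = 0;

	spin_lock_irq(&demo_lock);
	old = demo_shared;
	demo_shared = new;		/* publish the replacement */
	spin_unlock_irq(&demo_lock);

	/*
	 * Code running with irqs disabled cannot be preempted, so it is
	 * an implicit RCU-sched read-side section. Once this returns,
	 * no such reader can still hold the old pointer.
	 */
	synchronize_sched();
	kfree(old);
	return 0;
}

/* Reader: the snapshot stays valid for as long as irqs remain off. */
static int demo_read(void)
{
	struct demo_cache *shared;
	unsigned long flags;
	int avail = 0;

	local_irq_save(flags);
	shared = READ_ONCE(demo_shared);
	if (shared)
		avail = shared->avail;
	local_irq_restore(flags);

	return avail;
}
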
@@ -2893,7 +2902,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
 {
 	int batchcount;
 	struct kmem_cache_node *n;
-	struct array_cache *ac;
+	struct array_cache *ac, *shared;
 	int node;
 	void *list = NULL;
 	struct page *page;
@@ -2914,11 +2923,16 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
 	n = get_node(cachep, node);
 
 	BUG_ON(ac->avail > 0 || !n);
+	shared = READ_ONCE(n->shared);
+	if (!n->free_objects && (!shared || !shared->avail))
+		goto direct_grow;
+
 	spin_lock(&n->list_lock);
+	shared = READ_ONCE(n->shared);
 
 	/* See if we can refill from the shared array */
-	if (n->shared && transfer_objects(ac, n->shared, batchcount)) {
-		n->shared->touched = 1;
+	if (shared && transfer_objects(ac, shared, batchcount)) {
+		shared->touched = 1;
 		goto alloc_done;
 	}
 
@@ -2940,6 +2954,7 @@ alloc_done:
 	spin_unlock(&n->list_lock);
 	fixup_objfreelist_debug(cachep, &list);
 
+direct_grow:
 	if (unlikely(!ac->avail)) {
 		/* Check if we can use obj in pfmemalloc slab */
 		if (sk_memalloc_socks()) {
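
Taken together, the refill hunks form a double-check: an unlocked READ_ONCE() peek decides whether taking n->list_lock is worth it at all, and the pointer is re-read under the lock before any objects are transferred. A sketch of that shape, under the assumption that the caller runs with irqs disabled (as cache_alloc_refill() does, which is what makes the unlocked dereference safe); the demo_* names are hypothetical, not slab internals:

#include <linux/spinlock.h>
#include <linux/compiler.h>

struct demo_array {
	int avail;
};

struct demo_node {
	spinlock_t list_lock;
	unsigned long free_objects;
	struct demo_array *shared;
};

/* Hypothetical stand-in for transfer_objects(). */
static int demo_transfer(struct demo_array *from)
{
	int batch = from->avail;

	from->avail = 0;
	return batch;
}

/* Called with irqs disabled, mirroring cache_alloc_refill(). */
static int demo_refill(struct demo_node *n)
{
	struct demo_array *shared;
	int got = 0;

	/*
	 * Lockless hint: with no free objects and an empty (or absent)
	 * shared array, skip the lock and let the caller grow the cache.
	 * A stale answer only costs or saves a lock round trip; the
	 * synchronize_sched() on the update side rules out use-after-free.
	 */
	shared = READ_ONCE(n->shared);
	if (!n->free_objects && (!shared || !shared->avail))
		return 0;

	spin_lock(&n->list_lock);
	/* Re-read: the pointer may have been replaced before we locked. */
	shared = READ_ONCE(n->shared);
	if (shared)
		got = demo_transfer(shared);
	spin_unlock(&n->list_lock);

	return got;
}
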