@@ -2865,6 +2865,30 @@ static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
 	return obj;
 }
 
+/*
+ * Slab list should be fixed up by fixup_slab_list() for existing slab
+ * or cache_grow_end() for new slab
+ */
+static __always_inline int alloc_block(struct kmem_cache *cachep,
+		struct array_cache *ac, struct page *page, int batchcount)
+{
+	/*
+	 * There must be at least one object available for
+	 * allocation.
+	 */
+	BUG_ON(page->active >= cachep->num);
+
+	while (page->active < cachep->num && batchcount--) {
+		STATS_INC_ALLOCED(cachep);
+		STATS_INC_ACTIVE(cachep);
+		STATS_SET_HIGH(cachep);
+
+		ac->entry[ac->avail++] = slab_get_obj(cachep, page);
+	}
+
+	return batchcount;
+}
+
 static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
 {
 	int batchcount;
@@ -2877,7 +2901,6 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
 	check_irq_off();
 	node = numa_mem_id();
 
-retry:
 	ac = cpu_cache_get(cachep);
 	batchcount = ac->batchcount;
 	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
@@ -2907,21 +2930,7 @@ retry:
 
 		check_spinlock_acquired(cachep);
 
-		/*
-		 * The slab was either on partial or free list so
-		 * there must be at least one object available for
-		 * allocation.
-		 */
-		BUG_ON(page->active >= cachep->num);
-
-		while (page->active < cachep->num && batchcount--) {
-			STATS_INC_ALLOCED(cachep);
-			STATS_INC_ACTIVE(cachep);
-			STATS_SET_HIGH(cachep);
-
-			ac->entry[ac->avail++] = slab_get_obj(cachep, page);
-		}
-
+		batchcount = alloc_block(cachep, ac, page, batchcount);
 		fixup_slab_list(cachep, n, page, &list);
 	}
 
@@ -2941,21 +2950,18 @@ alloc_done:
 		}
 
 		page = cache_grow_begin(cachep, gfp_exact_node(flags), node);
-		cache_grow_end(cachep, page);
 
 		/*
 		 * cache_grow_begin() can reenable interrupts,
 		 * then ac could change.
 		 */
 		ac = cpu_cache_get(cachep);
-		node = numa_mem_id();
+		if (!ac->avail && page)
+			alloc_block(cachep, ac, page, batchcount);
+		cache_grow_end(cachep, page);
 
-		/* no objects in sight? abort */
-		if (!page && ac->avail == 0)
+		if (!ac->avail)
 			return NULL;
-
-		if (!ac->avail)		/* objects refilled by interrupt? */
-			goto retry;
 	}
 	ac->touched = 1;
 
@@ -3149,14 +3155,13 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
 {
 	struct page *page;
 	struct kmem_cache_node *n;
-	void *obj;
+	void *obj = NULL;
 	void *list = NULL;
 
 	VM_BUG_ON(nodeid < 0 || nodeid >= MAX_NUMNODES);
 	n = get_node(cachep, nodeid);
 	BUG_ON(!n);
 
-retry:
 	check_irq_off();
 	spin_lock(&n->list_lock);
 	page = get_first_slab(n, false);
@@ -3178,19 +3183,18 @@ retry:
 
 	spin_unlock(&n->list_lock);
 	fixup_objfreelist_debug(cachep, &list);
-	goto done;
+	return obj;
 
 must_grow:
 	spin_unlock(&n->list_lock);
 	page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
+	if (page) {
+		/* This slab isn't counted yet so don't update free_objects */
+		obj = slab_get_obj(cachep, page);
+	}
 	cache_grow_end(cachep, page);
-	if (page)
-		goto retry;
 
-	return fallback_alloc(cachep, flags);
-
-done:
-	return obj;
+	return obj ? obj : fallback_alloc(cachep, flags);
 }
 
 static __always_inline void *
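
For illustration only, and not part of the patch itself: below is a minimal user-space sketch of the control flow that the new alloc_block() helper factors out of cache_alloc_refill(). It drains up to batchcount objects from a partially used slab into the per-CPU entry array and returns whatever remains of the batch. The toy_slab, toy_array_cache and toy_get_obj() names are hypothetical stand-ins for the kernel's struct page, struct array_cache and slab_get_obj(); the STATS_* accounting and all locking are omitted.

/* Illustrative sketch only; the toy_* types stand in for kernel structures. */
#include <stdio.h>

#define NUM_OBJS	8		/* objects per slab (cachep->num) */

struct toy_slab {
	int active;			/* objects already handed out */
	int objs[NUM_OBJS];		/* stand-in for on-slab objects */
};

struct toy_array_cache {
	int avail;			/* ac->avail */
	int entry[NUM_OBJS];		/* ac->entry[] */
};

/* Stand-in for slab_get_obj(): hand out the next unused object. */
static int toy_get_obj(struct toy_slab *slab)
{
	return slab->objs[slab->active++];
}

/*
 * Same shape as alloc_block(): move objects from the slab into the
 * per-CPU array until the slab is exhausted or the batch is consumed.
 * Like the kernel helper, the post-decrement means a fully consumed
 * batch comes back as -1 rather than 0.
 */
static int toy_alloc_block(struct toy_array_cache *ac, struct toy_slab *slab,
			   int batchcount)
{
	while (slab->active < NUM_OBJS && batchcount--)
		ac->entry[ac->avail++] = toy_get_obj(slab);

	return batchcount;
}

int main(void)
{
	struct toy_slab slab = {
		.active = 0,
		.objs = { 10, 11, 12, 13, 14, 15, 16, 17 },
	};
	struct toy_array_cache ac = { .avail = 0 };
	int left;

	/* Ask for a batch of 5 from a slab with 8 free objects. */
	left = toy_alloc_block(&ac, &slab, 5);
	printf("avail=%d active=%d batchcount left=%d\n",
	       ac.avail, slab.active, left);
	return 0;
}

In the real cache_alloc_refill() the leftover batchcount feeds the surrounding while (batchcount > 0) loop, so the refill keeps pulling slabs off the node's lists until the batch is satisfied; if the lists could not provide anything, the patch now fills the array directly from a freshly grown slab via alloc_block() before cache_grow_end() publishes that slab to the node.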