@@ -1204,7 +1204,7 @@ unsigned long kmem_cache_flags(unsigned long object_size,
 
 	return flags;
 }
-#else
+#else /* !CONFIG_SLUB_DEBUG */
 static inline void setup_object_debug(struct kmem_cache *s,
 			struct page *page, void *object) {}
 
@@ -2295,23 +2295,15 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
  * And if we were unable to get a new slab from the partial slab lists then
  * we need to allocate a new slab. This is the slowest path since it involves
  * a call to the page allocator and the setup of a new slab.
+ *
+ * Version of __slab_alloc to use when we know that interrupts are
+ * already disabled (which is the case for bulk allocation).
  */
-static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 			  unsigned long addr, struct kmem_cache_cpu *c)
 {
 	void *freelist;
 	struct page *page;
-	unsigned long flags;
-
-	local_irq_save(flags);
-#ifdef CONFIG_PREEMPT
-	/*
-	 * We may have been preempted and rescheduled on a different
-	 * cpu before disabling interrupts. Need to reload cpu area
-	 * pointer.
-	 */
-	c = this_cpu_ptr(s->cpu_slab);
-#endif
 
 	page = c->page;
 	if (!page)
@@ -2369,7 +2361,6 @@ load_freelist:
 	VM_BUG_ON(!c->page->frozen);
 	c->freelist = get_freepointer(s, freelist);
 	c->tid = next_tid(c->tid);
-	local_irq_restore(flags);
 	return freelist;
 
 new_slab:
@@ -2386,7 +2377,6 @@ new_slab:
 
 	if (unlikely(!freelist)) {
 		slab_out_of_memory(s, gfpflags, node);
-		local_irq_restore(flags);
 		return NULL;
 	}
 
@@ -2402,10 +2392,34 @@ new_slab:
 	deactivate_slab(s, page, get_freepointer(s, freelist));
 	c->page = NULL;
 	c->freelist = NULL;
-	local_irq_restore(flags);
 	return freelist;
 }
 
+/*
+ * Another one that disabled interrupt and compensates for possible
+ * cpu changes by refetching the per cpu area pointer.
+ */
+static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+			  unsigned long addr, struct kmem_cache_cpu *c)
+{
+	void *p;
+	unsigned long flags;
+
+	local_irq_save(flags);
+#ifdef CONFIG_PREEMPT
+	/*
+	 * We may have been preempted and rescheduled on a different
+	 * cpu before disabling interrupts. Need to reload cpu area
+	 * pointer.
+	 */
+	c = this_cpu_ptr(s->cpu_slab);
+#endif
+
+	p = ___slab_alloc(s, gfpflags, node, addr, c);
+	local_irq_restore(flags);
+	return p;
+}
+
 /*
  * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
  * have the fastpath folded into their functions. So no function call
@@ -2804,30 +2818,23 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 		void *object = c->freelist;
 
 		if (unlikely(!object)) {
-			local_irq_enable();
 			/*
 			 * Invoking slow path likely have side-effect
 			 * of re-populating per CPU c->freelist
 			 */
-			p[i] = __slab_alloc(s, flags, NUMA_NO_NODE,
+			p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
 					    _RET_IP_, c);
-			if (unlikely(!p[i])) {
-				__kmem_cache_free_bulk(s, i, p);
-				return false;
-			}
-			local_irq_disable();
+			if (unlikely(!p[i]))
+				goto error;
+
 			c = this_cpu_ptr(s->cpu_slab);
 			continue; /* goto for-loop */
 		}
 
 		/* kmem_cache debug support */
 		s = slab_pre_alloc_hook(s, flags);
-		if (unlikely(!s)) {
-			__kmem_cache_free_bulk(s, i, p);
-			c->tid = next_tid(c->tid);
-			local_irq_enable();
-			return false;
-		}
+		if (unlikely(!s))
+			goto error;
 
 		c->freelist = get_freepointer(s, object);
 		p[i] = object;
@@ -2847,6 +2854,11 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	}
 
 	return true;
+
+error:
+	__kmem_cache_free_bulk(s, i, p);
+	local_irq_enable();
+	return false;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_bulk);
 
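
For context, the bulk interface reworked by the last two hunks can be driven from a caller roughly as follows. This is a minimal illustrative sketch, not part of the patch: the function name, the cache argument and the BATCH size are invented for the example, and it assumes the bool-returning kmem_cache_alloc_bulk()/kmem_cache_free_bulk() pair visible in the diff above.

/*
 * Hypothetical caller of the bulk API (illustration only, not in the patch).
 */
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/slab.h>

#define BATCH 16

static int example_bulk_user(struct kmem_cache *cache)
{
	void *objs[BATCH];

	/*
	 * On failure the allocation side has already cleaned up after
	 * itself (see the error: label above), so the caller only has
	 * to report the failure.
	 */
	if (!kmem_cache_alloc_bulk(cache, GFP_KERNEL, BATCH, objs))
		return -ENOMEM;

	/* ... use objs[0] .. objs[BATCH - 1] ... */

	kmem_cache_free_bulk(cache, BATCH, objs);
	return 0;
}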