@@ -1306,6 +1306,17 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
 	kasan_slab_free(s, x);
 }
 
+static void setup_object(struct kmem_cache *s, struct page *page,
+				void *object)
+{
+	setup_object_debug(s, page, object);
+	if (unlikely(s->ctor)) {
+		kasan_unpoison_object_data(s, object);
+		s->ctor(object);
+		kasan_poison_object_data(s, object);
+	}
+}
+
 /*
  * Slab allocation and freeing
  */
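This hunk adds setup_object() ahead of the slab allocation code (its old definition is removed further down, so allocate_slab() can call it). Note how the KASAN unpoison/poison pair brackets the constructor: the ctor may write to an object that is otherwise poisoned while it sits on the freelist. A minimal sketch of a cache that would exercise the s->ctor branch (my illustration, not part of the patch; example_node, example_ctor and example_cache_init are hypothetical names):

#include <linux/slab.h>
#include <linux/list.h>

struct example_node {
	struct list_head list;
	unsigned long flags;
};

/* Called between kasan_unpoison_object_data() and
 * kasan_poison_object_data() above, so these stores land in memory
 * that is poisoned again as soon as the constructor returns.
 */
static void example_ctor(void *obj)
{
	struct example_node *n = obj;

	INIT_LIST_HEAD(&n->list);
	n->flags = 0;
}

static struct kmem_cache *example_cache;

static int example_cache_init(void)
{
	example_cache = kmem_cache_create("example_node",
					  sizeof(struct example_node), 0,
					  SLAB_HWCACHE_ALIGN, example_ctor);
	return example_cache ? 0 : -ENOMEM;
}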
@@ -1336,6 +1347,8 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	struct page *page;
 	struct kmem_cache_order_objects oo = s->oo;
 	gfp_t alloc_gfp;
+	void *start, *p;
+	int idx, order;
 
 	flags &= gfp_allowed_mask;
 
@@ -1359,13 +1372,13 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 		 * Try a lower order alloc if possible
 		 */
 		page = alloc_slab_page(s, alloc_gfp, node, oo);
-
-		if (page)
-			stat(s, ORDER_FALLBACK);
+		if (unlikely(!page))
+			goto out;
+		stat(s, ORDER_FALLBACK);
 	}
 
-	if (kmemcheck_enabled && page
-		&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
+	if (kmemcheck_enabled &&
+	    !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
 		int pages = 1 << oo_order(oo);
 
 		kmemcheck_alloc_shadow(page, oo_order(oo), alloc_gfp, node);
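With the failed fallback now jumping to the out label, page is known to be non-NULL from here on, which is why the kmemcheck condition can drop its page test. The shape being adopted is the classic single-exit goto pattern; a self-contained userspace analogue (hypothetical code, not from the patch):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Failure branches straight to the exit label, so the success-only
 * initialization needs no further NULL checks and the exit code runs
 * on every path.
 */
static char *make_buffer(size_t len)
{
	char *buf = malloc(len);

	if (!buf)
		goto out;		/* skip initialization entirely */

	memset(buf, 0, len);		/* buf is known non-NULL here */
out:
	fprintf(stderr, "make_buffer(%zu) -> %p\n", len, (void *)buf);
	return buf;			/* NULL on failure */
}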
@@ -1380,51 +1393,9 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 			kmemcheck_mark_unallocated_pages(page, pages);
 	}
 
-	if (flags & __GFP_WAIT)
-		local_irq_disable();
-	if (!page)
-		return NULL;
-
 	page->objects = oo_objects(oo);
-	mod_zone_page_state(page_zone(page),
-		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
-		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-		1 << oo_order(oo));
-
-	return page;
-}
-
-static void setup_object(struct kmem_cache *s, struct page *page,
-				void *object)
-{
-	setup_object_debug(s, page, object);
-	if (unlikely(s->ctor)) {
-		kasan_unpoison_object_data(s, object);
-		s->ctor(object);
-		kasan_poison_object_data(s, object);
-	}
-}
-
-static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
-{
-	struct page *page;
-	void *start;
-	void *p;
-	int order;
-	int idx;
-
-	if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
-		pr_emerg("gfp: %u\n", flags & GFP_SLAB_BUG_MASK);
-		BUG();
-	}
-
-	page = allocate_slab(s,
-		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
-	if (!page)
-		goto out;
 
 	order = compound_order(page);
-	inc_slabs_node(s, page_to_nid(page), page->objects);
 	page->slab_cache = s;
 	__SetPageSlab(page);
 	if (page_is_pfmemalloc(page))
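This hunk deletes the early local_irq_disable() along with the old setup_object() and new_slab() bodies: everything that used to run after the irq-disable, including the constructor calls, now runs in allocate_slab() while interrupts are still enabled, and only the final accounting moves behind the out label (next hunk). A userspace analogue of the idea, with a mutex standing in for the irq state (my sketch, not from the patch):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

/* Caller holds *lock on entry, as SLUB callers hold irqs disabled.
 * The expensive allocation and "constructor" work happen unlocked;
 * the lock is only retaken at the end, mirroring how
 * allocate_slab() now defers local_irq_disable() to its out: label.
 */
static void *alloc_initialized(pthread_mutex_t *lock, size_t len)
{
	void *buf;

	pthread_mutex_unlock(lock);	/* like local_irq_enable() */

	buf = malloc(len);		/* may block; lock not held */
	if (buf)
		memset(buf, 0, len);	/* ctor-style init, also unlocked */

	pthread_mutex_lock(lock);	/* like local_irq_disable() */
	return buf;
}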
@@ -1448,10 +1419,34 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	page->freelist = start;
 	page->inuse = page->objects;
 	page->frozen = 1;
+
 out:
+	if (flags & __GFP_WAIT)
+		local_irq_disable();
+	if (!page)
+		return NULL;
+
+	mod_zone_page_state(page_zone(page),
+		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
+		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
+		1 << oo_order(oo));
+
+	inc_slabs_node(s, page_to_nid(page), page->objects);
+
+	return page;
+}
+
+static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
+{
+	if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
+		pr_emerg("gfp: %u\n", flags & GFP_SLAB_BUG_MASK);
+		BUG();
+	}
+
+	return allocate_slab(s,
+		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
+}
+
 static void __free_slab(struct kmem_cache *s, struct page *page)
 {
 	int order = compound_order(page);
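After this hunk, new_slab() is only a sanity check plus a gfp-mask filter around allocate_slab(), and the irq and accounting work all sits behind the out label, so the success and failure paths both restore the caller's irq-off state. A hedged sketch of the caller-side contract (my paraphrase, not the literal ___slab_alloc() code; example_refill is a hypothetical name):

/* Callers run with irqs disabled. new_slab() may transiently enable
 * them for a __GFP_WAIT allocation but returns with them disabled
 * again, success or failure, so the caller's state is unchanged.
 */
static void *example_refill(struct kmem_cache *s, gfp_t flags, int node)
{
	struct page *page;

	/* irqs off here */
	page = new_slab(s, flags, node); /* may enable irqs if __GFP_WAIT */
	/* irqs off again here */

	return page ? page->freelist : NULL;
}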