@@ -1292,14 +1292,21 @@ static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
 	return memcg_kmem_get_cache(s, flags);
 }
 
-static inline void slab_post_alloc_hook(struct kmem_cache *s,
-					gfp_t flags, void *object)
+static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
+					size_t size, void **p)
 {
+	size_t i;
+
 	flags &= gfp_allowed_mask;
-	kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
-	kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags);
+	for (i = 0; i < size; i++) {
+		void *object = p[i];
+
+		kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
+		kmemleak_alloc_recursive(object, s->object_size, 1,
+					 s->flags, flags);
+		kasan_slab_alloc(s, object);
+	}
 	memcg_kmem_put_cache(s);
-	kasan_slab_alloc(s, object);
 }
 
 static inline void slab_free_hook(struct kmem_cache *s, void *x)
@@ -2475,7 +2482,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 		gfp_t gfpflags, int node, unsigned long addr)
 {
-	void **object;
+	void *object;
 	struct kmem_cache_cpu *c;
 	struct page *page;
 	unsigned long tid;
@@ -2554,7 +2561,7 @@ redo:
 	if (unlikely(gfpflags & __GFP_ZERO) && object)
 		memset(object, 0, s->object_size);
 
-	slab_post_alloc_hook(s, gfpflags, object);
+	slab_post_alloc_hook(s, gfpflags, 1, &object);
 
 	return object;
 }
@@ -2904,6 +2911,10 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	struct kmem_cache_cpu *c;
 	int i;
 
+	/* memcg and kmem_cache debug support */
+	s = slab_pre_alloc_hook(s, flags);
+	if (unlikely(!s))
+		return false;
 	/*
 	 * Drain objects in the per cpu slab, while disabling local
 	 * IRQs, which protects against PREEMPT and interrupts
@@ -2928,17 +2939,8 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 			c = this_cpu_ptr(s->cpu_slab);
 			continue; /* goto for-loop */
 		}
-
-		/* kmem_cache debug support */
-		s = slab_pre_alloc_hook(s, flags);
-		if (unlikely(!s))
-			goto error;
-
 		c->freelist = get_freepointer(s, object);
 		p[i] = object;
-
-		/* kmem_cache debug support */
-		slab_post_alloc_hook(s, flags, object);
 	}
 	c->tid = next_tid(c->tid);
 	local_irq_enable();
@@ -2951,11 +2953,13 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 			memset(p[j], 0, s->object_size);
 	}
 
+	/* memcg and kmem_cache debug support */
+	slab_post_alloc_hook(s, flags, size, p);
 	return true;
-
 error:
-	__kmem_cache_free_bulk(s, i, p);
 	local_irq_enable();
+	slab_post_alloc_hook(s, flags, i, p);
+	__kmem_cache_free_bulk(s, i, p);
 	return false;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_bulk);
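
A minimal caller-side sketch, for illustration only, of how the bulk API behaves after this change. The cache name, object size, and object count below are made-up placeholders, not part of the patch; the point grounded in the diff is that a false return from kmem_cache_alloc_bulk() means the partially allocated objects were already released on the error path (slab_post_alloc_hook() plus __kmem_cache_free_bulk()), so the caller frees nothing on failure.

#include <linux/errno.h>
#include <linux/slab.h>

#define NR_OBJS 16	/* illustrative count */

static int bulk_alloc_example(void)
{
	struct kmem_cache *my_cache;	/* hypothetical cache, example only */
	void *objs[NR_OBJS];

	my_cache = kmem_cache_create("example-cache", 128, 0,
				     SLAB_HWCACHE_ALIGN, NULL);
	if (!my_cache)
		return -ENOMEM;

	if (!kmem_cache_alloc_bulk(my_cache, GFP_KERNEL, NR_OBJS, objs)) {
		/* nothing to free here: the bulk path cleaned up on error */
		kmem_cache_destroy(my_cache);
		return -ENOMEM;
	}

	/* ... use objs[0..NR_OBJS-1] ... */

	kmem_cache_free_bulk(my_cache, NR_OBJS, objs);
	kmem_cache_destroy(my_cache);
	return 0;
}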