@@ -2086,6 +2086,8 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	}
 #endif
 
+	kasan_cache_create(cachep, &size, &flags);
+
 	size = ALIGN(size, cachep->align);
 	/*
 	 * We should restrict the number of objects in a slab to implement
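
kasan_cache_create() is passed pointers to both size and flags so it can enlarge each object and reserve room for redzones and per-object metadata before the ALIGN() below fixes the slab geometry. As rough intuition only, here is a user-space toy of that kind of layout adjustment; SHADOW_SCALE, round_up_to() and toy_cache_create() are invented names, and the real helper's bookkeeping is more involved and version-dependent:

/* Toy model of reserving a redzone after each object; illustrative
 * only, not the mm/kasan implementation. */
#include <stdio.h>
#include <stddef.h>

#define SHADOW_SCALE 8			/* one shadow byte covers 8 bytes */

static size_t round_up_to(size_t n, size_t align)
{
	return (n + align - 1) & ~(align - 1);	/* align is a power of two */
}

/* Pad the object so a full shadow granule of redzone follows it. */
static size_t toy_cache_create(size_t object_size)
{
	return round_up_to(object_size, SHADOW_SCALE) + SHADOW_SCALE;
}

int main(void)
{
	size_t sizes[] = { 8, 12, 96, 192 };
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("object %3zu bytes -> padded to %3zu\n",
		       sizes[i], toy_cache_create(sizes[i]));
	return 0;
}
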
@@ -2387,8 +2389,13 @@ static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page)
 		 * cache which they are a constructor for. Otherwise, deadlock.
 		 * They must also be threaded.
 		 */
-		if (cachep->ctor && !(cachep->flags & SLAB_POISON))
+		if (cachep->ctor && !(cachep->flags & SLAB_POISON)) {
+			kasan_unpoison_object_data(cachep,
+						   objp + obj_offset(cachep));
 			cachep->ctor(objp + obj_offset(cachep));
+			kasan_poison_object_data(
+				cachep, objp + obj_offset(cachep));
+		}
 
 		if (cachep->flags & SLAB_RED_ZONE) {
 			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
@@ -2409,6 +2416,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
 			    struct page *page)
 {
 	int i;
+	void *objp;
 
 	cache_init_objs_debug(cachep, page);
 
@@ -2419,8 +2427,12 @@ static void cache_init_objs(struct kmem_cache *cachep,
 
 	for (i = 0; i < cachep->num; i++) {
 		/* constructor could break poison info */
-		if (DEBUG == 0 && cachep->ctor)
-			cachep->ctor(index_to_obj(cachep, page, i));
+		if (DEBUG == 0 && cachep->ctor) {
+			objp = index_to_obj(cachep, page, i);
+			kasan_unpoison_object_data(cachep, objp);
+			cachep->ctor(objp);
+			kasan_poison_object_data(cachep, objp);
+		}
 
 		set_free_obj(page, i, i);
 	}
@@ -2550,6 +2562,7 @@ static int cache_grow(struct kmem_cache *cachep,
 
 	slab_map_pages(cachep, page, freelist);
 
+	kasan_poison_slab(page);
 	cache_init_objs(cachep, page);
 
 	if (gfpflags_allow_blocking(local_flags))
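
The three hunks above establish the object-lifetime discipline: cache_grow() poisons the entire slab with kasan_poison_slab() before the objects are initialized, and every constructor call is bracketed by kasan_unpoison_object_data()/kasan_poison_object_data() so the ctor may write to an object that must read as unallocated to everyone else. A minimal user-space sketch of that bracket, using a byte-granular toy shadow (POISONED, checked_write() and friends are illustrative, not KASAN's real encoding):

/* User-space sketch of the unpoison/ctor/poison bracket. */
#include <assert.h>
#include <string.h>
#include <stdio.h>

#define OBJ_SIZE    32
#define POISONED    0xFB	/* arbitrary "not addressable" marker */
#define ADDRESSABLE 0x00

static unsigned char shadow[OBJ_SIZE];	/* byte-granular toy shadow */
static unsigned char object[OBJ_SIZE];

static void poison(void)   { memset(shadow, POISONED, OBJ_SIZE); }
static void unpoison(void) { memset(shadow, ADDRESSABLE, OBJ_SIZE); }

/* What a KASAN check amounts to: touching poisoned bytes is a bug. */
static void checked_write(size_t off, unsigned char val)
{
	assert(shadow[off] == ADDRESSABLE && "toy KASAN: poisoned access");
	object[off] = val;
}

static void ctor(void)
{
	for (size_t i = 0; i < OBJ_SIZE; i++)
		checked_write(i, 0);	/* constructor initializes the object */
}

int main(void)
{
	poison();	/* slab starts fully poisoned, as in cache_grow() */
	unpoison();	/* kasan_unpoison_object_data() before the ctor */
	ctor();		/* safe: the object is addressable here */
	poison();	/* kasan_poison_object_data() after the ctor */
	printf("ctor ran without tripping the toy KASAN check\n");
	return 0;
}
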
@@ -3316,6 +3329,8 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
 {
 	struct array_cache *ac = cpu_cache_get(cachep);
 
+	kasan_slab_free(cachep, objp);
+
 	check_irq_off();
 	kmemleak_free_recursive(objp, cachep->flags);
 	objp = cache_free_debugcheck(cachep, objp, caller);
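
Placing kasan_slab_free() at the top of __cache_free() poisons the object at the earliest point on the free path, so any later access through a stale pointer lands on poisoned shadow. A compact sketch of the detection this buys, again with invented toy_free()/toy_read() helpers:

/* Toy use-after-free detection: freeing poisons, later reads trip. */
#include <stdio.h>

#define POISON_FREE 0xFB

static unsigned char object[16];
static unsigned char shadow[16];	/* 0 = addressable */

static void toy_free(void)
{
	for (int i = 0; i < 16; i++)
		shadow[i] = POISON_FREE;	/* kasan_slab_free() analogue */
}

static int toy_read(int off)
{
	if (shadow[off] != 0) {
		fprintf(stderr, "toy KASAN: use-after-free at offset %d\n", off);
		return -1;
	}
	return object[off];
}

int main(void)
{
	toy_read(3);	/* fine: object still "live" */
	toy_free();
	toy_read(3);	/* reported: the shadow says this byte is freed */
	return 0;
}
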
@@ -3363,6 +3378,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
 	void *ret = slab_alloc(cachep, flags, _RET_IP_);
 
+	kasan_slab_alloc(cachep, ret);
 	trace_kmem_cache_alloc(_RET_IP_, ret,
 			       cachep->object_size, cachep->size, flags);
 
@@ -3428,6 +3444,7 @@ kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
 
 	ret = slab_alloc(cachep, flags, _RET_IP_);
 
+	kasan_kmalloc(cachep, ret, size);
 	trace_kmalloc(_RET_IP_, ret,
 		      size, cachep->size, flags);
 	return ret;
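
Note the asymmetry between the allocation hooks: kasan_slab_alloc() unpoisons a whole object, while kasan_kmalloc() also receives the caller's requested size, so only those bytes become addressable and the slack up to the cache's object size stays poisoned as a redzone. That is what lets KASAN catch an overflow past the requested length even though the underlying slab object is larger. A toy-shadow illustration (OBJECT_SIZE, REDZONE and toy_kmalloc() are made-up names):

/* Toy kasan_kmalloc(): unpoison only the requested bytes, leave the
 * tail of the slab object poisoned as a redzone. */
#include <stdio.h>
#include <string.h>

#define OBJECT_SIZE 32			/* what the cache actually hands out */
#define REDZONE     0xFC

static unsigned char shadow[OBJECT_SIZE];

static void toy_kmalloc(size_t requested)
{
	memset(shadow, 0, requested);			/* addressable */
	memset(shadow + requested, REDZONE,
	       OBJECT_SIZE - requested);		/* still poisoned */
}

int main(void)
{
	toy_kmalloc(17);	/* kmalloc(17) served from a 32-byte cache */
	printf("byte 16: %s\n", shadow[16] ? "redzone" : "ok");
	printf("byte 17: %s\n", shadow[17] ? "redzone" : "ok");  /* overflow */
	return 0;
}
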
@@ -3451,6 +3468,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
 	void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
 
+	kasan_slab_alloc(cachep, ret);
 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
 				    cachep->object_size, cachep->size,
 				    flags, nodeid);
@@ -3468,7 +3486,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
 	void *ret;
 
 	ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
-
+	kasan_kmalloc(cachep, ret, size);
 	trace_kmalloc_node(_RET_IP_, ret,
 			   size, cachep->size,
 			   flags, nodeid);
@@ -3481,11 +3499,15 @@ static __always_inline void *
 __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
 {
 	struct kmem_cache *cachep;
+	void *ret;
 
 	cachep = kmalloc_slab(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
-	return kmem_cache_alloc_node_trace(cachep, flags, node, size);
+	ret = kmem_cache_alloc_node_trace(cachep, flags, node, size);
+	kasan_kmalloc(cachep, ret, size);
+
+	return ret;
 }
 
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
@@ -3519,6 +3541,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 		return cachep;
 	ret = slab_alloc(cachep, flags, caller);
 
+	kasan_kmalloc(cachep, ret, size);
 	trace_kmalloc(caller, ret,
 		      size, cachep->size, flags);
 
@@ -4290,10 +4313,18 @@ module_init(slab_proc_init);
  */
 size_t ksize(const void *objp)
 {
+	size_t size;
+
 	BUG_ON(!objp);
 	if (unlikely(objp == ZERO_SIZE_PTR))
 		return 0;
 
-	return virt_to_cache(objp)->object_size;
+	size = virt_to_cache(objp)->object_size;
+	/* We assume that ksize callers could use the whole allocated area,
+	 * so we need to unpoison this area.
+	 */
+	kasan_krealloc(objp, size);
+
+	return size;
 }
 EXPORT_SYMBOL(ksize);
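
The ksize() change closes a subtle hole that the new comment spells out: a caller that sizes its writes by ksize() may legally use the whole allocated area, so the slack beyond the original request must be unpoisoned again. The same pattern exists in user space with glibc's malloc_usable_size(), shown here as an analogue (the 17-byte request is arbitrary):

/* User-space analogue of the ksize() pattern: the caller may legally
 * use the whole usable area, not just what it asked for. */
#include <malloc.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>

int main(void)
{
	char *p = malloc(17);
	size_t usable;

	if (!p)
		return 1;
	usable = malloc_usable_size(p);	/* like ksize(): often > 17 */
	memset(p, 0, usable);		/* legal for the whole usable area */
	printf("asked for 17, may use %zu\n", usable);
	free(p);
	return 0;
}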