@@ -1251,11 +1251,13 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
 static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
 {
 	kmemleak_alloc(ptr, size, 1, flags);
+	kasan_kmalloc_large(ptr, size);
 }
 
 static inline void kfree_hook(const void *x)
 {
 	kmemleak_free(x);
+	kasan_kfree_large(x);
 }
 
 static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
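The two hooks above cover large kmalloc() requests that bypass the slab caches and come straight from the page allocator: kasan_kmalloc_large() marks the returned area addressable and kasan_kfree_large() poisons it again when it is freed. KASAN keeps this state in shadow memory, one shadow byte for every 8 bytes of kernel memory. The stand-alone sketch below is only a simplified user-space model of that bookkeeping, with made-up names and poison value; the real code lives in mm/kasan/, and compiler instrumentation performs the shadow check on every memory access.

/*
 * Minimal user-space model of KASAN-style shadow bookkeeping, for
 * illustration only. One shadow byte covers an 8-byte granule:
 * 0 means fully addressable, 1..7 means only the first N bytes are
 * valid, a negative value means the granule is poisoned. Offsets are
 * assumed granule-aligned, as slab objects are.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define GRANULE		8
#define POOL_SIZE	4096
#define POISON_FREE	((int8_t)0xFB)		/* illustrative poison code */

static int8_t shadow[POOL_SIZE / GRANULE];

/* Poison whole granules, e.g. when an object is freed. */
static void poison_range(size_t off, size_t len, int8_t value)
{
	memset(&shadow[off / GRANULE], value, len / GRANULE);
}

/* Unpoison exactly 'len' bytes; a trailing partial granule records
 * how many of its bytes are valid. */
static void unpoison_range(size_t off, size_t len)
{
	size_t full = len / GRANULE;

	memset(&shadow[off / GRANULE], 0, full);
	if (len % GRANULE)
		shadow[off / GRANULE + full] = len % GRANULE;
}

/* The check the instrumentation performs before each access. */
static int addressable(size_t off, size_t len)
{
	for (size_t i = off; i < off + len; i++) {
		int8_t s = shadow[i / GRANULE];

		if (s < 0 || (s != 0 && (int8_t)(i % GRANULE) >= s))
			return 0;
	}
	return 1;
}

int main(void)
{
	unpoison_range(64, 30);			/* "allocate" 30 bytes   */
	assert(addressable(64, 30));		/* in-bounds access is ok */
	assert(!addressable(64, 40));		/* out-of-bounds tail     */
	poison_range(64, 32, POISON_FREE);	/* "free" the object      */
	assert(!addressable(64, 8));		/* use-after-free caught  */
	return 0;
}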
@@ -1278,6 +1280,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
 	kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
 	kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags);
 	memcg_kmem_put_cache(s);
+	kasan_slab_alloc(s, object);
 }
 
 static inline void slab_free_hook(struct kmem_cache *s, void *x)
@@ -1301,6 +1304,8 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
 #endif
 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
 		debug_check_no_obj_freed(x, s->object_size);
+
+	kasan_slab_free(s, x);
 }
 
 /*
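Taken together, slab_post_alloc_hook() and slab_free_hook() make a slab object addressable for exactly its lifetime: kasan_slab_alloc() unpoisons it when it is handed to the caller and kasan_slab_free() poisons it again when it goes back on the freelist, so a later access through a stale pointer lands in poisoned shadow and gets reported. Something along the lines of the sketch below would trigger such a report; it is illustrative only and not part of the patch, with a made-up module name, in the same spirit as the KASAN test cases.

/*
 * Illustrative only: a throwaway module that triggers a use-after-free.
 * With KASAN enabled, loading it should produce a report pointing at
 * the write below.
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/slab.h>

static int __init kasan_uaf_demo_init(void)
{
	char *p = kmalloc(64, GFP_KERNEL);

	if (!p)
		return -ENOMEM;

	kfree(p);
	/* The object was re-poisoned in slab_free_hook(); this write
	 * through the stale pointer is what gets reported. */
	((volatile char *)p)[0] = 'x';
	return 0;
}

static void __exit kasan_uaf_demo_exit(void)
{
}

module_init(kasan_uaf_demo_init);
module_exit(kasan_uaf_demo_exit);
MODULE_LICENSE("GPL");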
@@ -1395,8 +1400,11 @@ static void setup_object(struct kmem_cache *s, struct page *page,
 				void *object)
 {
 	setup_object_debug(s, page, object);
-	if (unlikely(s->ctor))
+	if (unlikely(s->ctor)) {
+		kasan_unpoison_object_data(s, object);
 		s->ctor(object);
+		kasan_poison_object_data(s, object);
+	}
 }
 
 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
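Note the ordering in setup_object(): constructors run while a new slab is being populated, i.e. while the object is still free and therefore poisoned, so the object data is unpoisoned only for the duration of the s->ctor() call and poisoned again right after; it becomes addressable for good only once kasan_slab_alloc() runs at allocation time. This path is taken by any cache created with a constructor, for example the sketch below (illustrative only, the struct and names are made up).

/*
 * Illustrative only, not part of the patch: a cache with a constructor.
 * setup_object() runs foo_ctor() on every object of a freshly created
 * slab, which is why the hunk above opens the poisoning window just
 * around the ctor call.
 */
#include <linux/errno.h>
#include <linux/slab.h>

struct foo {
	int refcount;
	void *data;
};

static void foo_ctor(void *obj)
{
	struct foo *f = obj;

	f->refcount = 0;
	f->data = NULL;
}

static struct kmem_cache *foo_cache;

static int foo_cache_init(void)
{
	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
				      0, SLAB_HWCACHE_ALIGN, foo_ctor);
	return foo_cache ? 0 : -ENOMEM;
}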
@@ -1429,6 +1437,8 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	if (unlikely(s->flags & SLAB_POISON))
 		memset(start, POISON_INUSE, PAGE_SIZE << order);
 
+	kasan_poison_slab(page);
+
 	for_each_object_idx(p, idx, s, start, page->objects) {
 		setup_object(s, page, p);
 		if (likely(idx < page->objects))
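kasan_poison_slab() poisons the entire freshly allocated slab page before any object on it is set up, so the whole page starts out inaccessible; objects are then unpoisoned individually by the allocation hooks as they are handed out, and the unused space between and around objects stays poisoned, which is what catches out-of-bounds accesses into those gaps.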
@@ -2522,6 +2532,7 @@ void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
 	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
 	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
+	kasan_kmalloc(s, ret, size);
 	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_trace);
@@ -2548,6 +2559,8 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
 
 	trace_kmalloc_node(_RET_IP_, ret,
 			   size, s->size, gfpflags, node);
+
+	kasan_kmalloc(s, ret, size);
 	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
@@ -2933,6 +2946,7 @@ static void early_kmem_cache_node_alloc(int node)
 	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
 	init_tracking(kmem_cache_node, n);
 #endif
+	kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node));
 	init_kmem_cache_node(n);
 	inc_slabs_node(kmem_cache_node, node, page->objects);
 
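Note that the kmalloc paths pass the originally requested size to kasan_kmalloc(), not the rounded-up cache size, so only the bytes the caller asked for become addressable and the tail of the slab object stays poisoned as a redzone. That is what lets KASAN catch small overflows that still fit inside the underlying object, as in the sketch below (illustrative only, not part of the patch, with a made-up function name).

/*
 * Illustrative only: kmalloc(123, ...) is backed by a 128-byte slab
 * object, but only the 123 requested bytes are unpoisoned, so a write
 * one byte past the requested size is reported even though it stays
 * inside the object.
 */
#include <linux/slab.h>

static void kmalloc_oob_demo(void)
{
	size_t size = 123;
	char *p = kmalloc(size, GFP_KERNEL);

	if (!p)
		return;

	p[size] = 'x';		/* out-of-bounds write into the redzone */
	kfree(p);
}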
@@ -3305,6 +3319,8 @@ void *__kmalloc(size_t size, gfp_t flags)
 
 	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
 
+	kasan_kmalloc(s, ret, size);
+
 	return ret;
 }
 EXPORT_SYMBOL(__kmalloc);
@@ -3348,12 +3364,14 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 
 	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
 
+	kasan_kmalloc(s, ret, size);
+
 	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
 
-size_t ksize(const void *object)
+static size_t __ksize(const void *object)
 {
 	struct page *page;
 
@@ -3369,6 +3387,15 @@ size_t ksize(const void *object)
 
 	return slab_ksize(page->slab_cache);
 }
+
+size_t ksize(const void *object)
+{
+	size_t size = __ksize(object);
+	/* We assume that ksize callers could use the whole allocated area,
+	   so we need to unpoison this area. */
+	kasan_krealloc(object, size);
+	return size;
+}
 EXPORT_SYMBOL(ksize);
 
 void kfree(const void *x)
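The ksize() rework at the end is the reason for the __ksize() split: callers of ksize() are allowed to use the entire area it reports, which can be larger than what they originally asked for, so the public wrapper unpoisons the whole object via kasan_krealloc() before returning the size. A typical caller pattern that relies on this is sketched below (illustrative only, the helper name is made up).

/*
 * Illustrative only, not part of the patch. The caller asked for 'len'
 * bytes but may use everything ksize() reports (e.g. 128 bytes for a
 * kmalloc-100 request).
 */
#include <linux/slab.h>
#include <linux/string.h>

static void *zalloc_full_object(size_t len)
{
	void *p = kmalloc(len, GFP_KERNEL);

	if (p)
		memset(p, 0, ksize(p));	/* legal: uses the full area */
	return p;
}

Without the unpoisoning step in ksize(), the memset() over the full reported area would be flagged as out-of-bounds even though it is legal.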