@@ -3378,7 +3378,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
 	void *ret = slab_alloc(cachep, flags, _RET_IP_);
 
-	kasan_slab_alloc(cachep, ret);
+	kasan_slab_alloc(cachep, ret, flags);
 	trace_kmem_cache_alloc(_RET_IP_, ret,
 			       cachep->object_size, cachep->size, flags);
 
@@ -3444,7 +3444,7 @@ kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
 
 	ret = slab_alloc(cachep, flags, _RET_IP_);
 
-	kasan_kmalloc(cachep, ret, size);
+	kasan_kmalloc(cachep, ret, size, flags);
 	trace_kmalloc(_RET_IP_, ret,
 		      size, cachep->size, flags);
 	return ret;
@@ -3468,7 +3468,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
 	void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
 
-	kasan_slab_alloc(cachep, ret);
+	kasan_slab_alloc(cachep, ret, flags);
 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
 				    cachep->object_size, cachep->size,
 				    flags, nodeid);
@@ -3486,7 +3486,8 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
 	void *ret;
 
 	ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
-	kasan_kmalloc(cachep, ret, size);
+
+	kasan_kmalloc(cachep, ret, size, flags);
 	trace_kmalloc_node(_RET_IP_, ret,
 			   size, cachep->size,
 			   flags, nodeid);
@@ -3505,7 +3506,7 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
 	ret = kmem_cache_alloc_node_trace(cachep, flags, node, size);
-	kasan_kmalloc(cachep, ret, size);
+	kasan_kmalloc(cachep, ret, size, flags);
 
 	return ret;
 }
@@ -3541,7 +3542,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 		return cachep;
 	ret = slab_alloc(cachep, flags, caller);
 
-	kasan_kmalloc(cachep, ret, size);
+	kasan_kmalloc(cachep, ret, size, flags);
 	trace_kmalloc(caller, ret,
 		      size, cachep->size, flags);
 
@@ -4323,7 +4324,7 @@ size_t ksize(const void *objp)
 	/* We assume that ksize callers could use the whole allocated area,
 	 * so we need to unpoison this area.
 	 */
-	kasan_krealloc(objp, size);
+	kasan_krealloc(objp, size, GFP_NOWAIT);
 
 	return size;
 }
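
Every hunk above follows the same pattern: the slab allocator already holds the caller's gfp_t, so it simply forwards it to the KASAN hook. The one exception is ksize(), which runs in no allocation context of its own and therefore passes GFP_NOWAIT, the most restrictive choice, so the hook cannot sleep on behalf of a caller that may be atomic. As a minimal sketch (the exact prototypes in include/linux/kasan.h are an assumption here; the names and argument order are taken from the call sites above), the hooks after this change would be declared roughly as:

/*
 * Assumed declarations matching the call sites in this patch.
 * Each hook now receives the gfp flags of the enclosing allocation,
 * so any memory KASAN allocates internally (e.g. when saving stack
 * traces) can honor the same constraints as the original request.
 */
void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size,
		   gfp_t flags);
void kasan_krealloc(const void *object, size_t new_size, gfp_t flags);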