@@ -489,21 +489,11 @@ void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
 	kasan_kmalloc(cache, object, cache->object_size, flags);
 }
 
-static void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
-{
-	unsigned long size = cache->object_size;
-	unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
-
-	/* RCU slabs could be legally used after free within the RCU period */
-	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
-		return;
-
-	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
-}
-
-bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
+static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
+			      unsigned long ip, bool quarantine)
 {
 	s8 shadow_byte;
+	unsigned long rounded_up_size;
 
 	/* RCU slabs could be legally used after free within the RCU period */
 	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
@@ -515,9 +505,10 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
 		return true;
 	}
 
-	kasan_poison_slab_free(cache, object);
+	rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
+	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
 
-	if (unlikely(!(cache->flags & SLAB_KASAN)))
+	if (!quarantine || unlikely(!(cache->flags & SLAB_KASAN)))
 		return false;
 
 	set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
@@ -525,6 +516,11 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
 	return true;
 }
 
+bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
+{
+	return __kasan_slab_free(cache, object, ip, true);
+}
+
 void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
 		   gfp_t flags)
 {
@@ -602,7 +598,7 @@ void kasan_poison_kfree(void *ptr, unsigned long ip)
 		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
 				    KASAN_FREE_PAGE);
 	} else {
-		kasan_poison_slab_free(page->slab_cache, ptr);
+		__kasan_slab_free(page->slab_cache, ptr, ip, false);
 	}
 }
 