@@ -351,7 +351,6 @@ void kasan_free_pages(struct page *page, unsigned int order)
 				KASAN_FREE_PAGE);
 }
 
-#ifdef CONFIG_SLAB
 /*
  * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
  * For larger allocations larger redzones are used.
@@ -373,16 +372,8 @@ void kasan_cache_create(struct kmem_cache *cache, size_t *size,
 			unsigned long *flags)
 {
 	int redzone_adjust;
-	/* Make sure the adjusted size is still less than
-	 * KMALLOC_MAX_CACHE_SIZE.
-	 * TODO: this check is only useful for SLAB, but not SLUB. We'll need
-	 * to skip it for SLUB when it starts using kasan_cache_create().
-	 */
-	if (*size > KMALLOC_MAX_CACHE_SIZE -
-	    sizeof(struct kasan_alloc_meta) -
-	    sizeof(struct kasan_free_meta))
-		return;
-	*flags |= SLAB_KASAN;
+	int orig_size = *size;
+
 	/* Add alloc meta. */
 	cache->kasan_info.alloc_meta_offset = *size;
 	*size += sizeof(struct kasan_alloc_meta);
@@ -395,14 +386,26 @@ void kasan_cache_create(struct kmem_cache *cache, size_t *size,
 	}
 	redzone_adjust = optimal_redzone(cache->object_size) -
 		(*size - cache->object_size);
+
 	if (redzone_adjust > 0)
 		*size += redzone_adjust;
-	*size = min(KMALLOC_MAX_CACHE_SIZE,
-		    max(*size,
-			cache->object_size +
-			optimal_redzone(cache->object_size)));
+
+	*size = min(KMALLOC_MAX_SIZE, max(*size, cache->object_size +
+					optimal_redzone(cache->object_size)));
+
+	/*
+	 * If the metadata doesn't fit, don't enable KASAN at all.
+	 */
+	if (*size <= cache->kasan_info.alloc_meta_offset ||
+			*size <= cache->kasan_info.free_meta_offset) {
+		cache->kasan_info.alloc_meta_offset = 0;
+		cache->kasan_info.free_meta_offset = 0;
+		*size = orig_size;
+		return;
+	}
+
+	*flags |= SLAB_KASAN;
 }
-#endif
 
 void kasan_cache_shrink(struct kmem_cache *cache)
 {
@@ -414,6 +417,14 @@ void kasan_cache_destroy(struct kmem_cache *cache)
 	quarantine_remove_cache(cache);
 }
 
+size_t kasan_metadata_size(struct kmem_cache *cache)
+{
+	return (cache->kasan_info.alloc_meta_offset ?
+		sizeof(struct kasan_alloc_meta) : 0) +
+		(cache->kasan_info.free_meta_offset ?
+		sizeof(struct kasan_free_meta) : 0);
+}
+
 void kasan_poison_slab(struct page *page)
 {
 	kasan_poison_shadow(page_address(page),
@@ -431,16 +442,13 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object)
 	kasan_poison_shadow(object,
 			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
 			KASAN_KMALLOC_REDZONE);
-#ifdef CONFIG_SLAB
 	if (cache->flags & SLAB_KASAN) {
 		struct kasan_alloc_meta *alloc_info =
 			get_alloc_info(cache, object);
 		alloc_info->state = KASAN_STATE_INIT;
 	}
-#endif
 }
 
-#ifdef CONFIG_SLAB
 static inline int in_irqentry_text(unsigned long ptr)
 {
 	return (ptr >= (unsigned long)&__irqentry_text_start &&
@@ -501,7 +509,6 @@ struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
 	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
 	return (void *)object + cache->kasan_info.free_meta_offset;
 }
-#endif
 
 void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
 {
@@ -522,16 +529,16 @@ static void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
 
 bool kasan_slab_free(struct kmem_cache *cache, void *object)
 {
-#ifdef CONFIG_SLAB
 	/* RCU slabs could be legally used after free within the RCU period */
 	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
 		return false;
 
 	if (likely(cache->flags & SLAB_KASAN)) {
-		struct kasan_alloc_meta *alloc_info =
-			get_alloc_info(cache, object);
-		struct kasan_free_meta *free_info =
-			get_free_info(cache, object);
+		struct kasan_alloc_meta *alloc_info;
+		struct kasan_free_meta *free_info;
+
+		alloc_info = get_alloc_info(cache, object);
+		free_info = get_free_info(cache, object);
 
 		switch (alloc_info->state) {
 		case KASAN_STATE_ALLOC:
@@ -550,10 +557,6 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object)
 		}
 	}
 	return false;
-#else
-	kasan_poison_slab_free(cache, object);
-	return false;
-#endif
 }
 
 void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
@@ -576,7 +579,6 @@ void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
 	kasan_unpoison_shadow(object, size);
 	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
 		KASAN_KMALLOC_REDZONE);
-#ifdef CONFIG_SLAB
 	if (cache->flags & SLAB_KASAN) {
 		struct kasan_alloc_meta *alloc_info =
 			get_alloc_info(cache, object);
@@ -585,7 +587,6 @@ void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
 		alloc_info->alloc_size = size;
 		set_track(&alloc_info->track, flags);
 	}
-#endif
 }
 EXPORT_SYMBOL(kasan_kmalloc);
 