@@ -454,8 +454,6 @@ static inline void *restore_red_left(struct kmem_cache *s, void *p)
  */
 #if defined(CONFIG_SLUB_DEBUG_ON)
 static int slub_debug = DEBUG_DEFAULT_FLAGS;
-#elif defined(CONFIG_KASAN)
-static int slub_debug = SLAB_STORE_USER;
 #else
 static int slub_debug;
 #endif
@@ -660,6 +658,8 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
 	if (s->flags & SLAB_STORE_USER)
 		off += 2 * sizeof(struct track);
 
+	off += kasan_metadata_size(s);
+
 	if (off != size_from_object(s))
 		/* Beginning of the filler is the free pointer */
 		print_section("Padding ", p + off, size_from_object(s) - off);
@@ -787,6 +787,8 @@ static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
 		/* We also have user information there */
 		off += 2 * sizeof(struct track);
 
+	off += kasan_metadata_size(s);
+
 	if (size_from_object(s) == off)
 		return 1;
 
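Both hunks above account for KASAN's per-object metadata, which now sits in the object's trailer between the SLUB debug tracking data and the padding. kasan_metadata_size() reports how many trailer bytes belong to KASAN, so the debug code neither prints nor checks them as padding. A minimal sketch of what such a helper computes, assuming the alloc/free metadata offsets introduced elsewhere in this series (illustrative, not part of this diff):

	/* Trailer bytes reserved for KASAN's alloc/free metadata, if any. */
	size_t kasan_metadata_size(struct kmem_cache *cache)
	{
		return (cache->kasan_info.alloc_meta_offset ?
			sizeof(struct kasan_alloc_meta) : 0) +
		       (cache->kasan_info.free_meta_offset ?
			sizeof(struct kasan_free_meta) : 0);
	}
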
@@ -1322,8 +1324,10 @@ static inline void kfree_hook(const void *x)
 	kasan_kfree_large(x);
 }
 
-static inline void slab_free_hook(struct kmem_cache *s, void *x)
+static inline void *slab_free_hook(struct kmem_cache *s, void *x)
 {
+	void *freeptr;
+
 	kmemleak_free_recursive(x, s->flags);
 
 	/*
@@ -1344,7 +1348,13 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
 		debug_check_no_obj_freed(x, s->object_size);
 
+	freeptr = get_freepointer(s, x);
+	/*
+	 * kasan_slab_free() may put x into memory quarantine, delaying its
+	 * reuse. In this case the object's freelist pointer is changed.
+	 */
 	kasan_slab_free(s, x);
+	return freeptr;
 }
 
 static inline void slab_free_freelist_hook(struct kmem_cache *s,
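The ordering inside slab_free_hook() is the point of this hunk: the quarantine reuses the object's freelist slot for its own list linkage, so the free pointer must be read before kasan_slab_free() runs, and the caller must advance through the freelist using the returned value. A minimal sketch of the hazard being avoided (illustrative, not code from this diff):

	void *next;

	/* Wrong: read the free pointer after the object was quarantined. */
	kasan_slab_free(s, x);		/* may rewrite x's freelist slot */
	next = get_freepointer(s, x);	/* now points into the quarantine */

	/* Right (what the hook does): capture the pointer first. */
	next = get_freepointer(s, x);
	kasan_slab_free(s, x);
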
@@ -1362,11 +1372,11 @@ static inline void slab_free_freelist_hook(struct kmem_cache *s,
 
 	void *object = head;
 	void *tail_obj = tail ? : head;
+	void *freeptr;
 
 	do {
-		slab_free_hook(s, object);
-	} while ((object != tail_obj) &&
-		 (object = get_freepointer(s, object)));
+		freeptr = slab_free_hook(s, object);
+	} while ((object != tail_obj) && (object = freeptr));
 #endif
 }
 
@@ -2878,16 +2888,13 @@ slab_empty:
  * same page) possible by specifying head and tail ptr, plus objects
  * count (cnt). Bulk free indicated by tail pointer being set.
  */
-static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
-				      void *head, void *tail, int cnt,
-				      unsigned long addr)
+static __always_inline void do_slab_free(struct kmem_cache *s,
+				struct page *page, void *head, void *tail,
+				int cnt, unsigned long addr)
 {
 	void *tail_obj = tail ? : head;
 	struct kmem_cache_cpu *c;
 	unsigned long tid;
-
-	slab_free_freelist_hook(s, head, tail);
-
 redo:
 	/*
 	 * Determine the currently cpus per cpu slab.
@@ -2921,6 +2928,27 @@ redo:
 
 }
 
+static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
+				      void *head, void *tail, int cnt,
+				      unsigned long addr)
+{
+	slab_free_freelist_hook(s, head, tail);
+	/*
+	 * slab_free_freelist_hook() could have put the items into quarantine.
+	 * If so, no need to free them.
+	 */
+	if (s->flags & SLAB_KASAN && !(s->flags & SLAB_DESTROY_BY_RCU))
+		return;
+	do_slab_free(s, page, head, tail, cnt, addr);
+}
+
+#ifdef CONFIG_KASAN
+void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
+{
+	do_slab_free(cache, virt_to_head_page(x), x, NULL, 1, addr);
+}
+#endif
+
 void kmem_cache_free(struct kmem_cache *s, void *x)
 {
 	s = cache_from_obj(s, x);
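Splitting slab_free() from do_slab_free() lets the quarantine perform the real free later without re-running the free hooks: ___cache_free() is the KASAN-only entry point for that deferred free. A hedged sketch of how a quarantine drain loop might use it, assuming qlist helpers along the lines of mm/kasan/quarantine.c (names illustrative, not part of this diff):

	static void qlist_free_all(struct qlist_head *q, struct kmem_cache *cache)
	{
		struct qlist_node *qlink = q->head;

		while (qlink) {
			struct qlist_node *next = qlink->next;
			void *object = qlink_to_object(qlink, cache);

			/* The deferred free: goes straight to do_slab_free(). */
			___cache_free(cache, object, _THIS_IP_);
			qlink = next;
		}
		qlist_init(q);
	}
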
@@ -3363,7 +3391,7 @@ static void set_min_partial(struct kmem_cache *s, unsigned long min)
 static int calculate_sizes(struct kmem_cache *s, int forced_order)
 {
 	unsigned long flags = s->flags;
-	unsigned long size = s->object_size;
+	size_t size = s->object_size;
 	int order;
 
 	/*
|
|
|
* the object.
|
|
|
*/
|
|
|
size += 2 * sizeof(struct track);
|
|
|
+#endif
|
|
|
|
|
|
+ kasan_cache_create(s, &size, &s->flags);
|
|
|
+#ifdef CONFIG_SLUB_DEBUG
|
|
|
if (flags & SLAB_RED_ZONE) {
|
|
|
/*
|
|
|
* Add some empty padding so that we can catch
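
kasan_cache_create() runs while the cache geometry is still being computed, so it can grow size to reserve trailer space for its metadata and set SLAB_KASAN in the cache flags (which slab_free() above tests to decide whether frees are deferred). A simplified sketch of the idea, assuming the kasan_info offsets introduced by this series (illustrative, not the actual implementation):

	void kasan_cache_create(struct kmem_cache *cache, size_t *size,
				unsigned long *flags)
	{
		/* Mark the cache as KASAN-managed so slab_free() can defer. */
		*flags |= SLAB_KASAN;

		/* Reserve trailer space for the allocation stack metadata. */
		cache->kasan_info.alloc_meta_offset = *size;
		*size += sizeof(struct kasan_alloc_meta);

		/* Free metadata goes in the trailer only if the object is
		 * too small to hold it in place of the freed payload. */
		if (cache->object_size < sizeof(struct kasan_free_meta)) {
			cache->kasan_info.free_meta_offset = *size;
			*size += sizeof(struct kasan_free_meta);
		}
	}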
|