@@ -1363,10 +1363,8 @@ static __always_inline void kfree_hook(void *x)
 	kasan_kfree_large(x, _RET_IP_);
 }
 
-static __always_inline void *slab_free_hook(struct kmem_cache *s, void *x)
+static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x)
 {
-	void *freeptr;
-
 	kmemleak_free_recursive(x, s->flags);
 
 	/*
@@ -1386,17 +1384,12 @@ static __always_inline void *slab_free_hook(struct kmem_cache *s, void *x)
 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
 		debug_check_no_obj_freed(x, s->object_size);
 
-	freeptr = get_freepointer(s, x);
-	/*
-	 * kasan_slab_free() may put x into memory quarantine, delaying its
-	 * reuse. In this case the object's freelist pointer is changed.
-	 */
-	kasan_slab_free(s, x, _RET_IP_);
-	return freeptr;
+	/* KASAN might put x into memory quarantine, delaying its reuse */
+	return kasan_slab_free(s, x, _RET_IP_);
 }
 
-static inline void slab_free_freelist_hook(struct kmem_cache *s,
-					   void *head, void *tail)
+static inline bool slab_free_freelist_hook(struct kmem_cache *s,
+					   void **head, void **tail)
 {
 /*
  * Compiler cannot detect this function can be removed if slab_free_hook()
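The hunk above changes the contract of slab_free_hook(): instead of handing back the object's free pointer, it now returns the result of kasan_slab_free(), which reports whether the object went into KASAN's quarantine and so must not be reused yet. A minimal user-space sketch of that contract might look like the following; fake_quarantine() and sketch_free_hook() are hypothetical names used only for illustration, not kernel API.

#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for kasan_slab_free(): returns true when the
 * object is put into a quarantine and must not be reused yet.
 */
static bool fake_quarantine(void *obj)
{
	static int calls;

	(void)obj;
	return calls++ % 2 == 0;	/* quarantine every other call */
}

/*
 * Sketch of the new slab_free_hook() contract: run the debug hooks,
 * then report whether reuse of the object has to be delayed.
 */
static bool sketch_free_hook(void *obj)
{
	/* kmemleak / debugobjects processing would run here */
	return fake_quarantine(obj);
}

int main(void)
{
	int a, b;

	printf("a delayed: %d\n", sketch_free_hook(&a));	/* 1 */
	printf("b delayed: %d\n", sketch_free_hook(&b));	/* 0 */
	return 0;
}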
@@ -1407,13 +1400,33 @@ static inline void slab_free_freelist_hook(struct kmem_cache *s,
 	defined(CONFIG_DEBUG_OBJECTS_FREE) || \
 	defined(CONFIG_KASAN)
 
-	void *object = head;
-	void *tail_obj = tail ? : head;
-	void *freeptr;
+	void *object;
+	void *next = *head;
+	void *old_tail = *tail ? *tail : *head;
+
+	/* Head and tail of the reconstructed freelist */
+	*head = NULL;
+	*tail = NULL;
 
 	do {
-		freeptr = slab_free_hook(s, object);
-	} while ((object != tail_obj) && (object = freeptr));
+		object = next;
+		next = get_freepointer(s, object);
+		/* If object's reuse doesn't have to be delayed */
+		if (!slab_free_hook(s, object)) {
+			/* Move object to the new freelist */
+			set_freepointer(s, object, *head);
+			*head = object;
+			if (!*tail)
+				*tail = object;
+		}
+	} while (object != old_tail);
+
+	if (*head == *tail)
+		*tail = NULL;
+
+	return *head != NULL;
+#else
+	return true;
 #endif
 }
 
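The loop added above rebuilds the freelist in place: it walks the original head..tail chain, asks slab_free_hook() about each object, relinks only the objects whose reuse does not have to be delayed, and reports via the return value whether anything is left to free. The user-space sketch below imitates that reconstruction; the struct layout (free pointer in the first word), needs_delay() and filter_freelist() are illustrative assumptions, not SLUB's actual freepointer handling.

#include <stdbool.h>
#include <stdio.h>

/* Toy object: the free pointer lives in the first word (an assumption). */
struct obj {
	void *free_ptr;
	int id;
};

static void *get_fp(void *x)          { return ((struct obj *)x)->free_ptr; }
static void set_fp(void *x, void *fp) { ((struct obj *)x)->free_ptr = fp; }

/* Hypothetical stand-in for "KASAN wants to quarantine this object". */
static bool needs_delay(void *x)
{
	return ((struct obj *)x)->id % 2 == 0;	/* delay even ids, say */
}

/*
 * Mirrors the patched slab_free_freelist_hook(): filter the detached
 * freelist, rebuild head/tail from the surviving objects, and report
 * whether anything is left for the caller to actually free.
 */
static bool filter_freelist(void **head, void **tail)
{
	void *object;
	void *next = *head;
	void *old_tail = *tail ? *tail : *head;

	/* Head and tail of the reconstructed freelist */
	*head = NULL;
	*tail = NULL;

	do {
		object = next;
		next = get_fp(object);
		if (!needs_delay(object)) {
			/* Move the object to the new freelist */
			set_fp(object, *head);
			*head = object;
			if (!*tail)
				*tail = object;
		}
	} while (object != old_tail);

	if (*head == *tail)
		*tail = NULL;

	return *head != NULL;
}

int main(void)
{
	struct obj o[4];
	void *head = &o[0], *tail = &o[3];
	int i;

	/* Build the initial freelist o[0] -> o[1] -> o[2] -> o[3]. */
	for (i = 0; i < 4; i++) {
		o[i].id = i;
		o[i].free_ptr = (i < 3) ? &o[i + 1] : NULL;
	}

	if (filter_freelist(&head, &tail)) {
		void *p;

		for (p = head; p; p = get_fp(p))
			printf("still to free: id %d\n", ((struct obj *)p)->id);
	}
	return 0;
}

Note that, just as in the patched loop, the surviving objects come out relinked in reverse order.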
@@ -2968,14 +2981,12 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
 				      void *head, void *tail, int cnt,
 				      unsigned long addr)
 {
-	slab_free_freelist_hook(s, head, tail);
 	/*
-	 * slab_free_freelist_hook() could have put the items into quarantine.
-	 * If so, no need to free them.
+	 * With KASAN enabled slab_free_freelist_hook modifies the freelist
+	 * to remove objects, whose reuse must be delayed.
 	 */
-	if (s->flags & SLAB_KASAN && !(s->flags & SLAB_TYPESAFE_BY_RCU))
-		return;
-	do_slab_free(s, page, head, tail, cnt, addr);
+	if (slab_free_freelist_hook(s, &head, &tail))
+		do_slab_free(s, page, head, tail, cnt, addr);
 }
 
 #ifdef CONFIG_KASAN