@@ -159,6 +159,8 @@ struct kmemleak_object {
 	atomic_t use_count;
 	unsigned long pointer;
 	size_t size;
+	/* pass surplus references to this pointer */
+	unsigned long excess_ref;
 	/* minimum number of pointers found before it is considered a leak */
 	int min_count;
 	/* the total number of pointers found pointing to this object */
@@ -253,7 +255,8 @@ enum {
 	KMEMLEAK_NOT_LEAK,
 	KMEMLEAK_IGNORE,
 	KMEMLEAK_SCAN_AREA,
-	KMEMLEAK_NO_SCAN
+	KMEMLEAK_NO_SCAN,
+	KMEMLEAK_SET_EXCESS_REF
 };
 
 /*
@@ -264,7 +267,10 @@ struct early_log {
 	int op_type;			/* kmemleak operation type */
 	int min_count;			/* minimum reference count */
 	const void *ptr;		/* allocated/freed memory block */
-	size_t size;			/* memory block size */
+	union {
+		size_t size;		/* memory block size */
+		unsigned long excess_ref; /* surplus reference passing */
+	};
 	unsigned long trace[MAX_TRACE];	/* stack trace */
 	unsigned int trace_len;		/* stack trace length */
 };
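
Why the union is safe: each early_log slot records a single operation, which needs either a block size (allocation operations) or an excess_ref value (KMEMLEAK_SET_EXCESS_REF), never both at once. A minimal sketch of how a slot gets filled, assuming the shape of kmemleak's log_early() helper; log_early_sketch() and its argument names are illustrative, not the actual code:

/* Illustrative sketch only; the real helper is kmemleak's log_early(). */
static void log_early_sketch(struct early_log *log, int op_type,
			     const void *ptr, unsigned long size_or_ref,
			     int min_count)
{
	log->op_type = op_type;
	log->ptr = ptr;
	log->size = size_or_ref;	/* same storage as log->excess_ref */
	log->min_count = min_count;
}
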
@@ -562,6 +568,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
 	object->flags = OBJECT_ALLOCATED;
 	object->pointer = ptr;
 	object->size = size;
+	object->excess_ref = 0;
 	object->min_count = min_count;
 	object->count = 0;			/* white color initially */
 	object->jiffies = jiffies;
@@ -794,6 +801,30 @@ out:
 	put_object(object);
 }
 
+/*
+ * Any surplus references (object already gray) to 'ptr' are passed to
+ * 'excess_ref'. This is used in the vmalloc() case where a pointer to
+ * vm_struct may be used as an alternative reference to the vmalloc'ed object
+ * (see free_thread_stack()).
+ */
+static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
+{
+	unsigned long flags;
+	struct kmemleak_object *object;
+
+	object = find_and_get_object(ptr, 0);
+	if (!object) {
+		kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
+			      ptr);
+		return;
+	}
+
+	spin_lock_irqsave(&object->lock, flags);
+	object->excess_ref = excess_ref;
+	spin_unlock_irqrestore(&object->lock, flags);
+	put_object(object);
+}
+
 /*
  * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
  * pointer. Such an object will not be scanned by kmemleak but references to it
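
For context, this is the situation the comment above alludes to with free_thread_stack(): with virtually mapped stacks, the owner keeps only the vm_struct pointer and reaches the block through area->addr. A minimal illustrative sketch of that pattern; struct stack_holder and release_stack() are invented for the example, not kernel code:

#include <linux/vmalloc.h>

/* The only stored reference is to the vm_struct, not to the block itself. */
struct stack_holder {
	struct vm_struct *area;
};

static void release_stack(struct stack_holder *h)
{
	/* the vmalloc'ed block is only reachable via area->addr */
	vfree(h->area->addr);
	h->area = NULL;
}

Without excess_ref, kmemleak would scan stack_holder, find a pointer to the vm_struct but none to area->addr, and report the vmalloc'ed block as a false-positive leak.
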
@@ -908,7 +939,7 @@ static void early_alloc_percpu(struct early_log *log)
  * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
  *
  * This function is called from the kernel allocators when a new object
- * (memory block) is allocated (kmem_cache_alloc, kmalloc, vmalloc etc.).
+ * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
  */
 void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
 			  gfp_t gfp)
@@ -951,6 +982,36 @@ void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
 }
 EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
 
+/**
+ * kmemleak_vmalloc - register a newly vmalloc'ed object
+ * @area:	pointer to vm_struct
+ * @size:	size of the object
+ * @gfp:	__vmalloc() flags used for kmemleak internal memory allocations
+ *
+ * This function is called from the vmalloc() kernel allocator when a new
+ * object (memory block) is allocated.
+ */
+void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
+{
+	pr_debug("%s(0x%p, %zu)\n", __func__, area, size);
+
+	/*
+	 * A min_count = 2 is needed because vm_struct contains a reference to
+	 * the virtual address of the vmalloc'ed block.
+	 */
+	if (kmemleak_enabled) {
+		create_object((unsigned long)area->addr, size, 2, gfp);
+		object_set_excess_ref((unsigned long)area,
+				      (unsigned long)area->addr);
+	} else if (kmemleak_early_log) {
+		log_early(KMEMLEAK_ALLOC, area->addr, size, 2);
+		/* reusing early_log.size for storing area->addr */
+		log_early(KMEMLEAK_SET_EXCESS_REF,
+			  area, (unsigned long)area->addr, 0);
+	}
+}
+EXPORT_SYMBOL_GPL(kmemleak_vmalloc);
+
 /**
  * kmemleak_free - unregister a previously registered object
  * @ptr:	pointer to beginning of the object
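
On the allocator side (not part of this hunk), the commit replaces the previous kmemleak_alloc() call in the __vmalloc_node_range() path of mm/vmalloc.c with this helper. A hedged sketch of what that caller looks like; finish_vmalloc() is a made-up wrapper name:

/* Sketch of the caller side; the actual change lives in mm/vmalloc.c. */
static void *finish_vmalloc(struct vm_struct *area, size_t size, gfp_t gfp)
{
	/* registers area->addr with min_count = 2 and area as the excess_ref target */
	kmemleak_vmalloc(area, size, gfp);
	return area->addr;
}
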
@@ -1248,6 +1309,7 @@ static void scan_block(void *_start, void *_end,
 	for (ptr = start; ptr < end; ptr++) {
 		struct kmemleak_object *object;
 		unsigned long pointer;
+		unsigned long excess_ref;
 
 		if (scan_should_stop())
 			break;
@@ -1283,8 +1345,27 @@ static void scan_block(void *_start, void *_end,
 		 * enclosed by scan_mutex.
 		 */
 		spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
-		update_refs(object);
+		/* only pass surplus references (object already gray) */
+		if (color_gray(object)) {
+			excess_ref = object->excess_ref;
+			/* no need for update_refs() if object already gray */
+		} else {
+			excess_ref = 0;
+			update_refs(object);
+		}
 		spin_unlock(&object->lock);
+
+		if (excess_ref) {
+			object = lookup_object(excess_ref, 0);
+			if (!object)
+				continue;
+			if (object == scanned)
+				/* circular reference, ignore */
+				continue;
+			spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
+			update_refs(object);
+			spin_unlock(&object->lock);
+		}
 	}
 	read_unlock_irqrestore(&kmemleak_lock, flags);
 }
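
The "already gray" test is what makes a reference surplus: an object turns gray once its pointer count reaches min_count, so any further pointer to it proves nothing new and can be donated to the excess_ref target instead. A paraphrase of the coloring test, assumed from kmemleak's helpers; is_gray() is an illustrative stand-in for the real color_gray():

/* Paraphrase, not the verbatim kmemleak helper. */
static bool is_gray(const struct kmemleak_object *object)
{
	/* min_count == -1 marks an object excluded from leak reporting */
	return object->min_count != -1 &&
	       object->count >= object->min_count;
}
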
@@ -1987,6 +2068,10 @@ void __init kmemleak_init(void)
 		case KMEMLEAK_NO_SCAN:
 			kmemleak_no_scan(log->ptr);
 			break;
+		case KMEMLEAK_SET_EXCESS_REF:
+			object_set_excess_ref((unsigned long)log->ptr,
+					      log->excess_ref);
+			break;
 		default:
 			kmemleak_warn("Unknown early log operation: %d\n",
 				      log->op_type);