
kmemleak: Remove the reported leaks number limitation

Since the leaks are no longer printed to the syslog, there is no point
in keeping this limitation. All the suspected leaks are shown in the
/sys/kernel/debug/kmemleak file.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
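
As context for the change, a minimal userspace sketch of how the debugfs
interface is used (the program is hypothetical and not part of this commit;
it assumes debugfs is mounted at /sys/kernel/debug and kmemleak is enabled):
writing "scan" triggers a memory scan, and reading the file lists every
suspected leak, no longer capped at REPORTS_NR entries.

/*
 * Hypothetical example: trigger a kmemleak scan and dump all suspected
 * leaks from the debugfs file.  Assumes debugfs is mounted at
 * /sys/kernel/debug and CONFIG_DEBUG_KMEMLEAK is enabled.
 */
#include <stdio.h>
#include <stdlib.h>

#define KMEMLEAK_PATH "/sys/kernel/debug/kmemleak"

int main(void)
{
	char line[512];
	FILE *f;

	/* Writing "scan" asks kmemleak to perform an immediate memory scan. */
	f = fopen(KMEMLEAK_PATH, "w");
	if (!f) {
		perror(KMEMLEAK_PATH);
		return EXIT_FAILURE;
	}
	fputs("scan\n", f);
	fclose(f);

	/* Reading the same file prints every suspected leak. */
	f = fopen(KMEMLEAK_PATH, "r");
	if (!f) {
		perror(KMEMLEAK_PATH);
		return EXIT_FAILURE;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);

	return EXIT_SUCCESS;
}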
Catalin Marinas, 16 years ago
Commit 288c857d66

1 file changed, 2 insertions(+), 15 deletions(-)

mm/kmemleak.c (+2 -15)

@@ -103,7 +103,6 @@
  * Kmemleak configuration and common defines.
  */
 #define MAX_TRACE		16	/* stack trace length */
-#define REPORTS_NR		50	/* maximum number of reported leaks */
 #define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
 #define SECS_FIRST_SCAN		60	/* delay before the first scan */
 #define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
@@ -196,9 +195,6 @@ static int kmemleak_stack_scan = 1;
 /* protects the memory scanning, parameters and debug/kmemleak file access */
 static DEFINE_MUTEX(scan_mutex);
 
-/* number of leaks reported (for limitation purposes) */
-static int reported_leaks;
-
 /*
  * Early object allocation/freeing logging. Kmemleak is initialized after the
  * kernel allocator. However, both the kernel allocator and kmemleak may
@@ -1106,11 +1102,6 @@ static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
 	struct kmemleak_object *object;
 	loff_t n = *pos;
 
-	if (!n)
-		reported_leaks = 0;
-	if (reported_leaks >= REPORTS_NR)
-		return NULL;
-
 	rcu_read_lock();
 	list_for_each_entry_rcu(object, &object_list, object_list) {
 		if (n-- > 0)
@@ -1135,8 +1126,6 @@ static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	struct list_head *n = &prev_obj->object_list;
 
 	++(*pos);
-	if (reported_leaks >= REPORTS_NR)
-		goto out;
 
 	rcu_read_lock();
 	list_for_each_continue_rcu(n, &object_list) {
@@ -1145,7 +1134,7 @@ static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 			break;
 	}
 	rcu_read_unlock();
-out:
+
 	put_object(prev_obj);
 	return next_obj;
 }
@@ -1168,10 +1157,8 @@ static int kmemleak_seq_show(struct seq_file *seq, void *v)
 	unsigned long flags;
 
 	spin_lock_irqsave(&object->lock, flags);
-	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object)) {
+	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
 		print_unreferenced(seq, object);
-		reported_leaks++;
-	}
 	spin_unlock_irqrestore(&object->lock, flags);
 	return 0;
 }
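
For readers unfamiliar with the seq_file interface that kmemleak_seq_start/
kmemleak_seq_next/kmemleak_seq_show implement, below is a minimal sketch of
the same pattern (all names and the backing list are invented for
illustration, not taken from kmemleak.c): start() returns the element at
*pos, next() advances it, and show() prints one record, so the core seq_file
code can stream an arbitrarily long listing without a cap like REPORTS_NR.

/*
 * Minimal illustrative sketch of the seq_file iterator pattern used by
 * /sys/kernel/debug/kmemleak; all identifiers here are hypothetical.
 */
#include <linux/list.h>
#include <linux/seq_file.h>

struct demo_item {
	struct list_head node;
	unsigned long value;
};

static LIST_HEAD(demo_list);

/* start(): return the element at offset *pos, or NULL at end of list. */
static void *demo_seq_start(struct seq_file *seq, loff_t *pos)
{
	return seq_list_start(&demo_list, *pos);
}

/* next(): advance to the following element and update *pos. */
static void *demo_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &demo_list, pos);
}

/* stop(): release anything acquired in start(); nothing to do here. */
static void demo_seq_stop(struct seq_file *seq, void *v)
{
}

/* show(): print a single record for the current element. */
static int demo_seq_show(struct seq_file *seq, void *v)
{
	struct demo_item *item = list_entry(v, struct demo_item, node);

	seq_printf(seq, "value %lu\n", item->value);
	return 0;
}

static const struct seq_operations demo_seq_ops = {
	.start = demo_seq_start,
	.next  = demo_seq_next,
	.stop  = demo_seq_stop,
	.show  = demo_seq_show,
};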