
sched/core: Enable might_sleep() and smp_processor_id() checks early

might_sleep() and smp_processor_id() checks are enabled after the boot
process is done. That hides bugs in the SMP bringup and driver
initialization code.

Enable them right when the scheduler starts working, i.e. when the init
task and kthreadd have been created and right before the idle task
enables preemption.

Tested-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/20170516184736.272225698@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Author: Thomas Gleixner
Commit: 1c3c5eab17

3 changed files with 14 additions and 2 deletions:
  1. init/main.c (+10, -0)
  2. kernel/sched/core.c (+3, -1)
  3. lib/smp_processor_id.c (+1, -1)

+ 10 - 0
init/main.c

@@ -414,6 +414,16 @@ static noinline void __ref rest_init(void)
 	rcu_read_lock();
 	kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns);
 	rcu_read_unlock();
+
+	/*
+	 * Enable might_sleep() and smp_processor_id() checks.
+	 * They cannot be enabled earlier because with CONFIG_PREEMPT=y
+	 * kernel_thread() would trigger might_sleep() splats. With
+	 * CONFIG_PREEMPT_VOLUNTARY=y the init task might have scheduled
+	 * already, but it's stuck on the kthreadd_done completion.
+	 */
+	system_state = SYSTEM_SCHEDULING;
+
 	complete(&kthreadd_done);
 
 	/*
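
To illustrate the class of bug the earlier enablement exposes, here is a
hypothetical driver initcall (not part of this patch; the function name is
made up) that sleeps with preemption disabled. Before this change the debug
check stayed silent for it until system_state reached SYSTEM_RUNNING; with
the checks armed from SYSTEM_SCHEDULING on, it warns during boot:

	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <linux/preempt.h>

	/* Hypothetical example, not from this patch: any sleeping API
	 * (mutex_lock(), kmalloc(GFP_KERNEL), ...) ends up in might_sleep();
	 * calling it directly keeps the sketch minimal. */
	static int __init example_broken_init(void)
	{
		preempt_disable();
		might_sleep();	/* now splats at boot time instead of being hidden */
		preempt_enable();
		return 0;
	}
	early_initcall(example_broken_init);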

+ 3 - 1
kernel/sched/core.c

@@ -6238,8 +6238,10 @@ void ___might_sleep(const char *file, int line, int preempt_offset)
 
 	if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
 	     !is_idle_task(current)) ||
-	    system_state != SYSTEM_RUNNING || oops_in_progress)
+	    system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
+	    oops_in_progress)
 		return;
+
 	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
 		return;
 	prev_jiffy = jiffies;

+ 1 - 1
lib/smp_processor_id.c

@@ -28,7 +28,7 @@ notrace static unsigned int check_preemption_disabled(const char *what1,
 	/*
 	 * It is valid to assume CPU-locality during early bootup:
 	 */
-	if (system_state != SYSTEM_RUNNING)
+	if (system_state < SYSTEM_SCHEDULING)
 		goto out;
 
 	/*
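
Both new comparisons (system_state == SYSTEM_BOOTING || system_state >
SYSTEM_RUNNING in ___might_sleep() above, and system_state <
SYSTEM_SCHEDULING here) rely on where SYSTEM_SCHEDULING sits in the
system_states enum. A rough sketch of the ordering introduced by an earlier
patch in this series (not part of this diff):

	enum system_states {
		SYSTEM_BOOTING,
		SYSTEM_SCHEDULING,	/* scheduler is up, boot not yet finished */
		SYSTEM_RUNNING,
		SYSTEM_HALT,
		SYSTEM_POWER_OFF,
		SYSTEM_RESTART,
	};

	/* With this ordering, the might_sleep()/smp_processor_id() debug
	 * checks are suppressed only while still booting or while shutting
	 * down, and are active from SYSTEM_SCHEDULING onwards. */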