@@ -2949,12 +2949,8 @@ static inline void schedule_debug(struct task_struct *prev)
 #ifdef CONFIG_SCHED_STACK_END_CHECK
 	BUG_ON(unlikely(task_stack_end_corrupted(prev)));
 #endif
-	/*
-	 * Test if we are atomic. Since do_exit() needs to call into
-	 * schedule() atomically, we ignore that path. Otherwise whine
-	 * if we are scheduling when we should not.
-	 */
-	if (unlikely(in_atomic_preempt_off() && prev->state != TASK_DEAD))
+
+	if (unlikely(in_atomic_preempt_off()))
 		__schedule_bug(prev);
 	rcu_sleep_check();
 
@@ -3053,6 +3049,17 @@ static void __sched __schedule(void)
 	rcu_note_context_switch();
 	prev = rq->curr;
 
+	/*
+	 * do_exit() calls schedule() with preemption disabled as an exception;
+	 * however we must fix that up, otherwise the next task will see an
+	 * inconsistent (higher) preempt count.
+	 *
+	 * It also avoids the below schedule_debug() test from complaining
+	 * about this.
+	 */
+	if (unlikely(prev->state == TASK_DEAD))
+		preempt_enable_no_resched_notrace();
+
 	schedule_debug(prev);
 
 	if (sched_feat(HRTICK))
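
To make the preempt-count bookkeeping easier to follow, here is a minimal user-space sketch in plain C (not kernel code; every identifier in it is a hypothetical stand-in for the real primitives) of why the TASK_DEAD fixup above must drop the extra disable level that do_exit() leaves behind before schedule_debug() runs: without it, the in_atomic_preempt_off() check fires and the next task would inherit an inflated preempt count.

/*
 * Hypothetical mock, not kernel code: simulates the preempt counter to
 * show the effect of the TASK_DEAD fixup in the patch above.
 */
#include <stdbool.h>
#include <stdio.h>

#define TASK_RUNNING	0
#define TASK_DEAD	1

static int preempt_count;	/* mock of the per-CPU preempt counter */

static void preempt_disable(void)		{ preempt_count++; }
static void preempt_enable_no_resched(void)	{ preempt_count--; }

/* Mock of the debug check: schedule() expects exactly one disable level. */
static bool in_atomic_preempt_off(void)	{ return preempt_count != 1; }

static void mock_schedule(int prev_state)
{
	preempt_disable();		/* schedule() disables preemption itself */

	/* The fixup from the patch: a dying task enters with preemption
	 * already disabled, so drop that extra level before the check. */
	if (prev_state == TASK_DEAD)
		preempt_enable_no_resched();

	if (in_atomic_preempt_off())
		printf("BUG: scheduling while atomic (count=%d)\n", preempt_count);
	else
		printf("clean switch (count=%d)\n", preempt_count);

	preempt_enable_no_resched();	/* switch done, drop schedule()'s level */
}

int main(void)
{
	mock_schedule(TASK_RUNNING);	/* ordinary task: counter stays balanced */

	preempt_disable();		/* do_exit() leaves preemption disabled */
	mock_schedule(TASK_DEAD);	/* fixup drops the extra level in time */
	return 0;
}

Commenting out the TASK_DEAD branch in mock_schedule() reproduces the situation the patch guards against: the check trips for the exiting task and the counter ends up one level too high for whatever runs next.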