@@ -513,12 +513,11 @@ static inline void init_hrtick(void)
  * might also involve a cross-CPU call to trigger the scheduler on
  * the target CPU.
  */
-#ifdef CONFIG_SMP
 void resched_task(struct task_struct *p)
 {
 	int cpu;
 
-	assert_raw_spin_locked(&task_rq(p)->lock);
+	lockdep_assert_held(&task_rq(p)->lock);
 
 	if (test_tsk_need_resched(p))
 		return;
@@ -546,6 +545,7 @@ void resched_cpu(int cpu)
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
+#ifdef CONFIG_SMP
 #ifdef CONFIG_NO_HZ_COMMON
 /*
  * In the semi idle case, use the nearest busy cpu for migrating timers
@@ -693,12 +693,6 @@ void sched_avg_update(struct rq *rq)
 	}
 }
 
-#else /* !CONFIG_SMP */
-void resched_task(struct task_struct *p)
-{
-	assert_raw_spin_locked(&task_rq(p)->lock);
-	set_tsk_need_resched(p);
-}
 #endif /* CONFIG_SMP */
 
 #if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \