@@ -975,7 +975,6 @@ static inline int task_on_rq_migrating(struct task_struct *p)
 # define finish_arch_post_lock_switch()	do { } while (0)
 #endif
 
-#ifndef __ARCH_WANT_UNLOCKED_CTXSW
 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 {
 #ifdef CONFIG_SMP
@@ -1013,35 +1012,6 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 	raw_spin_unlock_irq(&rq->lock);
 }
 
-#else /* __ARCH_WANT_UNLOCKED_CTXSW */
-static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
-{
-#ifdef CONFIG_SMP
-	/*
-	 * We can optimise this out completely for !SMP, because the
-	 * SMP rebalancing from interrupt is the only thing that cares
-	 * here.
-	 */
-	next->on_cpu = 1;
-#endif
-	raw_spin_unlock(&rq->lock);
-}
-
-static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
-{
-#ifdef CONFIG_SMP
-	/*
-	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
-	 * We must ensure this doesn't happen until the switch is completely
-	 * finished.
-	 */
-	smp_wmb();
-	prev->on_cpu = 0;
-#endif
-	local_irq_enable();
-}
-#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
-
 /*
  * wake flags
  */