@@ -333,9 +333,12 @@ static inline struct rq *__task_rq_lock(struct task_struct *p)
 	for (;;) {
 		rq = task_rq(p);
 		raw_spin_lock(&rq->lock);
-		if (likely(rq == task_rq(p)))
+		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
 			return rq;
 		raw_spin_unlock(&rq->lock);
+
+		while (unlikely(task_on_rq_migrating(p)))
+			cpu_relax();
 	}
 }
 
@@ -352,10 +355,13 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 		raw_spin_lock_irqsave(&p->pi_lock, *flags);
 		rq = task_rq(p);
 		raw_spin_lock(&rq->lock);
-		if (likely(rq == task_rq(p)))
+		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
 			return rq;
 		raw_spin_unlock(&rq->lock);
 		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
+
+		while (unlikely(task_on_rq_migrating(p)))
+			cpu_relax();
 	}
 }
 
@@ -1678,7 +1684,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 	success = 1; /* we're going to change ->state */
 	cpu = task_cpu(p);
 
-	if (task_on_rq_queued(p) && ttwu_remote(p, wake_flags))
+	if (p->on_rq && ttwu_remote(p, wake_flags))
 		goto stat;
 
 #ifdef CONFIG_SMP
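
Note: the first two hunks extend the usual lock/recheck/retry loop so that a caller
never returns holding rq->lock while the task is mid-migration: if the re-check under
the lock sees either a different rq or task_on_rq_migrating(p), the lock is dropped and
the locker spins with cpu_relax() outside the lock until the migration completes, then
retries. Below is a minimal user-space sketch of that same pattern, under stated
assumptions: the fake_rq/fake_task types and fake_task_rq_lock() helper are hypothetical
illustrations only, with a pthread mutex standing in for rq->lock, C11 atomics standing
in for the on_rq state, and sched_yield() standing in for cpu_relax().

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>

struct fake_rq {
	pthread_mutex_t lock;		/* stands in for rq->lock */
};

struct fake_task {
	_Atomic(struct fake_rq *) rq;	/* which runqueue the task is on */
	atomic_bool migrating;		/* analogue of TASK_ON_RQ_MIGRATING */
};

/*
 * Lock the runqueue the task is currently on.  If the task moved (or is
 * mid-migration) while we were acquiring the lock, drop the lock, wait for
 * the migration to finish outside the lock, and try again.
 */
static struct fake_rq *fake_task_rq_lock(struct fake_task *p)
{
	struct fake_rq *rq;

	for (;;) {
		rq = atomic_load(&p->rq);
		pthread_mutex_lock(&rq->lock);
		/* Re-check under the lock: same rq, and not being migrated? */
		if (rq == atomic_load(&p->rq) && !atomic_load(&p->migrating))
			return rq;
		pthread_mutex_unlock(&rq->lock);

		/* Busy-wait outside the lock so the migrator can make progress. */
		while (atomic_load(&p->migrating))
			sched_yield();
	}
}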