@@ -4679,20 +4679,20 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
  */
 static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 {
-	struct rq *rq_dest, *rq_src;
+	struct rq *rq;
 	int ret = 0;
 
 	if (unlikely(!cpu_active(dest_cpu)))
 		return ret;
 
-	rq_src = cpu_rq(src_cpu);
-	rq_dest = cpu_rq(dest_cpu);
+	rq = cpu_rq(src_cpu);
 
 	raw_spin_lock(&p->pi_lock);
-	double_rq_lock(rq_src, rq_dest);
+	raw_spin_lock(&rq->lock);
 	/* Already moved. */
 	if (task_cpu(p) != src_cpu)
 		goto done;
+
 	/* Affinity changed (again). */
 	if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
 		goto fail;
@@ -4702,15 +4702,22 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 	 * placed properly.
 	 */
 	if (task_on_rq_queued(p)) {
-		dequeue_task(rq_src, p, 0);
+		dequeue_task(rq, p, 0);
+		p->on_rq = TASK_ON_RQ_MIGRATING;
 		set_task_cpu(p, dest_cpu);
-		enqueue_task(rq_dest, p, 0);
-		check_preempt_curr(rq_dest, p, 0);
+		raw_spin_unlock(&rq->lock);
+
+		rq = cpu_rq(dest_cpu);
+		raw_spin_lock(&rq->lock);
+		BUG_ON(task_rq(p) != rq);
+		p->on_rq = TASK_ON_RQ_QUEUED;
+		enqueue_task(rq, p, 0);
+		check_preempt_curr(rq, p, 0);
 	}
 done:
 	ret = 1;
 fail:
-	double_rq_unlock(rq_src, rq_dest);
+	raw_spin_unlock(&rq->lock);
 	raw_spin_unlock(&p->pi_lock);
 	return ret;
 }
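
For readers following the locking change rather than the kernel internals: the hunks above replace double_rq_lock(), which held the source and destination runqueue locks simultaneously, with a hand-off in which only one rq lock is held at a time. The task is dequeued under the source lock and flagged p->on_rq = TASK_ON_RQ_MIGRATING, the source lock is dropped, and only then is the destination lock taken to enqueue it as TASK_ON_RQ_QUEUED; the BUG_ON(task_rq(p) != rq) asserts that set_task_cpu() already pointed the task at the destination rq. Below is a minimal user-space sketch of that pattern, assuming mock types throughout; none of the names here (struct rq, struct task, migrate(), runqueues[]) are kernel APIs, and pthread mutexes stand in for raw spinlocks.

/* Mock illustration of the one-lock-at-a-time migration hand-off.
 * Compile with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

enum { TASK_ON_RQ_QUEUED = 1, TASK_ON_RQ_MIGRATING = 2 };

struct rq {				/* mock per-cpu runqueue */
	pthread_mutex_t lock;
	int nr_running;
};

struct task {				/* mock task */
	int on_rq;
	int cpu;
};

static struct rq runqueues[2] = {
	{ PTHREAD_MUTEX_INITIALIZER, 0 },
	{ PTHREAD_MUTEX_INITIALIZER, 0 },
};

static void migrate(struct task *p, int src_cpu, int dest_cpu)
{
	struct rq *rq = &runqueues[src_cpu];

	pthread_mutex_lock(&rq->lock);
	/* Dequeue under the source lock and flag the in-flight state,
	 * so anyone inspecting p sees it is neither queued nor gone. */
	rq->nr_running--;
	p->on_rq = TASK_ON_RQ_MIGRATING;
	p->cpu = dest_cpu;		/* analogous to set_task_cpu() */
	pthread_mutex_unlock(&rq->lock);

	/* Only one rq lock is ever held: no lock-ordering concern and
	 * shorter hold times than locking both runqueues at once. */
	rq = &runqueues[dest_cpu];
	pthread_mutex_lock(&rq->lock);
	p->on_rq = TASK_ON_RQ_QUEUED;
	rq->nr_running++;
	pthread_mutex_unlock(&rq->lock);
}

int main(void)
{
	struct task p = { TASK_ON_RQ_QUEUED, 0 };

	runqueues[0].nr_running = 1;
	migrate(&p, 0, 1);
	printf("task on cpu %d, rq0=%d rq1=%d\n",
	       p.cpu, runqueues[0].nr_running, runqueues[1].nr_running);
	return 0;
}

The price of dropping the source lock early is the window in which the task belongs to no runqueue, which is exactly why the patch introduces the explicit TASK_ON_RQ_MIGRATING state: observers that previously relied on both locks being held must now recognize a task that is in flight.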