@@ -1109,10 +1109,10 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 
 	p->sched_class->set_cpus_allowed(p, new_mask);
 
-	if (running)
-		p->sched_class->set_curr_task(rq);
 	if (queued)
 		enqueue_task(rq, p, ENQUEUE_RESTORE);
+	if (running)
+		p->sched_class->set_curr_task(rq);
 }
 
 /*
@@ -3707,10 +3707,10 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 
 	p->prio = prio;
 
-	if (running)
-		p->sched_class->set_curr_task(rq);
 	if (queued)
 		enqueue_task(rq, p, queue_flag);
+	if (running)
+		p->sched_class->set_curr_task(rq);
 
 	check_class_changed(rq, p, prev_class, oldprio);
 out_unlock:
@@ -4263,8 +4263,6 @@ change:
 	prev_class = p->sched_class;
 	__setscheduler(rq, p, attr, pi);
 
-	if (running)
-		p->sched_class->set_curr_task(rq);
 	if (queued) {
 		/*
 		 * We enqueue to tail when the priority of a task is
@@ -4275,6 +4273,8 @@ change:
 
 		enqueue_task(rq, p, queue_flags);
 	}
+	if (running)
+		p->sched_class->set_curr_task(rq);
 
 	check_class_changed(rq, p, prev_class, oldprio);
 	preempt_disable(); /* avoid rq from going away on us */
@@ -5439,10 +5439,10 @@ void sched_setnuma(struct task_struct *p, int nid)
 
 	p->numa_preferred_nid = nid;
 
-	if (running)
-		p->sched_class->set_curr_task(rq);
 	if (queued)
 		enqueue_task(rq, p, ENQUEUE_RESTORE);
+	if (running)
+		p->sched_class->set_curr_task(rq);
 	task_rq_unlock(rq, p, &rf);
 }
 #endif /* CONFIG_NUMA_BALANCING */
@@ -7949,10 +7949,10 @@ void sched_move_task(struct task_struct *tsk)
 
 	sched_change_group(tsk, TASK_MOVE_GROUP);
 
-	if (unlikely(running))
-		tsk->sched_class->set_curr_task(rq);
 	if (queued)
 		enqueue_task(rq, tsk, ENQUEUE_RESTORE | ENQUEUE_MOVE);
+	if (unlikely(running))
+		tsk->sched_class->set_curr_task(rq);
 
 	task_rq_unlock(rq, tsk, &rf);
 }
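
Every hunk applies the same reordering: the sched_class->set_curr_task() callback now runs after enqueue_task() instead of before it, so the task is back on the runqueue when its class is told it is current again. Below is a minimal sketch of the shared change pattern at these call sites, with the surrounding dequeue/put_prev steps filled in from the usual scheduler idiom; the helper name change_task_attr() and apply_attr_change() are hypothetical, for illustration only, not kernel code.

/*
 * Sketch (hypothetical helper): take the task off the runqueue and,
 * if it is current, hand it back to its class as the previous task;
 * apply the attribute change; re-enqueue; and only then, with the task
 * visible on the runqueue again, invoke set_curr_task() -- the ordering
 * this patch establishes at each call site.
 */
static void change_task_attr(struct rq *rq, struct task_struct *p, int attr)
{
	bool queued = task_on_rq_queued(p);
	bool running = task_current(rq, p);

	if (queued)
		dequeue_task(rq, p, DEQUEUE_SAVE);
	if (running)
		put_prev_task(rq, p);

	apply_attr_change(p, attr);	/* e.g. p->prio = prio; (hypothetical) */

	if (queued)
		enqueue_task(rq, p, ENQUEUE_RESTORE);
	if (running)			/* after enqueue_task(), per this patch */
		p->sched_class->set_curr_task(rq);
}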