@@ -1070,7 +1070,7 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 	p->sched_class->set_cpus_allowed(p, new_mask);
 
 	if (queued)
-		enqueue_task(rq, p, ENQUEUE_RESTORE);
+		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
 	if (running)
 		set_curr_task(rq, p);
 }
@@ -3815,7 +3815,7 @@ void set_user_nice(struct task_struct *p, long nice)
 	delta = p->prio - old_prio;
 
 	if (queued) {
-		enqueue_task(rq, p, ENQUEUE_RESTORE);
+		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
 		/*
 		 * If the task increased its priority or is running and
 		 * lowered its priority, then reschedule its CPU:
@@ -5517,7 +5517,7 @@ void sched_setnuma(struct task_struct *p, int nid)
 	p->numa_preferred_nid = nid;
 
 	if (queued)
-		enqueue_task(rq, p, ENQUEUE_RESTORE);
+		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
 	if (running)
 		set_curr_task(rq, p);
 
 	task_rq_unlock(rq, p, &rf);
@@ -6431,7 +6431,7 @@ void sched_move_task(struct task_struct *tsk)
 	sched_change_group(tsk, TASK_MOVE_GROUP);
 
 	if (queued)
-		enqueue_task(rq, tsk, ENQUEUE_RESTORE | ENQUEUE_MOVE);
+		enqueue_task(rq, tsk, ENQUEUE_RESTORE | ENQUEUE_MOVE | ENQUEUE_NOCLOCK);
 	if (running)
 		set_curr_task(rq, tsk);
 