@@ -641,17 +641,18 @@ static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time);
 
 inline void update_rq_clock(struct rq *rq)
 {
-	if (!rq->skip_clock_update) {
-		int cpu = cpu_of(rq);
-		u64 irq_time;
+	int cpu = cpu_of(rq);
+	u64 irq_time;
 
-		rq->clock = sched_clock_cpu(cpu);
-		irq_time = irq_time_cpu(cpu);
-		if (rq->clock - irq_time > rq->clock_task)
-			rq->clock_task = rq->clock - irq_time;
+	if (rq->skip_clock_update)
+		return;
 
-		sched_irq_time_avg_update(rq, irq_time);
-	}
+	rq->clock = sched_clock_cpu(cpu);
+	irq_time = irq_time_cpu(cpu);
+	if (rq->clock - irq_time > rq->clock_task)
+		rq->clock_task = rq->clock - irq_time;
+
+	sched_irq_time_avg_update(rq, irq_time);
 }
 
 /*
@@ -2129,7 +2130,7 @@ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 	 * A queue event has occurred, and we're going to schedule. In
 	 * this case, we can save a useless back to back clock update.
 	 */
-	if (test_tsk_need_resched(rq->curr))
+	if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr))
 		rq->skip_clock_update = 1;
 }
 
@@ -3973,7 +3974,6 @@ static void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
 	if (prev->se.on_rq)
 		update_rq_clock(rq);
-	rq->skip_clock_update = 0;
 	prev->sched_class->put_prev_task(rq, prev);
 }
 
@@ -4031,7 +4031,6 @@ need_resched_nonpreemptible:
 	hrtick_clear(rq);
 
 	raw_spin_lock_irq(&rq->lock);
-	clear_tsk_need_resched(prev);
 
 	switch_count = &prev->nivcsw;
 	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
@@ -4063,6 +4062,8 @@ need_resched_nonpreemptible:
 
 	put_prev_task(rq, prev);
 	next = pick_next_task(rq);
+	clear_tsk_need_resched(prev);
+	rq->skip_clock_update = 0;
 
 	if (likely(prev != next)) {
 		sched_info_switch(prev, next);
@@ -4071,6 +4072,7 @@ need_resched_nonpreemptible:
 	rq->nr_switches++;
 	rq->curr = next;
 	++*switch_count;
+	WARN_ON_ONCE(test_tsk_need_resched(next));
 
 	context_switch(rq, prev, next); /* unlocks the rq */
 	/*
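
For readers skimming the hunks above: after this change update_rq_clock() bails out at the top while rq->skip_clock_update is set, check_preempt_curr() only arms the flag when rq->curr is still on the runqueue, schedule() clears the flag right after pick_next_task() instead of doing so in put_prev_task() (the prev task's resched bit is likewise cleared only once the next task has been picked), and a WARN_ON_ONCE() checks that the freshly picked task does not already carry a pending resched. The stand-alone C sketch below is only an illustrative model of that flag handling, not kernel code: struct rq is reduced to two fields and sched_clock_cpu() is approximated with clock_gettime(), so everything except the flag logic is an assumption made to keep the example compilable.

/*
 * Illustrative user-space model of the skip_clock_update handling after
 * this patch -- NOT kernel code.  struct rq is a two-field stand-in and
 * sched_clock() below approximates sched_clock_cpu() with clock_gettime().
 */
#define _POSIX_C_SOURCE 199309L
#include <stdio.h>
#include <time.h>

struct rq {
	unsigned long long clock;	/* last sampled clock, in ns */
	int skip_clock_update;		/* next clock update would be redundant */
};

static unsigned long long sched_clock(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned long long)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

/* As in the patched update_rq_clock(): test the flag first and return early. */
static void update_rq_clock(struct rq *rq)
{
	if (rq->skip_clock_update)
		return;

	rq->clock = sched_clock();
}

/* A queue event that will reschedule immediately marks the next update redundant. */
static void check_preempt_curr(struct rq *rq)
{
	rq->skip_clock_update = 1;
}

/*
 * Model of schedule(): the clock update requested on behalf of prev is
 * skipped while the flag is set, and the flag is cleared only once the
 * next task has been picked (in the kernel, after pick_next_task()).
 */
static void schedule(struct rq *rq)
{
	update_rq_clock(rq);		/* skipped while the flag is set */
	rq->skip_clock_update = 0;	/* re-arm clock updates for the next task */
}

int main(void)
{
	struct rq rq = { 0, 0 };

	update_rq_clock(&rq);
	printf("initial clock:        %llu\n", rq.clock);

	check_preempt_curr(&rq);	/* wakeup right before we schedule */
	schedule(&rq);			/* the back-to-back update is skipped */
	printf("after skipped update: %llu (unchanged)\n", rq.clock);

	update_rq_clock(&rq);		/* flag cleared, updates work again */
	printf("after normal update:  %llu\n", rq.clock);
	return 0;
}

Building it with a plain "cc model.c" (older glibc may need -lrt for clock_gettime) shows the clock standing still across the skipped update and advancing again once the flag has been cleared, which is the behaviour the patch relies on.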