@@ -119,7 +119,9 @@ void update_rq_clock(struct rq *rq)
 {
	s64 delta;

-	if (rq->skip_clock_update > 0)
+	lockdep_assert_held(&rq->lock);
+
+	if (rq->clock_skip_update & RQCF_ACT_SKIP)
		return;

	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
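
Note: RQCF_ACT_SKIP is not defined in the hunks shown here; it presumably comes from the companion header change. A minimal sketch of the two-bit layout this check assumes, for illustration only:

	/* Sketch, not part of the hunks above: one bit to request a skip, one to act on it. */
	#define RQCF_REQ_SKIP	0x01	/* skip requested (e.g. by check_preempt_curr()) */
	#define RQCF_ACT_SKIP	0x02	/* request promoted; skip applies to this __schedule() pass */
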
@@ -1046,7 +1048,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
	 * this case, we can save a useless back to back clock update.
	 */
	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
-		rq->skip_clock_update = 1;
+		rq_clock_skip_update(rq, true);
 }

 #ifdef CONFIG_SMP
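
Note: rq_clock_skip_update() replaces the direct write to rq->skip_clock_update. Its definition is not part of this hunk; a sketch of what such a helper plausibly looks like, setting or clearing only the request bit while the rq lock is held:

	/* Sketch only -- the real helper lives outside the hunks shown here. */
	static inline void rq_clock_skip_update(struct rq *rq, bool skip)
	{
		lockdep_assert_held(&rq->lock);
		if (skip)
			rq->clock_skip_update |= RQCF_REQ_SKIP;
		else
			rq->clock_skip_update &= ~RQCF_REQ_SKIP;
	}
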
@@ -2779,6 +2781,8 @@ need_resched:
	smp_mb__before_spinlock();
	raw_spin_lock_irq(&rq->lock);

+	rq->clock_skip_update <<= 1; /* promote REQ to ACT */
+
	switch_count = &prev->nivcsw;
	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
		if (unlikely(signal_pending_state(prev->state, prev))) {
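
Note: assuming the flag values sketched above, the left shift promotes a pending request into an active skip for this pass through __schedule():

	/*
	 * clock_skip_update == RQCF_REQ_SKIP (0x01)  -->  0x02 == RQCF_ACT_SKIP
	 * clock_skip_update == 0                     -->  0 (no skip)	 
	 * A stale ACT bit (0x02) would shift to 0x04, which matches neither
	 * flag, so only a skip requested before this schedule() is honoured.
	 */
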
@@ -2803,13 +2807,13 @@ need_resched:
		switch_count = &prev->nvcsw;
	}

-	if (task_on_rq_queued(prev) || rq->skip_clock_update < 0)
+	if (task_on_rq_queued(prev))
		update_rq_clock(rq);

	next = pick_next_task(rq, prev);
	clear_tsk_need_resched(prev);
	clear_preempt_need_resched();
-	rq->skip_clock_update = 0;
+	rq->clock_skip_update = 0;

	if (likely(prev != next)) {
		rq->nr_switches++;
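
Note: besides the rename to the new field, resetting clock_skip_update to 0 after pick_next_task() drops both the request and the active bit, so a skip never outlives a single pass through __schedule() and clock updates resume normally after the context switch.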