|
@@ -650,6 +650,11 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
|
|
|
ts->next_tick = 0;
|
|
|
}
|
|
|
|
|
|
+static inline bool local_timer_softirq_pending(void)
|
|
|
+{
|
|
|
+ return local_softirq_pending() & TIMER_SOFTIRQ;
|
|
|
+}
|
|
|
+
|
|
|
static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
|
|
|
ktime_t now, int cpu)
|
|
|
{
|
|
@@ -666,8 +671,18 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
|
|
|
} while (read_seqretry(&jiffies_lock, seq));
|
|
|
ts->last_jiffies = basejiff;
|
|
|
|
|
|
- if (rcu_needs_cpu(basemono, &next_rcu) ||
|
|
|
- arch_needs_cpu() || irq_work_needs_cpu()) {
|
|
|
+ /*
|
|
|
+ * Keep the periodic tick, when RCU, architecture or irq_work
|
|
|
+ * requests it.
|
|
|
+ * Aside of that check whether the local timer softirq is
|
|
|
+ * pending. If so, it's a bad idea to call get_next_timer_interrupt()
|
|
|
+ * because there is an already expired timer, so it will request
|
|
|
+ * immediate expiry, which rearms the hardware timer with a
|
|
|
+ * minimal delta which brings us back to this place
|
|
|
+ * immediately. Lather, rinse and repeat...
|
|
|
+ */
|
|
|
+ if (rcu_needs_cpu(basemono, &next_rcu) || arch_needs_cpu() ||
|
|
|
+ irq_work_needs_cpu() || local_timer_softirq_pending()) {
|
|
|
next_tick = basemono + TICK_NSEC;
|
|
|
} else {
|
|
|
/*
|