@@ -266,7 +266,7 @@ lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
 /*
  * Divide a ktime value by a nanosecond value
  */
-u64 ktime_divns(const ktime_t kt, s64 div)
+u64 __ktime_divns(const ktime_t kt, s64 div)
 {
 	u64 dclc;
 	int sft = 0;
@@ -282,7 +282,7 @@ u64 ktime_divns(const ktime_t kt, s64 div)
 
 	return dclc;
 }
-EXPORT_SYMBOL_GPL(ktime_divns);
+EXPORT_SYMBOL_GPL(__ktime_divns);
 #endif /* BITS_PER_LONG >= 64 */
 
 /*
@@ -440,6 +440,37 @@ static inline void debug_deactivate(struct hrtimer *timer)
 	trace_hrtimer_cancel(timer);
 }
 
+#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
+static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
+{
+	struct hrtimer_clock_base *base = cpu_base->clock_base;
+	ktime_t expires, expires_next = { .tv64 = KTIME_MAX };
+	int i;
+
+	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
+		struct timerqueue_node *next;
+		struct hrtimer *timer;
+
+		next = timerqueue_getnext(&base->active);
+		if (!next)
+			continue;
+
+		timer = container_of(next, struct hrtimer, node);
+		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
+		if (expires.tv64 < expires_next.tv64)
+			expires_next = expires;
+	}
+	/*
+	 * clock_was_set() might have changed base->offset of any of
+	 * the clock bases so the result might be negative. Fix it up
+	 * to prevent a false positive in clockevents_program_event().
+	 */
+	if (expires_next.tv64 < 0)
+		expires_next.tv64 = 0;
+	return expires_next;
+}
+#endif
+
 /* High resolution timer related functions */
 #ifdef CONFIG_HIGH_RES_TIMERS
 
@@ -488,32 +519,7 @@ static inline int hrtimer_hres_active(void)
 static void
 hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 {
-	int i;
-	struct hrtimer_clock_base *base = cpu_base->clock_base;
-	ktime_t expires, expires_next;
-
-	expires_next.tv64 = KTIME_MAX;
-
-	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
-		struct hrtimer *timer;
-		struct timerqueue_node *next;
-
-		next = timerqueue_getnext(&base->active);
-		if (!next)
-			continue;
-		timer = container_of(next, struct hrtimer, node);
-
-		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
-		/*
-		 * clock_was_set() has changed base->offset so the
-		 * result might be negative. Fix it up to prevent a
-		 * false positive in clockevents_program_event()
-		 */
-		if (expires.tv64 < 0)
-			expires.tv64 = 0;
-		if (expires.tv64 < expires_next.tv64)
-			expires_next = expires;
-	}
+	ktime_t expires_next = __hrtimer_get_next_event(cpu_base);
 
 	if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64)
 		return;
@@ -586,6 +592,15 @@ static int hrtimer_reprogram(struct hrtimer *timer,
 	if (expires.tv64 >= cpu_base->expires_next.tv64)
 		return 0;
 
+	/*
+	 * When the target cpu of the timer is currently executing
+	 * hrtimer_interrupt(), then we do not touch the clock event
+	 * device. hrtimer_interrupt() will reevaluate all clock bases
+	 * before reprogramming the device.
+	 */
+	if (cpu_base->in_hrtirq)
+		return 0;
+
 	/*
 	 * If a hang was detected in the last timer interrupt then we
 	 * do not schedule a timer which is earlier than the expiry
@@ -1104,29 +1119,14 @@ EXPORT_SYMBOL_GPL(hrtimer_get_remaining);
 ktime_t hrtimer_get_next_event(void)
 {
 	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
-	struct hrtimer_clock_base *base = cpu_base->clock_base;
-	ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
+	ktime_t mindelta = { .tv64 = KTIME_MAX };
 	unsigned long flags;
-	int i;
 
 	raw_spin_lock_irqsave(&cpu_base->lock, flags);
 
-	if (!hrtimer_hres_active()) {
-		for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
-			struct hrtimer *timer;
-			struct timerqueue_node *next;
-
-			next = timerqueue_getnext(&base->active);
-			if (!next)
-				continue;
-
-			timer = container_of(next, struct hrtimer, node);
-			delta.tv64 = hrtimer_get_expires_tv64(timer);
-			delta = ktime_sub(delta, base->get_time());
-			if (delta.tv64 < mindelta.tv64)
-				mindelta.tv64 = delta.tv64;
-		}
-	}
+	if (!hrtimer_hres_active())
+		mindelta = ktime_sub(__hrtimer_get_next_event(cpu_base),
+				     ktime_get());
 
 	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
 
@@ -1253,7 +1253,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 	raw_spin_lock(&cpu_base->lock);
 	entry_time = now = hrtimer_update_base(cpu_base);
 retry:
-	expires_next.tv64 = KTIME_MAX;
+	cpu_base->in_hrtirq = 1;
 	/*
 	 * We set expires_next to KTIME_MAX here with cpu_base->lock
 	 * held to prevent that a timer is enqueued in our queue via
@@ -1291,28 +1291,20 @@ retry:
 			 * are right-of a not yet expired timer, because that
 			 * timer will have to trigger a wakeup anyway.
 			 */
-
-			if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) {
-				ktime_t expires;
-
-				expires = ktime_sub(hrtimer_get_expires(timer),
-						    base->offset);
-				if (expires.tv64 < 0)
-					expires.tv64 = KTIME_MAX;
-				if (expires.tv64 < expires_next.tv64)
-					expires_next = expires;
+			if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer))
 				break;
-			}
 
 			__run_hrtimer(timer, &basenow);
 		}
 	}
-
+	/* Reevaluate the clock bases for the next expiry */
+	expires_next = __hrtimer_get_next_event(cpu_base);
 	/*
 	 * Store the new expiry value so the migration code can verify
 	 * against it.
 	 */
 	cpu_base->expires_next = expires_next;
+	cpu_base->in_hrtirq = 0;
 	raw_spin_unlock(&cpu_base->lock);
 
 	/* Reprogramming necessary ? */