@@ -59,17 +59,15 @@ struct tk_fast {
 };
 
 static struct tk_fast tk_fast_mono ____cacheline_aligned;
+static struct tk_fast tk_fast_raw ____cacheline_aligned;
 
 /* flag for if timekeeping is suspended */
 int __read_mostly timekeeping_suspended;
 
-/* Flag for if there is a persistent clock on this platform */
-bool __read_mostly persistent_clock_exist = false;
-
 static inline void tk_normalize_xtime(struct timekeeper *tk)
 {
-        while (tk->tkr.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr.shift)) {
-                tk->tkr.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr.shift;
+        while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) {
+                tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
                 tk->xtime_sec++;
         }
 }
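
As an aside, the shifted-nanosecond bookkeeping that tk_normalize_xtime() maintains is easy to model in isolation: xtime_nsec stores nanoseconds pre-shifted by the clocksource shift, so whole seconds have to be carried out into xtime_sec by hand. A minimal user-space sketch of that invariant (names and values invented, not kernel API):

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    struct xt { uint64_t sec; uint64_t nsec_shifted; unsigned shift; };

    /* Carry whole seconds out of the shifted-ns accumulator. */
    static void normalize(struct xt *x)
    {
            while (x->nsec_shifted >= (NSEC_PER_SEC << x->shift)) {
                    x->nsec_shifted -= NSEC_PER_SEC << x->shift;
                    x->sec++;
            }
    }

    int main(void)
    {
            struct xt x = { 0, 2500000000ULL << 8, 8 };  /* 2.5 s at shift 8 */
            normalize(&x);
            printf("%llu s + %llu ns\n", (unsigned long long)x.sec,
                   (unsigned long long)(x.nsec_shifted >> x.shift));
            return 0;   /* prints "2 s + 500000000 ns" */
    }
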
@@ -79,20 +77,20 @@ static inline struct timespec64 tk_xtime(struct timekeeper *tk)
         struct timespec64 ts;
 
         ts.tv_sec = tk->xtime_sec;
-        ts.tv_nsec = (long)(tk->tkr.xtime_nsec >> tk->tkr.shift);
+        ts.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
         return ts;
 }
 
 static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
 {
         tk->xtime_sec = ts->tv_sec;
-        tk->tkr.xtime_nsec = (u64)ts->tv_nsec << tk->tkr.shift;
+        tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift;
 }
 
 static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
 {
         tk->xtime_sec += ts->tv_sec;
-        tk->tkr.xtime_nsec += (u64)ts->tv_nsec << tk->tkr.shift;
+        tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift;
         tk_normalize_xtime(tk);
 }
@@ -118,6 +116,117 @@ static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
         tk->offs_boot = ktime_add(tk->offs_boot, delta);
 }
 
+#ifdef CONFIG_DEBUG_TIMEKEEPING
+#define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */
+/*
+ * These simple flag variables are managed
+ * without locks, which is racy, but ok since
+ * we don't really care about being super
+ * precise about how many events were seen,
+ * just that a problem was observed.
+ */
+static int timekeeping_underflow_seen;
+static int timekeeping_overflow_seen;
+
+/* last_warning is only modified under the timekeeping lock */
+static long timekeeping_last_warning;
+
+static void timekeeping_check_update(struct timekeeper *tk, cycle_t offset)
+{
+
+        cycle_t max_cycles = tk->tkr_mono.clock->max_cycles;
+        const char *name = tk->tkr_mono.clock->name;
+
+        if (offset > max_cycles) {
+                printk_deferred("WARNING: timekeeping: Cycle offset (%lld) is larger than allowed by the '%s' clock's max_cycles value (%lld): time overflow danger\n",
+                                offset, name, max_cycles);
+                printk_deferred("         timekeeping: Your kernel is sick, but tries to cope by capping time updates\n");
+        } else {
+                if (offset > (max_cycles >> 1)) {
+                        printk_deferred("INFO: timekeeping: Cycle offset (%lld) is larger than the '%s' clock's 50%% safety margin (%lld)\n",
+                                        offset, name, max_cycles >> 1);
+                        printk_deferred("      timekeeping: Your kernel is still fine, but is feeling a bit nervous\n");
+                }
+        }
+
+        if (timekeeping_underflow_seen) {
+                if (jiffies - timekeeping_last_warning > WARNING_FREQ) {
+                        printk_deferred("WARNING: Underflow in clocksource '%s' observed, time update ignored.\n", name);
+                        printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
+                        printk_deferred("         Your kernel is probably still fine.\n");
+                        timekeeping_last_warning = jiffies;
+                }
+                timekeeping_underflow_seen = 0;
+        }
+
+        if (timekeeping_overflow_seen) {
+                if (jiffies - timekeeping_last_warning > WARNING_FREQ) {
+                        printk_deferred("WARNING: Overflow in clocksource '%s' observed, time update capped.\n", name);
+                        printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
+                        printk_deferred("         Your kernel is probably still fine.\n");
+                        timekeeping_last_warning = jiffies;
+                }
+                timekeeping_overflow_seen = 0;
+        }
+}
+
+static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
+{
+        cycle_t now, last, mask, max, delta;
+        unsigned int seq;
+
+        /*
+         * Since we're called holding a seqlock, the data may shift
+         * under us while we're doing the calculation. This can cause
+         * false positives, since we'd note a problem but throw the
+         * results away. So nest another seqlock here to atomically
+         * grab the points we are checking with.
+         */
+        do {
+                seq = read_seqcount_begin(&tk_core.seq);
+                now = tkr->read(tkr->clock);
+                last = tkr->cycle_last;
+                mask = tkr->mask;
+                max = tkr->clock->max_cycles;
+        } while (read_seqcount_retry(&tk_core.seq, seq));
+
+        delta = clocksource_delta(now, last, mask);
+
+        /*
+         * Try to catch underflows by checking if we are seeing small
+         * mask-relative negative values.
+         */
+        if (unlikely((~delta & mask) < (mask >> 3))) {
+                timekeeping_underflow_seen = 1;
+                delta = 0;
+        }
+
+        /* Cap delta value to the max_cycles values to avoid mult overflows */
+        if (unlikely(delta > max)) {
+                timekeeping_overflow_seen = 1;
+                delta = tkr->clock->max_cycles;
+        }
+
+        return delta;
+}
+#else
+static inline void timekeeping_check_update(struct timekeeper *tk, cycle_t offset)
+{
+}
+static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
+{
+        cycle_t cycle_now, delta;
+
+        /* read clocksource */
+        cycle_now = tkr->read(tkr->clock);
+
+        /* calculate the delta since the last update_wall_time */
+        delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);
+
+        return delta;
+}
+#endif
+
 /**
  * tk_setup_internals - Set up internals to use clocksource clock.
  *
@@ -135,11 +244,16 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
         u64 tmp, ntpinterval;
         struct clocksource *old_clock;
 
-        old_clock = tk->tkr.clock;
-        tk->tkr.clock = clock;
-        tk->tkr.read = clock->read;
-        tk->tkr.mask = clock->mask;
-        tk->tkr.cycle_last = tk->tkr.read(clock);
+        old_clock = tk->tkr_mono.clock;
+        tk->tkr_mono.clock = clock;
+        tk->tkr_mono.read = clock->read;
+        tk->tkr_mono.mask = clock->mask;
+        tk->tkr_mono.cycle_last = tk->tkr_mono.read(clock);
+
+        tk->tkr_raw.clock = clock;
+        tk->tkr_raw.read = clock->read;
+        tk->tkr_raw.mask = clock->mask;
+        tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;
 
         /* Do the ns -> cycle conversion first, using original mult */
         tmp = NTP_INTERVAL_LENGTH;
@@ -163,11 +277,14 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
         if (old_clock) {
                 int shift_change = clock->shift - old_clock->shift;
                 if (shift_change < 0)
-                        tk->tkr.xtime_nsec >>= -shift_change;
+                        tk->tkr_mono.xtime_nsec >>= -shift_change;
                 else
-                        tk->tkr.xtime_nsec <<= shift_change;
+                        tk->tkr_mono.xtime_nsec <<= shift_change;
         }
-        tk->tkr.shift = clock->shift;
+        tk->tkr_raw.xtime_nsec = 0;
+
+        tk->tkr_mono.shift = clock->shift;
+        tk->tkr_raw.shift = clock->shift;
 
         tk->ntp_error = 0;
         tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
@@ -178,7 +295,8 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
          * active clocksource. These values will be adjusted via NTP
          * to counteract clock drifting.
          */
-        tk->tkr.mult = clock->mult;
+        tk->tkr_mono.mult = clock->mult;
+        tk->tkr_raw.mult = clock->mult;
         tk->ntp_err_mult = 0;
 }
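
tk_setup_internals() copies the clocksource's mult/shift pair into both readout bases; a cycle count is then turned into nanoseconds as (cycles * mult) >> shift, the same arithmetic clocksource_cyc2ns() performs. A hedged user-space illustration, with invented constants for a 24 MHz counter (mult chosen as (10^9 / 24*10^6) * 2^26):

    #include <stdint.h>
    #include <stdio.h>

    /* The (cycles * mult) >> shift arithmetic behind clocksource_cyc2ns(). */
    static uint64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
    {
            return (cycles * mult) >> shift;
    }

    int main(void)
    {
            /* 24000000 cycles of a 24 MHz counter ~= one second */
            printf("%llu ns\n",
                   (unsigned long long)cyc2ns(24000000, 2796202667u, 26));
            return 0;   /* prints ~1000000000 ns */
    }

The multiply is also why a delta must stay below max_cycles: past that point cycles * mult no longer fits in 64 bits, which is exactly what the new debug checks watch for.
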
@@ -193,14 +311,10 @@ static inline u32 arch_gettimeoffset(void) { return 0; }
 
 static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
 {
-        cycle_t cycle_now, delta;
+        cycle_t delta;
         s64 nsec;
 
-        /* read clocksource: */
-        cycle_now = tkr->read(tkr->clock);
-
-        /* calculate the delta since the last update_wall_time: */
-        delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);
+        delta = timekeeping_get_delta(tkr);
 
         nsec = delta * tkr->mult + tkr->xtime_nsec;
         nsec >>= tkr->shift;
@@ -209,25 +323,6 @@ static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
         return nsec + arch_gettimeoffset();
 }
 
-static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
-{
-        struct clocksource *clock = tk->tkr.clock;
-        cycle_t cycle_now, delta;
-        s64 nsec;
-
-        /* read clocksource: */
-        cycle_now = tk->tkr.read(clock);
-
-        /* calculate the delta since the last update_wall_time: */
-        delta = clocksource_delta(cycle_now, tk->tkr.cycle_last, tk->tkr.mask);
-
-        /* convert delta to nanoseconds. */
-        nsec = clocksource_cyc2ns(delta, clock->mult, clock->shift);
-
-        /* If arch requires, add in get_arch_timeoffset() */
-        return nsec + arch_gettimeoffset();
-}
-
 /**
  * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
  * @tkr: Timekeeping readout base from which we take the update
@@ -267,18 +362,18 @@ static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
  * slightly wrong timestamp (a few nanoseconds). See
  * @ktime_get_mono_fast_ns.
  */
-static void update_fast_timekeeper(struct tk_read_base *tkr)
+static void update_fast_timekeeper(struct tk_read_base *tkr, struct tk_fast *tkf)
 {
-        struct tk_read_base *base = tk_fast_mono.base;
+        struct tk_read_base *base = tkf->base;
 
         /* Force readers off to base[1] */
-        raw_write_seqcount_latch(&tk_fast_mono.seq);
+        raw_write_seqcount_latch(&tkf->seq);
 
         /* Update base[0] */
         memcpy(base, tkr, sizeof(*base));
 
         /* Force readers back to base[0] */
-        raw_write_seqcount_latch(&tk_fast_mono.seq);
+        raw_write_seqcount_latch(&tkf->seq);
 
         /* Update base[1] */
         memcpy(base + 1, base, sizeof(*base));
@@ -316,22 +411,33 @@ static void update_fast_timekeeper(struct tk_read_base *tkr)
  * of the following timestamps. Callers need to be aware of that and
  * deal with it.
  */
-u64 notrace ktime_get_mono_fast_ns(void)
+static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
 {
         struct tk_read_base *tkr;
         unsigned int seq;
         u64 now;
 
         do {
-                seq = raw_read_seqcount(&tk_fast_mono.seq);
-                tkr = tk_fast_mono.base + (seq & 0x01);
-                now = ktime_to_ns(tkr->base_mono) + timekeeping_get_ns(tkr);
+                seq = raw_read_seqcount(&tkf->seq);
+                tkr = tkf->base + (seq & 0x01);
+                now = ktime_to_ns(tkr->base) + timekeeping_get_ns(tkr);
+        } while (read_seqcount_retry(&tkf->seq, seq));
 
-        } while (read_seqcount_retry(&tk_fast_mono.seq, seq));
         return now;
 }
+
+u64 ktime_get_mono_fast_ns(void)
+{
+        return __ktime_get_fast_ns(&tk_fast_mono);
+}
 EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);
 
+u64 ktime_get_raw_fast_ns(void)
+{
+        return __ktime_get_fast_ns(&tk_fast_raw);
+}
+EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
+
 /* Suspend-time cycles value for halted fast timekeeper. */
 static cycle_t cycles_at_suspend;
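
update_fast_timekeeper() and __ktime_get_fast_ns() together implement a seqcount latch: the writer bumps the sequence around each copy so that a reader, even one running in NMI context, can always use base[seq & 1] without blocking. A rough user-space model of that discipline, with C11 atomics standing in for the kernel's raw_write_seqcount_latch() barriers (an illustration of the idea, not the kernel implementation):

    #include <stdatomic.h>
    #include <string.h>

    struct base { unsigned long long ns; };     /* stand-in for tk_read_base */
    struct fast { atomic_uint seq; struct base base[2]; };

    static void writer_update(struct fast *f, const struct base *src)
    {
            /* odd: readers pushed to base[1] */
            atomic_fetch_add_explicit(&f->seq, 1, memory_order_release);
            memcpy(&f->base[0], src, sizeof(*src));
            /* even: readers back on base[0] */
            atomic_fetch_add_explicit(&f->seq, 1, memory_order_release);
            memcpy(&f->base[1], &f->base[0], sizeof(f->base[0]));
    }

    static unsigned long long reader_ns(struct fast *f)
    {
            unsigned s;
            unsigned long long v;

            do {
                    s = atomic_load_explicit(&f->seq, memory_order_acquire);
                    v = f->base[s & 1].ns;  /* never blocks, even from an NMI */
            } while (atomic_load_explicit(&f->seq, memory_order_acquire) != s);
            return v;
    }

    int main(void)
    {
            struct fast f = { 0 };
            struct base b = { 42 };
            writer_update(&f, &b);
            return reader_ns(&f) == 42 ? 0 : 1;
    }
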
@@ -353,12 +459,17 @@ static cycle_t dummy_clock_read(struct clocksource *cs)
 static void halt_fast_timekeeper(struct timekeeper *tk)
 {
         static struct tk_read_base tkr_dummy;
-        struct tk_read_base *tkr = &tk->tkr;
+        struct tk_read_base *tkr = &tk->tkr_mono;
 
         memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
         cycles_at_suspend = tkr->read(tkr->clock);
         tkr_dummy.read = dummy_clock_read;
-        update_fast_timekeeper(&tkr_dummy);
+        update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);
+
+        tkr = &tk->tkr_raw;
+        memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
+        tkr_dummy.read = dummy_clock_read;
+        update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
 }
 
 #ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD
@@ -369,8 +480,8 @@ static inline void update_vsyscall(struct timekeeper *tk)
 
         xt = timespec64_to_timespec(tk_xtime(tk));
         wm = timespec64_to_timespec(tk->wall_to_monotonic);
-        update_vsyscall_old(&xt, &wm, tk->tkr.clock, tk->tkr.mult,
-                            tk->tkr.cycle_last);
+        update_vsyscall_old(&xt, &wm, tk->tkr_mono.clock, tk->tkr_mono.mult,
+                            tk->tkr_mono.cycle_last);
 }
 
 static inline void old_vsyscall_fixup(struct timekeeper *tk)
@@ -387,11 +498,11 @@ static inline void old_vsyscall_fixup(struct timekeeper *tk)
          * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
          * users are removed, this can be killed.
          */
-        remainder = tk->tkr.xtime_nsec & ((1ULL << tk->tkr.shift) - 1);
-        tk->tkr.xtime_nsec -= remainder;
-        tk->tkr.xtime_nsec += 1ULL << tk->tkr.shift;
+        remainder = tk->tkr_mono.xtime_nsec & ((1ULL << tk->tkr_mono.shift) - 1);
+        tk->tkr_mono.xtime_nsec -= remainder;
+        tk->tkr_mono.xtime_nsec += 1ULL << tk->tkr_mono.shift;
         tk->ntp_error += remainder << tk->ntp_error_shift;
-        tk->ntp_error -= (1ULL << tk->tkr.shift) << tk->ntp_error_shift;
+        tk->ntp_error -= (1ULL << tk->tkr_mono.shift) << tk->ntp_error_shift;
 }
 #else
 #define old_vsyscall_fixup(tk)
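
The rounding old_vsyscall_fixup() does is plain integer arithmetic: chop off the sub-nanosecond remainder, round up to the next full (shifted) nanosecond, and book both adjustments into ntp_error so they cancel out later. A sketch with a purely illustrative shift of 8 (the kernel additionally scales the error by ntp_error_shift):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned shift = 8;                           /* illustrative only */
            uint64_t xtime_nsec = (123ULL << shift) + 57; /* 123 ns + fraction */
            uint64_t remainder = xtime_nsec & ((1ULL << shift) - 1);
            int64_t ntp_error = 0;

            /* Round up to a full shifted nanosecond, remember the fudge. */
            xtime_nsec -= remainder;
            xtime_nsec += 1ULL << shift;
            ntp_error += remainder;
            ntp_error -= 1ULL << shift;

            printf("ns=%llu err=%lld\n",
                   (unsigned long long)(xtime_nsec >> shift),
                   (long long)ntp_error);   /* ns=124 err=-199 */
            return 0;
    }
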
@@ -456,17 +567,17 @@ static inline void tk_update_ktime_data(struct timekeeper *tk)
          */
         seconds = (u64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
         nsec = (u32) tk->wall_to_monotonic.tv_nsec;
-        tk->tkr.base_mono = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);
+        tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);
 
         /* Update the monotonic raw base */
-        tk->base_raw = timespec64_to_ktime(tk->raw_time);
+        tk->tkr_raw.base = timespec64_to_ktime(tk->raw_time);
 
         /*
          * The sum of the nanoseconds portions of xtime and
          * wall_to_monotonic can be greater/equal one second. Take
          * this into account before updating tk->ktime_sec.
          */
-        nsec += (u32)(tk->tkr.xtime_nsec >> tk->tkr.shift);
+        nsec += (u32)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
         if (nsec >= NSEC_PER_SEC)
                 seconds++;
         tk->ktime_sec = seconds;
@@ -489,7 +600,8 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
                 memcpy(&shadow_timekeeper, &tk_core.timekeeper,
                        sizeof(tk_core.timekeeper));
 
-        update_fast_timekeeper(&tk->tkr);
+        update_fast_timekeeper(&tk->tkr_mono, &tk_fast_mono);
+        update_fast_timekeeper(&tk->tkr_raw, &tk_fast_raw);
 }
 
 /**
@@ -501,22 +613,23 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
  */
 static void timekeeping_forward_now(struct timekeeper *tk)
 {
-        struct clocksource *clock = tk->tkr.clock;
+        struct clocksource *clock = tk->tkr_mono.clock;
         cycle_t cycle_now, delta;
         s64 nsec;
 
-        cycle_now = tk->tkr.read(clock);
-        delta = clocksource_delta(cycle_now, tk->tkr.cycle_last, tk->tkr.mask);
-        tk->tkr.cycle_last = cycle_now;
+        cycle_now = tk->tkr_mono.read(clock);
+        delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
+        tk->tkr_mono.cycle_last = cycle_now;
+        tk->tkr_raw.cycle_last = cycle_now;
 
-        tk->tkr.xtime_nsec += delta * tk->tkr.mult;
+        tk->tkr_mono.xtime_nsec += delta * tk->tkr_mono.mult;
 
         /* If arch requires, add in get_arch_timeoffset() */
-        tk->tkr.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr.shift;
+        tk->tkr_mono.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_mono.shift;
 
         tk_normalize_xtime(tk);
 
-        nsec = clocksource_cyc2ns(delta, clock->mult, clock->shift);
+        nsec = clocksource_cyc2ns(delta, tk->tkr_raw.mult, tk->tkr_raw.shift);
         timespec64_add_ns(&tk->raw_time, nsec);
 }
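
After this change timekeeping_forward_now() derives both clocks from a single counter read: cycle_last advances identically in tkr_mono and tkr_raw, while the same delta is scaled once with the mono mult (into the shifted accumulator) and once with the raw mult/shift. A toy model of that lockstep (mask handling omitted for brevity; field names mirror the diff, values invented):

    #include <stdint.h>

    struct rb { uint64_t cycle_last, xtime_nsec; uint32_t mult, shift; };

    /* One delta, two bases: keep mono and raw in step, as
     * timekeeping_forward_now() does after this patch. */
    static uint64_t forward(struct rb *mono, struct rb *raw, uint64_t now)
    {
            uint64_t delta = now - mono->cycle_last;

            mono->cycle_last = now;
            raw->cycle_last = now;
            mono->xtime_nsec += delta * mono->mult;    /* shifted ns */
            return (delta * raw->mult) >> raw->shift;  /* raw ns for raw_time */
    }

    int main(void)
    {
            struct rb mono = { 0, 0, 5, 0 }, raw = { 0, 0, 4, 2 };
            uint64_t raw_ns = forward(&mono, &raw, 100);  /* delta = 100 */
            return (int)(raw_ns - 100);   /* (100*4)>>2 == 100 ns */
    }
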
@@ -537,7 +650,7 @@ int __getnstimeofday64(struct timespec64 *ts)
                 seq = read_seqcount_begin(&tk_core.seq);
 
                 ts->tv_sec = tk->xtime_sec;
-                nsecs = timekeeping_get_ns(&tk->tkr);
+                nsecs = timekeeping_get_ns(&tk->tkr_mono);
 
         } while (read_seqcount_retry(&tk_core.seq, seq));
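
The nsecs read above comes through timekeeping_get_ns(), which with CONFIG_DEBUG_TIMEKEEPING routes every delta through timekeeping_get_delta() from earlier in this patch. Its two heuristics are easy to exercise in user space (mask and max_cycles values invented):

    #include <stdint.h>
    #include <stdio.h>

    /* Mimics clocksource_delta() plus the CONFIG_DEBUG_TIMEKEEPING checks:
     * a "small negative" masked delta is treated as underflow, and a
     * delta above max_cycles is capped to avoid mult overflow. */
    static uint64_t checked_delta(uint64_t now, uint64_t last,
                                  uint64_t mask, uint64_t max_cycles)
    {
            uint64_t delta = (now - last) & mask;

            if ((~delta & mask) < (mask >> 3)) {    /* top 1/8th: underflow */
                    printf("underflow seen\n");
                    return 0;
            }
            if (delta > max_cycles) {               /* cap: overflow danger */
                    printf("overflow seen\n");
                    return max_cycles;
            }
            return delta;
    }

    int main(void)
    {
            uint64_t mask = 0xffffffffULL;          /* 32-bit counter, invented */
            checked_delta(10, 20, mask, 1u << 30);  /* reads "behind": underflow */
            checked_delta(0x80000000ULL, 0, mask, 1u << 30); /* too big: capped */
            return 0;
    }
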
@@ -577,8 +690,8 @@ ktime_t ktime_get(void)
 
         do {
                 seq = read_seqcount_begin(&tk_core.seq);
-                base = tk->tkr.base_mono;
-                nsecs = timekeeping_get_ns(&tk->tkr);
+                base = tk->tkr_mono.base;
+                nsecs = timekeeping_get_ns(&tk->tkr_mono);
 
         } while (read_seqcount_retry(&tk_core.seq, seq));
 
@@ -603,8 +716,8 @@ ktime_t ktime_get_with_offset(enum tk_offsets offs)
 
         do {
                 seq = read_seqcount_begin(&tk_core.seq);
-                base = ktime_add(tk->tkr.base_mono, *offset);
-                nsecs = timekeeping_get_ns(&tk->tkr);
+                base = ktime_add(tk->tkr_mono.base, *offset);
+                nsecs = timekeeping_get_ns(&tk->tkr_mono);
 
         } while (read_seqcount_retry(&tk_core.seq, seq));
 
@@ -645,8 +758,8 @@ ktime_t ktime_get_raw(void)
 
         do {
                 seq = read_seqcount_begin(&tk_core.seq);
-                base = tk->base_raw;
-                nsecs = timekeeping_get_ns_raw(tk);
+                base = tk->tkr_raw.base;
+                nsecs = timekeeping_get_ns(&tk->tkr_raw);
 
         } while (read_seqcount_retry(&tk_core.seq, seq));
 
@@ -674,7 +787,7 @@ void ktime_get_ts64(struct timespec64 *ts)
         do {
                 seq = read_seqcount_begin(&tk_core.seq);
                 ts->tv_sec = tk->xtime_sec;
-                nsec = timekeeping_get_ns(&tk->tkr);
+                nsec = timekeeping_get_ns(&tk->tkr_mono);
                 tomono = tk->wall_to_monotonic;
 
         } while (read_seqcount_retry(&tk_core.seq, seq));
@@ -759,8 +872,8 @@ void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
                 ts_real->tv_sec = tk->xtime_sec;
                 ts_real->tv_nsec = 0;
 
-                nsecs_raw = timekeeping_get_ns_raw(tk);
-                nsecs_real = timekeeping_get_ns(&tk->tkr);
+                nsecs_raw = timekeeping_get_ns(&tk->tkr_raw);
+                nsecs_real = timekeeping_get_ns(&tk->tkr_mono);
 
         } while (read_seqcount_retry(&tk_core.seq, seq));
 
@@ -943,7 +1056,7 @@ static int change_clocksource(void *data)
          */
         if (try_module_get(new->owner)) {
                 if (!new->enable || new->enable(new) == 0) {
-                        old = tk->tkr.clock;
+                        old = tk->tkr_mono.clock;
                         tk_setup_internals(tk, new);
                         if (old->disable)
                                 old->disable(old);
@@ -971,11 +1084,11 @@ int timekeeping_notify(struct clocksource *clock)
 {
         struct timekeeper *tk = &tk_core.timekeeper;
 
-        if (tk->tkr.clock == clock)
+        if (tk->tkr_mono.clock == clock)
                 return 0;
         stop_machine(change_clocksource, clock, NULL);
         tick_clock_notify();
-        return tk->tkr.clock == clock ? 0 : -1;
+        return tk->tkr_mono.clock == clock ? 0 : -1;
 }
 
 /**
@@ -993,7 +1106,7 @@ void getrawmonotonic64(struct timespec64 *ts)
 
         do {
                 seq = read_seqcount_begin(&tk_core.seq);
-                nsecs = timekeeping_get_ns_raw(tk);
+                nsecs = timekeeping_get_ns(&tk->tkr_raw);
                 ts64 = tk->raw_time;
 
         } while (read_seqcount_retry(&tk_core.seq, seq));
@@ -1016,7 +1129,7 @@ int timekeeping_valid_for_hres(void)
         do {
                 seq = read_seqcount_begin(&tk_core.seq);
 
-                ret = tk->tkr.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
+                ret = tk->tkr_mono.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
 
         } while (read_seqcount_retry(&tk_core.seq, seq));
 
@@ -1035,7 +1148,7 @@ u64 timekeeping_max_deferment(void)
         do {
                 seq = read_seqcount_begin(&tk_core.seq);
 
-                ret = tk->tkr.clock->max_idle_ns;
+                ret = tk->tkr_mono.clock->max_idle_ns;
 
         } while (read_seqcount_retry(&tk_core.seq, seq));
@@ -1057,6 +1170,14 @@ void __weak read_persistent_clock(struct timespec *ts)
         ts->tv_nsec = 0;
 }
 
+void __weak read_persistent_clock64(struct timespec64 *ts64)
+{
+        struct timespec ts;
+
+        read_persistent_clock(&ts);
+        *ts64 = timespec_to_timespec64(ts);
+}
+
 /**
  * read_boot_clock - Return time of the system start.
  *
@@ -1072,6 +1193,20 @@ void __weak read_boot_clock(struct timespec *ts)
         ts->tv_nsec = 0;
 }
 
+void __weak read_boot_clock64(struct timespec64 *ts64)
+{
+        struct timespec ts;
+
+        read_boot_clock(&ts);
+        *ts64 = timespec_to_timespec64(ts);
+}
+
+/* Flag for if timekeeping_resume() has injected sleeptime */
+static bool sleeptime_injected;
+
+/* Flag for if there is a persistent clock on this platform */
+static bool persistent_clock_exists;
+
 /*
  * timekeeping_init - Initializes the clocksource and common timekeeping values
  */
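
read_persistent_clock64() and read_boot_clock64() are deliberately __weak: the generic wrappers above convert through the old 32-bit interfaces, and an architecture with a native 64-bit clock can simply provide its own stronger definition that the linker picks instead. The same mechanism, demonstrated as a stand-alone GCC/Clang program (the _demo name is hypothetical, not a kernel symbol):

    #include <stdio.h>

    /* A weak default that any stronger definition silently replaces,
     * just like the kernel's __weak read_persistent_clock64(). */
    __attribute__((weak)) void read_persistent_clock64_demo(long *sec)
    {
            *sec = 0;       /* default: "no persistent clock" */
    }

    int main(void)
    {
            long sec;

            read_persistent_clock64_demo(&sec);
            printf("persistent clock: %ld\n", sec);
            return 0;
    }
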
@@ -1081,20 +1216,17 @@ void __init timekeeping_init(void)
         struct clocksource *clock;
         unsigned long flags;
         struct timespec64 now, boot, tmp;
-        struct timespec ts;
 
-        read_persistent_clock(&ts);
-        now = timespec_to_timespec64(ts);
+        read_persistent_clock64(&now);
         if (!timespec64_valid_strict(&now)) {
                 pr_warn("WARNING: Persistent clock returned invalid value!\n"
                         "         Check your CMOS/BIOS settings.\n");
                 now.tv_sec = 0;
                 now.tv_nsec = 0;
         } else if (now.tv_sec || now.tv_nsec)
-                persistent_clock_exist = true;
+                persistent_clock_exists = true;
 
-        read_boot_clock(&ts);
-        boot = timespec_to_timespec64(ts);
+        read_boot_clock64(&boot);
         if (!timespec64_valid_strict(&boot)) {
                 pr_warn("WARNING: Boot clock returned invalid value!\n"
                         "         Check your CMOS/BIOS settings.\n");
@@ -1114,7 +1246,6 @@ void __init timekeeping_init(void)
         tk_set_xtime(tk, &now);
         tk->raw_time.tv_sec = 0;
         tk->raw_time.tv_nsec = 0;
-        tk->base_raw.tv64 = 0;
         if (boot.tv_sec == 0 && boot.tv_nsec == 0)
                 boot = tk_xtime(tk);
 
@@ -1127,7 +1258,7 @@ void __init timekeeping_init(void)
         raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 }
 
-/* time in seconds when suspend began */
+/* time in seconds when suspend began for persistent clock */
 static struct timespec64 timekeeping_suspend_time;
 
 /**
@@ -1152,12 +1283,49 @@ static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
         tk_debug_account_sleep_time(delta);
 }
 
+#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_RTC_HCTOSYS_DEVICE)
+/**
+ * We have three kinds of time sources to use for sleep time
+ * injection, the preference order is:
+ * 1) non-stop clocksource
+ * 2) persistent clock (ie: RTC accessible when irqs are off)
+ * 3) RTC
+ *
+ * 1) and 2) are used by timekeeping, 3) by RTC subsystem.
+ * If system has neither 1) nor 2), 3) will be used finally.
+ *
+ *
+ * If timekeeping has injected sleeptime via either 1) or 2),
+ * 3) becomes needless, so in this case we don't need to call
+ * rtc_resume(), and this is what timekeeping_rtc_skipresume()
+ * means.
+ */
+bool timekeeping_rtc_skipresume(void)
+{
+        return sleeptime_injected;
+}
+
+/**
+ * 1) can be determined whether to use or not only when doing
+ * timekeeping_resume() which is invoked after rtc_suspend(),
+ * so we can't skip rtc_suspend() surely if system has 1).
+ *
+ * But if system has 2), 2) will definitely be used, so in this
+ * case we don't need to call rtc_suspend(), and this is what
+ * timekeeping_rtc_skipsuspend() means.
+ */
+bool timekeeping_rtc_skipsuspend(void)
+{
+        return persistent_clock_exists;
+}
+
 /**
  * timekeeping_inject_sleeptime64 - Adds suspend interval to timekeeping values
  * @delta: pointer to a timespec64 delta value
  *
- * This hook is for architectures that cannot support read_persistent_clock
+ * This hook is for architectures that cannot support read_persistent_clock64
  * because their RTC/persistent clock is only accessible when irqs are enabled.
+ * and also don't have an effective nonstop clocksource.
  *
  * This function should only be called by rtc_resume(), and allows
 * a suspend offset to be injected into the timekeeping values.
@@ -1167,13 +1335,6 @@ void timekeeping_inject_sleeptime64(struct timespec64 *delta)
         struct timekeeper *tk = &tk_core.timekeeper;
         unsigned long flags;
 
-        /*
-         * Make sure we don't set the clock twice, as timekeeping_resume()
-         * already did it
-         */
-        if (has_persistent_clock())
-                return;
-
         raw_spin_lock_irqsave(&timekeeper_lock, flags);
         write_seqcount_begin(&tk_core.seq);
 
@@ -1189,26 +1350,21 @@ void timekeeping_inject_sleeptime64(struct timespec64 *delta)
         /* signal hrtimers about time change */
         clock_was_set();
 }
+#endif
 
 /**
  * timekeeping_resume - Resumes the generic timekeeping subsystem.
- *
- * This is for the generic clocksource timekeeping.
- * xtime/wall_to_monotonic/jiffies/etc are
- * still managed by arch specific suspend/resume code.
  */
 void timekeeping_resume(void)
 {
         struct timekeeper *tk = &tk_core.timekeeper;
-        struct clocksource *clock = tk->tkr.clock;
+        struct clocksource *clock = tk->tkr_mono.clock;
         unsigned long flags;
         struct timespec64 ts_new, ts_delta;
-        struct timespec tmp;
         cycle_t cycle_now, cycle_delta;
-        bool suspendtime_found = false;
 
-        read_persistent_clock(&tmp);
-        ts_new = timespec_to_timespec64(tmp);
+        sleeptime_injected = false;
+        read_persistent_clock64(&ts_new);
 
         clockevents_resume();
         clocksource_resume();
@@ -1228,16 +1384,16 @@ void timekeeping_resume(void)
          * The less preferred source will only be tried if there is no better
          * usable source. The rtc part is handled separately in rtc core code.
          */
-        cycle_now = tk->tkr.read(clock);
+        cycle_now = tk->tkr_mono.read(clock);
         if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
-                cycle_now > tk->tkr.cycle_last) {
+                cycle_now > tk->tkr_mono.cycle_last) {
                 u64 num, max = ULLONG_MAX;
                 u32 mult = clock->mult;
                 u32 shift = clock->shift;
                 s64 nsec = 0;
 
-                cycle_delta = clocksource_delta(cycle_now, tk->tkr.cycle_last,
-                                                tk->tkr.mask);
+                cycle_delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last,
+                                                tk->tkr_mono.mask);
 
                 /*
                  * "cycle_delta * mult" may cause 64 bits overflow, if the
@@ -1253,17 +1409,19 @@ void timekeeping_resume(void)
                 nsec += ((u64) cycle_delta * mult) >> shift;
 
                 ts_delta = ns_to_timespec64(nsec);
-                suspendtime_found = true;
+                sleeptime_injected = true;
         } else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
                 ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);
-                suspendtime_found = true;
+                sleeptime_injected = true;
         }
 
-        if (suspendtime_found)
+        if (sleeptime_injected)
                 __timekeeping_inject_sleeptime(tk, &ts_delta);
 
         /* Re-base the last cycle value */
-        tk->tkr.cycle_last = cycle_now;
+        tk->tkr_mono.cycle_last = cycle_now;
+        tk->tkr_raw.cycle_last = cycle_now;
+
         tk->ntp_error = 0;
         timekeeping_suspended = 0;
         timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
@@ -1272,9 +1430,7 @@ void timekeeping_resume(void)
 
         touch_softlockup_watchdog();
 
-        clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);
-
-        /* Resume hrtimers */
+        tick_resume();
         hrtimers_resume();
 }
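
The nonstop-clocksource branch of timekeeping_resume() cannot just call clocksource_cyc2ns(): after a long suspend, cycle_delta * mult can overflow 64 bits, so the patch converts in max-sized chunks first. A user-space rendering of that reduction (constants invented):

    #include <stdint.h>
    #include <stdio.h>

    /* Convert a possibly huge cycle delta with (delta * mult) >> shift
     * without overflowing 64 bits: peel off max-sized chunks first, as
     * timekeeping_resume() does for CLOCK_SOURCE_SUSPEND_NONSTOP clocks. */
    static uint64_t safe_cyc2ns(uint64_t cycle_delta, uint32_t mult, uint32_t shift)
    {
            uint64_t max = UINT64_MAX / mult;  /* largest non-overflowing delta */
            uint64_t nsec = 0;

            if (cycle_delta > max) {
                    uint64_t num = cycle_delta / max;

                    nsec = ((max * mult) >> shift) * num;
                    cycle_delta -= num * max;
            }
            return nsec + ((cycle_delta * mult) >> shift);
    }

    int main(void)
    {
            /* ~3 days of a 24 MHz counter: the naive multiply would overflow */
            printf("%llu ns\n",
                   (unsigned long long)safe_cyc2ns(6220800000000ULL,
                                                   2796202667u, 26));
            return 0;   /* ~259200 seconds worth of nanoseconds */
    }
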
@@ -1284,10 +1440,8 @@ int timekeeping_suspend(void)
         unsigned long flags;
         struct timespec64 delta, delta_delta;
         static struct timespec64 old_delta;
-        struct timespec tmp;
 
-        read_persistent_clock(&tmp);
-        timekeeping_suspend_time = timespec_to_timespec64(tmp);
+        read_persistent_clock64(&timekeeping_suspend_time);
 
         /*
          * On some systems the persistent_clock can not be detected at
@@ -1295,31 +1449,33 @@ int timekeeping_suspend(void)
          * value returned, update the persistent_clock_exists flag.
          */
         if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
-                persistent_clock_exist = true;
+                persistent_clock_exists = true;
 
         raw_spin_lock_irqsave(&timekeeper_lock, flags);
         write_seqcount_begin(&tk_core.seq);
         timekeeping_forward_now(tk);
         timekeeping_suspended = 1;
 
-        /*
-         * To avoid drift caused by repeated suspend/resumes,
-         * which each can add ~1 second drift error,
-         * try to compensate so the difference in system time
-         * and persistent_clock time stays close to constant.
-         */
-        delta = timespec64_sub(tk_xtime(tk), timekeeping_suspend_time);
-        delta_delta = timespec64_sub(delta, old_delta);
-        if (abs(delta_delta.tv_sec) >= 2) {
+        if (persistent_clock_exists) {
                 /*
-                 * if delta_delta is too large, assume time correction
-                 * has occured and set old_delta to the current delta.
+                 * To avoid drift caused by repeated suspend/resumes,
+                 * which each can add ~1 second drift error,
+                 * try to compensate so the difference in system time
+                 * and persistent_clock time stays close to constant.
                  */
-                old_delta = delta;
-        } else {
-                /* Otherwise try to adjust old_system to compensate */
-                timekeeping_suspend_time =
-                        timespec64_add(timekeeping_suspend_time, delta_delta);
+                delta = timespec64_sub(tk_xtime(tk), timekeeping_suspend_time);
+                delta_delta = timespec64_sub(delta, old_delta);
+                if (abs(delta_delta.tv_sec) >= 2) {
+                        /*
+                         * if delta_delta is too large, assume time correction
+                         * has occurred and set old_delta to the current delta.
+                         */
+                        old_delta = delta;
+                } else {
+                        /* Otherwise try to adjust old_system to compensate */
+                        timekeeping_suspend_time =
+                                timespec64_add(timekeeping_suspend_time, delta_delta);
+                }
         }
 
         timekeeping_update(tk, TK_MIRROR);
@@ -1327,7 +1483,7 @@ int timekeeping_suspend(void)
         write_seqcount_end(&tk_core.seq);
         raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 
-        clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
+        tick_suspend();
         clocksource_suspend();
         clockevents_suspend();
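
The delta/delta_delta dance above tries to keep the gap between system time and the persistent clock constant across sleep cycles, since each suspend/resume pair can otherwise contribute up to ~1 s of error; only a jump of two seconds or more is treated as a genuine clock correction. Schematically, with bare seconds instead of timespec64 (readings invented):

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            long old_delta = 0;                  /* remembered across suspends */
            long xtime = 1000, persistent = 998; /* invented readings */
            long suspend_time = persistent;

            long delta = xtime - suspend_time;
            long delta_delta = delta - old_delta;

            if (labs(delta_delta) >= 2)
                    old_delta = delta;           /* real time correction */
            else
                    suspend_time += delta_delta; /* absorb sub-second drift */

            printf("old_delta=%ld suspend_time=%ld\n", old_delta, suspend_time);
            return 0;
    }
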
@@ -1416,15 +1572,15 @@ static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk,
          *
          * XXX - TODO: Doc ntp_error calculation.
          */
-        if ((mult_adj > 0) && (tk->tkr.mult + mult_adj < mult_adj)) {
+        if ((mult_adj > 0) && (tk->tkr_mono.mult + mult_adj < mult_adj)) {
                 /* NTP adjustment caused clocksource mult overflow */
                 WARN_ON_ONCE(1);
                 return;
         }
 
-        tk->tkr.mult += mult_adj;
+        tk->tkr_mono.mult += mult_adj;
         tk->xtime_interval += interval;
-        tk->tkr.xtime_nsec -= offset;
+        tk->tkr_mono.xtime_nsec -= offset;
         tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;
 }
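
The guard at the top of timekeeping_apply_adjustment() is the usual unsigned-wraparound test: if mult + mult_adj wrapped around, the sum ends up smaller than mult_adj itself. Reduced to its essentials:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t mult = 0xfffffff0u;    /* invented, near the top */
            uint32_t adj = 0x20;

            if (mult + adj < adj)           /* wrapped: adjustment rejected */
                    puts("overflow detected");
            return 0;
    }
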
@@ -1486,13 +1642,13 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
                 tk->ntp_err_mult = 0;
         }
 
-        if (unlikely(tk->tkr.clock->maxadj &&
-                (abs(tk->tkr.mult - tk->tkr.clock->mult)
-                        > tk->tkr.clock->maxadj))) {
+        if (unlikely(tk->tkr_mono.clock->maxadj &&
+                (abs(tk->tkr_mono.mult - tk->tkr_mono.clock->mult)
+                        > tk->tkr_mono.clock->maxadj))) {
                 printk_once(KERN_WARNING
                         "Adjusting %s more than 11%% (%ld vs %ld)\n",
-                        tk->tkr.clock->name, (long)tk->tkr.mult,
-                        (long)tk->tkr.clock->mult + tk->tkr.clock->maxadj);
+                        tk->tkr_mono.clock->name, (long)tk->tkr_mono.mult,
+                        (long)tk->tkr_mono.clock->mult + tk->tkr_mono.clock->maxadj);
         }
 
         /*
@@ -1509,9 +1665,9 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
          * We'll correct this error next time through this function, when
          * xtime_nsec is not as small.
          */
-        if (unlikely((s64)tk->tkr.xtime_nsec < 0)) {
-                s64 neg = -(s64)tk->tkr.xtime_nsec;
-                tk->tkr.xtime_nsec = 0;
+        if (unlikely((s64)tk->tkr_mono.xtime_nsec < 0)) {
+                s64 neg = -(s64)tk->tkr_mono.xtime_nsec;
+                tk->tkr_mono.xtime_nsec = 0;
                 tk->ntp_error += neg << tk->ntp_error_shift;
         }
 }
@@ -1526,13 +1682,13 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
  */
 static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
 {
-        u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr.shift;
+        u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
         unsigned int clock_set = 0;
 
-        while (tk->tkr.xtime_nsec >= nsecps) {
+        while (tk->tkr_mono.xtime_nsec >= nsecps) {
                 int leap;
 
-                tk->tkr.xtime_nsec -= nsecps;
+                tk->tkr_mono.xtime_nsec -= nsecps;
                 tk->xtime_sec++;
 
                 /* Figure out if its a leap sec and apply if needed */
@@ -1577,9 +1733,10 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
 
         /* Accumulate one shifted interval */
         offset -= interval;
-        tk->tkr.cycle_last += interval;
+        tk->tkr_mono.cycle_last += interval;
+        tk->tkr_raw.cycle_last += interval;
 
-        tk->tkr.xtime_nsec += tk->xtime_interval << shift;
+        tk->tkr_mono.xtime_nsec += tk->xtime_interval << shift;
         *clock_set |= accumulate_nsecs_to_secs(tk);
 
         /* Accumulate raw time */
@@ -1622,14 +1779,17 @@ void update_wall_time(void)
 #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
         offset = real_tk->cycle_interval;
 #else
-        offset = clocksource_delta(tk->tkr.read(tk->tkr.clock),
-                                   tk->tkr.cycle_last, tk->tkr.mask);
+        offset = clocksource_delta(tk->tkr_mono.read(tk->tkr_mono.clock),
+                                   tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
 #endif
 
         /* Check if there's really nothing to do */
         if (offset < real_tk->cycle_interval)
                 goto out;
 
+        /* Do some additional sanity checking */
+        timekeeping_check_update(real_tk, offset);
+
         /*
          * With NO_HZ we may have to accumulate many cycle_intervals
          * (think "ticks") worth of time at once. To do this efficiently,
@@ -1784,8 +1944,8 @@ ktime_t ktime_get_update_offsets_tick(ktime_t *offs_real, ktime_t *offs_boot,
         do {
                 seq = read_seqcount_begin(&tk_core.seq);
 
-                base = tk->tkr.base_mono;
-                nsecs = tk->tkr.xtime_nsec >> tk->tkr.shift;
+                base = tk->tkr_mono.base;
+                nsecs = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
 
                 *offs_real = tk->offs_real;
                 *offs_boot = tk->offs_boot;
@@ -1816,8 +1976,8 @@ ktime_t ktime_get_update_offsets_now(ktime_t *offs_real, ktime_t *offs_boot,
         do {
                 seq = read_seqcount_begin(&tk_core.seq);
 
-                base = tk->tkr.base_mono;
-                nsecs = timekeeping_get_ns(&tk->tkr);
+                base = tk->tkr_mono.base;
+                nsecs = timekeeping_get_ns(&tk->tkr_mono);
 
                 *offs_real = tk->offs_real;
                 *offs_boot = tk->offs_boot;