@@ -34,6 +34,14 @@
 #define TK_MIRROR		(1 << 1)
 #define TK_CLOCK_WAS_SET	(1 << 2)
 
+enum timekeeping_adv_mode {
+	/* Update timekeeper when a tick has passed */
+	TK_ADV_TICK,
+
+	/* Update timekeeper on a direct frequency change */
+	TK_ADV_FREQ
+};
+
 /*
  * The most important data for readout fits into a single 64 byte
  * cache line.
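For orientation, the two modes map onto the two call sites added later in this patch: the periodic tick keeps driving the timekeeper through update_wall_time(), while do_adjtimex() requests an immediate update when the frequency or tick length is changed directly. Condensed below; this only restates hunks that appear further down, it is not an additional change.

/* Tick path, behaviour unchanged: */
void update_wall_time(void)
{
	timekeeping_advance(TK_ADV_TICK);
}

/* In do_adjtimex(), after the NTP state has been updated: */
if (txc->modes & (ADJ_FREQUENCY | ADJ_TICK))
	timekeeping_advance(TK_ADV_FREQ);	/* apply a direct freq/tick change right away */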
@@ -97,7 +105,7 @@ static inline void tk_normalize_xtime(struct timekeeper *tk)
 	}
 }
 
-static inline struct timespec64 tk_xtime(struct timekeeper *tk)
+static inline struct timespec64 tk_xtime(const struct timekeeper *tk)
 {
 	struct timespec64 ts;
 
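This hunk and the similar ones below only add const qualifiers to the read-side helpers; behaviour is unchanged. As a hypothetical illustration (read_mono_ns is made up, it is not in the kernel), a caller that only holds a pointer-to-const can now use the helpers without casting, and any accidental store through tkr inside them becomes a compile error:

static u64 read_mono_ns(const struct tk_read_base *tkr)
{
	return timekeeping_get_ns(tkr);	/* fine now that the helper takes a const pointer */
}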
@@ -154,7 +162,7 @@ static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
  * a read of the fast-timekeeper tkrs (which is protected by its own locking
  * and update logic).
  */
-static inline u64 tk_clock_read(struct tk_read_base *tkr)
+static inline u64 tk_clock_read(const struct tk_read_base *tkr)
 {
 	struct clocksource *clock = READ_ONCE(tkr->clock);
 
@@ -203,7 +211,7 @@ static void timekeeping_check_update(struct timekeeper *tk, u64 offset)
 	}
 }
 
-static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
+static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr)
 {
 	struct timekeeper *tk = &tk_core.timekeeper;
 	u64 now, last, mask, max, delta;
@@ -247,7 +255,7 @@ static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
 static inline void timekeeping_check_update(struct timekeeper *tk, u64 offset)
 {
 }
-static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
+static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr)
 {
 	u64 cycle_now, delta;
 
@@ -344,7 +352,7 @@ u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
 static inline u32 arch_gettimeoffset(void) { return 0; }
 #endif
 
-static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr, u64 delta)
+static inline u64 timekeeping_delta_to_ns(const struct tk_read_base *tkr, u64 delta)
 {
 	u64 nsec;
 
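These conversion helpers scale a cycle delta by the clocksource's mult/shift pair, roughly nsec = (delta * mult) >> shift; timekeeping_delta_to_ns() also folds in the shifted remainder kept in tkr->xtime_nsec before shifting. A worked example with made-up values (not from the patch):

/* A 1 MHz clocksource advances 1000 ns per cycle; with shift = 20 the
 * core picks mult so that (cycles * mult) >> shift yields nanoseconds. */
u32 mult  = 1000u << 20;		/* 1000 ns per cycle, scaled by 2^20 */
u32 shift = 20;
u64 delta = 5;				/* cycles since tkr->cycle_last */
u64 nsec  = (delta * mult) >> shift;	/* 5 * 1000 = 5000 ns */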
@@ -355,7 +363,7 @@ static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr, u64 delta)
 	return nsec + arch_gettimeoffset();
 }
 
-static inline u64 timekeeping_get_ns(struct tk_read_base *tkr)
+static inline u64 timekeeping_get_ns(const struct tk_read_base *tkr)
 {
 	u64 delta;
 
@@ -363,7 +371,7 @@ static inline u64 timekeeping_get_ns(struct tk_read_base *tkr)
 	return timekeeping_delta_to_ns(tkr, delta);
 }
 
-static inline u64 timekeeping_cycles_to_ns(struct tk_read_base *tkr, u64 cycles)
+static inline u64 timekeeping_cycles_to_ns(const struct tk_read_base *tkr, u64 cycles)
 {
 	u64 delta;
 
@@ -386,7 +394,8 @@ static inline u64 timekeeping_cycles_to_ns(struct tk_read_base *tkr, u64 cycles)
  * slightly wrong timestamp (a few nanoseconds). See
  * @ktime_get_mono_fast_ns.
  */
-static void update_fast_timekeeper(struct tk_read_base *tkr, struct tk_fast *tkf)
+static void update_fast_timekeeper(const struct tk_read_base *tkr,
+				   struct tk_fast *tkf)
 {
 	struct tk_read_base *base = tkf->base;
 
@@ -541,10 +550,10 @@ EXPORT_SYMBOL_GPL(ktime_get_real_fast_ns);
  * number of cycles every time until timekeeping is resumed at which time the
  * proper readout base for the fast timekeeper will be restored automatically.
  */
-static void halt_fast_timekeeper(struct timekeeper *tk)
+static void halt_fast_timekeeper(const struct timekeeper *tk)
 {
 	static struct tk_read_base tkr_dummy;
-	struct tk_read_base *tkr = &tk->tkr_mono;
+	const struct tk_read_base *tkr = &tk->tkr_mono;
 
 	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
 	cycles_at_suspend = tk_clock_read(tkr);
@@ -1269,7 +1278,7 @@ EXPORT_SYMBOL(do_settimeofday64);
  *
  * Adds or subtracts an offset value from the current time.
  */
-static int timekeeping_inject_offset(struct timespec64 *ts)
+static int timekeeping_inject_offset(const struct timespec64 *ts)
 {
 	struct timekeeper *tk = &tk_core.timekeeper;
 	unsigned long flags;
@@ -1510,8 +1519,20 @@ void __weak read_boot_clock64(struct timespec64 *ts)
 	ts->tv_nsec = 0;
 }
 
-/* Flag for if timekeeping_resume() has injected sleeptime */
-static bool sleeptime_injected;
+/*
+ * Flag reflecting whether timekeeping_resume() has injected sleeptime.
+ *
+ * The flag starts off false and is only set when a suspend reaches
+ * timekeeping_suspend(). timekeeping_resume() sets it back to false when
+ * the timekeeper clocksource did not stop across the suspend and could
+ * be used to compute the sleep time. If the clocksource stopped, the
+ * flag stays true and the RTC resume code uses it to decide whether
+ * sleep time must be injected; the flag is cleared once that happens.
+ *
+ * If a suspend fails before reaching timekeeping_resume() then the flag
+ * stays false and prevents erroneous sleeptime injection.
+ */
+static bool suspend_timing_needed;
 
 /* Flag for if there is a persistent clock on this platform */
 static bool persistent_clock_exists;
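The comment above describes a small state machine. Summarised as a sketch with simplified stand-ins (not kernel code), the flag travels through one suspend cycle like this:

static bool suspend_timing_needed;

static void sketch_suspend(void)
{
	suspend_timing_needed = true;	/* only set once a suspend reaches timekeeping */
}

static void sketch_resume(bool sleeptime_already_injected)
{
	if (sleeptime_already_injected)	/* nonstop clocksource or persistent clock case */
		suspend_timing_needed = false;
}

static bool sketch_rtc_skipresume(void)
{
	return !suspend_timing_needed;	/* RTC core injects sleep time only if still needed */
}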
@@ -1577,7 +1598,7 @@ static struct timespec64 timekeeping_suspend_time;
  * adds the sleep offset to the timekeeping variables.
  */
 static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
-					   struct timespec64 *delta)
+					   const struct timespec64 *delta)
 {
 	if (!timespec64_valid_strict(delta)) {
 		printk_deferred(KERN_WARNING
@@ -1610,7 +1631,7 @@ static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
  */
 bool timekeeping_rtc_skipresume(void)
 {
-	return sleeptime_injected;
+	return !suspend_timing_needed;
 }
 
 /**
@@ -1638,7 +1659,7 @@ bool timekeeping_rtc_skipsuspend(void)
  * This function should only be called by rtc_resume(), and allows
  * a suspend offset to be injected into the timekeeping values.
  */
-void timekeeping_inject_sleeptime64(struct timespec64 *delta)
+void timekeeping_inject_sleeptime64(const struct timespec64 *delta)
 {
 	struct timekeeper *tk = &tk_core.timekeeper;
 	unsigned long flags;
@@ -1646,6 +1667,8 @@ void timekeeping_inject_sleeptime64(struct timespec64 *delta)
 	raw_spin_lock_irqsave(&timekeeper_lock, flags);
 	write_seqcount_begin(&tk_core.seq);
 
+	suspend_timing_needed = false;
+
 	timekeeping_forward_now(tk);
 
 	__timekeeping_inject_sleeptime(tk, delta);
@@ -1669,9 +1692,9 @@ void timekeeping_resume(void)
 	struct clocksource *clock = tk->tkr_mono.clock;
 	unsigned long flags;
 	struct timespec64 ts_new, ts_delta;
-	u64 cycle_now;
+	u64 cycle_now, nsec;
+	bool inject_sleeptime = false;
 
-	sleeptime_injected = false;
 	read_persistent_clock64(&ts_new);
 
 	clockevents_resume();
@@ -1693,22 +1716,19 @@ void timekeeping_resume(void)
 	 * usable source. The rtc part is handled separately in rtc core code.
 	 */
 	cycle_now = tk_clock_read(&tk->tkr_mono);
-	if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
-		cycle_now > tk->tkr_mono.cycle_last) {
-		u64 nsec, cyc_delta;
-
-		cyc_delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last,
-					      tk->tkr_mono.mask);
-		nsec = mul_u64_u32_shr(cyc_delta, clock->mult, clock->shift);
+	nsec = clocksource_stop_suspend_timing(clock, cycle_now);
+	if (nsec > 0) {
 		ts_delta = ns_to_timespec64(nsec);
-		sleeptime_injected = true;
+		inject_sleeptime = true;
 	} else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
 		ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);
-		sleeptime_injected = true;
+		inject_sleeptime = true;
 	}
 
-	if (sleeptime_injected)
+	if (inject_sleeptime) {
+		suspend_timing_needed = false;
 		__timekeeping_inject_sleeptime(tk, &ts_delta);
+	}
 
 	/* Re-base the last cycle value */
 	tk->tkr_mono.cycle_last = cycle_now;
@@ -1732,6 +1752,8 @@ int timekeeping_suspend(void)
 	unsigned long flags;
 	struct timespec64 delta, delta_delta;
 	static struct timespec64 old_delta;
+	struct clocksource *curr_clock;
+	u64 cycle_now;
 
 	read_persistent_clock64(&timekeeping_suspend_time);
 
@@ -1743,11 +1765,22 @@ int timekeeping_suspend(void)
 	if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
 		persistent_clock_exists = true;
 
+	suspend_timing_needed = true;
+
 	raw_spin_lock_irqsave(&timekeeper_lock, flags);
 	write_seqcount_begin(&tk_core.seq);
 	timekeeping_forward_now(tk);
 	timekeeping_suspended = 1;
 
+	/*
+	 * Since we've called forward_now, cycle_last stores the value
+	 * just read from the current clocksource. Save this to potentially
+	 * use in suspend timing.
+	 */
+	curr_clock = tk->tkr_mono.clock;
+	cycle_now = tk->tkr_mono.cycle_last;
+	clocksource_start_suspend_timing(curr_clock, cycle_now);
+
 	if (persistent_clock_exists) {
 		/*
 		 * To avoid drift caused by repeated suspend/resumes,
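Together with the resume hunk above, suspend and resume now bracket the sleep interval with the clocksource suspend-timing helpers. Schematically (a sketch, not kernel code; the real calls sit in timekeeping_suspend() and timekeeping_resume() exactly as shown in the hunks):

/* timekeeping_suspend(): record where the counter stopped */
clocksource_start_suspend_timing(curr_clock, cycle_now);

/* ...system sleeps... */

/* timekeeping_resume(): nanoseconds spent suspended, or 0 if no
 * suspend-capable clocksource could measure the interval (the
 * persistent-clock comparison in the resume hunk is the fallback) */
nsec = clocksource_stop_suspend_timing(clock, cycle_now);
if (nsec > 0)
	ts_delta = ns_to_timespec64(nsec);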
@@ -2021,11 +2054,11 @@ static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
 	return offset;
 }
 
-/**
- * update_wall_time - Uses the current clocksource to increment the wall time
- *
+/*
+ * timekeeping_advance - Updates the timekeeper to the current time and
+ * current NTP tick length
  */
-void update_wall_time(void)
+static void timekeeping_advance(enum timekeeping_adv_mode mode)
 {
 	struct timekeeper *real_tk = &tk_core.timekeeper;
 	struct timekeeper *tk = &shadow_timekeeper;
@@ -2042,14 +2075,17 @@ void update_wall_time(void)
 
 #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
 	offset = real_tk->cycle_interval;
+
+	if (mode != TK_ADV_TICK)
+		goto out;
 #else
 	offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
 				   tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
-#endif
 
 	/* Check if there's really nothing to do */
-	if (offset < real_tk->cycle_interval)
+	if (offset < real_tk->cycle_interval && mode == TK_ADV_TICK)
 		goto out;
+#endif
 
 	/* Do some additional sanity checking */
 	timekeeping_check_update(tk, offset);
@@ -2105,6 +2141,15 @@ out:
 		clock_was_set_delayed();
 }
 
+/**
+ * update_wall_time - Uses the current clocksource to increment the wall time
+ *
+ */
+void update_wall_time(void)
+{
+	timekeeping_advance(TK_ADV_TICK);
+}
+
 /**
  * getboottime64 - Return the real time of system boot.
  * @ts:		pointer to the timespec64 to be set
@@ -2220,7 +2265,7 @@ ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
 /**
  * timekeeping_validate_timex - Ensures the timex is ok for use in do_adjtimex
  */
-static int timekeeping_validate_timex(struct timex *txc)
+static int timekeeping_validate_timex(const struct timex *txc)
 {
 	if (txc->modes & ADJ_ADJTIME) {
 		/* singleshot must not be used with any other mode bits */
@@ -2310,7 +2355,7 @@ int do_adjtimex(struct timex *txc)
 			return ret;
 	}
 
-	getnstimeofday64(&ts);
+	ktime_get_real_ts64(&ts);
 
 	raw_spin_lock_irqsave(&timekeeper_lock, flags);
 	write_seqcount_begin(&tk_core.seq);
@@ -2327,6 +2372,10 @@ int do_adjtimex(struct timex *txc)
 	write_seqcount_end(&tk_core.seq);
 	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 
+	/* Update the multiplier immediately if frequency was set directly */
+	if (txc->modes & (ADJ_FREQUENCY | ADJ_TICK))
+		timekeeping_advance(TK_ADV_FREQ);
+
 	if (tai != orig_tai)
 		clock_was_set();
 
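With the TK_ADV_FREQ path in place, a direct frequency change from user space takes effect immediately instead of waiting for the next tick to be accumulated. A hypothetical user-space snippet that exercises this path (illustrative only; setting the frequency needs CAP_SYS_TIME):

#include <stdio.h>
#include <sys/timex.h>

int main(void)
{
	struct timex tx = { 0 };

	tx.modes = ADJ_FREQUENCY;
	tx.freq  = 100 << 16;	/* +100 ppm, in 16-bit fixed point */

	if (adjtimex(&tx) == -1) {
		perror("adjtimex");
		return 1;
	}
	printf("frequency offset is now %ld (scaled ppm)\n", tx.freq);
	return 0;
}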