|
@@ -105,7 +105,7 @@ static inline void tk_normalize_xtime(struct timekeeper *tk)
|
|
|
}
|
|
|
}
|
|
|
|
|
|
-static inline struct timespec64 tk_xtime(struct timekeeper *tk)
|
|
|
+static inline struct timespec64 tk_xtime(const struct timekeeper *tk)
|
|
|
{
|
|
|
struct timespec64 ts;
|
|
|
|
|
@@ -162,7 +162,7 @@ static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
|
|
|
* a read of the fast-timekeeper tkrs (which is protected by its own locking
|
|
|
* and update logic).
|
|
|
*/
|
|
|
-static inline u64 tk_clock_read(struct tk_read_base *tkr)
|
|
|
+static inline u64 tk_clock_read(const struct tk_read_base *tkr)
|
|
|
{
|
|
|
struct clocksource *clock = READ_ONCE(tkr->clock);
|
|
|
|
|
@@ -211,7 +211,7 @@ static void timekeeping_check_update(struct timekeeper *tk, u64 offset)
|
|
|
}
|
|
|
}
|
|
|
|
|
|
-static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
|
|
|
+static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr)
|
|
|
{
|
|
|
struct timekeeper *tk = &tk_core.timekeeper;
|
|
|
u64 now, last, mask, max, delta;
|
|
@@ -255,7 +255,7 @@ static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
|
|
|
static inline void timekeeping_check_update(struct timekeeper *tk, u64 offset)
|
|
|
{
|
|
|
}
|
|
|
-static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
|
|
|
+static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr)
|
|
|
{
|
|
|
u64 cycle_now, delta;
|
|
|
|
|
@@ -352,7 +352,7 @@ u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
|
|
|
static inline u32 arch_gettimeoffset(void) { return 0; }
|
|
|
#endif
|
|
|
|
|
|
-static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr, u64 delta)
|
|
|
+static inline u64 timekeeping_delta_to_ns(const struct tk_read_base *tkr, u64 delta)
|
|
|
{
|
|
|
u64 nsec;
|
|
|
|
|
@@ -363,7 +363,7 @@ static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr, u64 delta)
|
|
|
return nsec + arch_gettimeoffset();
|
|
|
}
|
|
|
|
|
|
-static inline u64 timekeeping_get_ns(struct tk_read_base *tkr)
|
|
|
+static inline u64 timekeeping_get_ns(const struct tk_read_base *tkr)
|
|
|
{
|
|
|
u64 delta;
|
|
|
|
|
@@ -371,7 +371,7 @@ static inline u64 timekeeping_get_ns(struct tk_read_base *tkr)
|
|
|
return timekeeping_delta_to_ns(tkr, delta);
|
|
|
}
|
|
|
|
|
|
-static inline u64 timekeeping_cycles_to_ns(struct tk_read_base *tkr, u64 cycles)
|
|
|
+static inline u64 timekeeping_cycles_to_ns(const struct tk_read_base *tkr, u64 cycles)
|
|
|
{
|
|
|
u64 delta;
|
|
|
|
|
@@ -394,7 +394,8 @@ static inline u64 timekeeping_cycles_to_ns(struct tk_read_base *tkr, u64 cycles)
|
|
|
* slightly wrong timestamp (a few nanoseconds). See
|
|
|
* @ktime_get_mono_fast_ns.
|
|
|
*/
|
|
|
-static void update_fast_timekeeper(struct tk_read_base *tkr, struct tk_fast *tkf)
|
|
|
+static void update_fast_timekeeper(const struct tk_read_base *tkr,
|
|
|
+ struct tk_fast *tkf)
|
|
|
{
|
|
|
struct tk_read_base *base = tkf->base;
|
|
|
|
|
@@ -549,10 +550,10 @@ EXPORT_SYMBOL_GPL(ktime_get_real_fast_ns);
|
|
|
* number of cycles every time until timekeeping is resumed at which time the
|
|
|
* proper readout base for the fast timekeeper will be restored automatically.
|
|
|
*/
|
|
|
-static void halt_fast_timekeeper(struct timekeeper *tk)
|
|
|
+static void halt_fast_timekeeper(const struct timekeeper *tk)
|
|
|
{
|
|
|
static struct tk_read_base tkr_dummy;
|
|
|
- struct tk_read_base *tkr = &tk->tkr_mono;
|
|
|
+ const struct tk_read_base *tkr = &tk->tkr_mono;
|
|
|
|
|
|
memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
|
|
|
cycles_at_suspend = tk_clock_read(tkr);
|
|
@@ -1277,7 +1278,7 @@ EXPORT_SYMBOL(do_settimeofday64);
|
|
|
*
|
|
|
* Adds or subtracts an offset value from the current time.
|
|
|
*/
|
|
|
-static int timekeeping_inject_offset(struct timespec64 *ts)
|
|
|
+static int timekeeping_inject_offset(const struct timespec64 *ts)
|
|
|
{
|
|
|
struct timekeeper *tk = &tk_core.timekeeper;
|
|
|
unsigned long flags;
|
|
@@ -1518,8 +1519,20 @@ void __weak read_boot_clock64(struct timespec64 *ts)
|
|
|
ts->tv_nsec = 0;
|
|
|
}
|
|
|
|
|
|
-/* Flag for if timekeeping_resume() has injected sleeptime */
|
|
|
-static bool sleeptime_injected;
|
|
|
+/*
|
|
|
+ * Flag reflecting whether timekeeping_resume() has injected sleeptime.
|
|
|
+ *
|
|
|
+ * The flag starts off false and is only set when a suspend reaches
|
|
|
+ * timekeeping_suspend(). timekeeping_resume() sets it to false when the
|
|
|
+ * timekeeper clocksource is not stopping across suspend and has been
|
|
|
+ * used to update sleep time. If the timekeeper clocksource has stopped
|
|
|
+ * then the flag stays true and is used by the RTC resume code to decide
|
|
|
+ * whether sleeptime must be injected; if so, the flag is cleared afterwards.
|
|
|
+ *
|
|
|
+ * If a suspend fails before reaching timekeeping_resume() then the flag
|
|
|
+ * stays false and prevents erroneous sleeptime injection.
|
|
|
+ */
|
|
|
+static bool suspend_timing_needed;
|
|
|
|
|
|
/* Flag for if there is a persistent clock on this platform */
|
|
|
static bool persistent_clock_exists;
|
|
@@ -1585,7 +1598,7 @@ static struct timespec64 timekeeping_suspend_time;
|
|
|
* adds the sleep offset to the timekeeping variables.
|
|
|
*/
|
|
|
static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
|
|
|
- struct timespec64 *delta)
|
|
|
+ const struct timespec64 *delta)
|
|
|
{
|
|
|
if (!timespec64_valid_strict(delta)) {
|
|
|
printk_deferred(KERN_WARNING
|
|
@@ -1618,7 +1631,7 @@ static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
|
|
|
*/
|
|
|
bool timekeeping_rtc_skipresume(void)
|
|
|
{
|
|
|
- return sleeptime_injected;
|
|
|
+ return !suspend_timing_needed;
|
|
|
}
|
|
|
|
|
|
/**
|
|
@@ -1646,7 +1659,7 @@ bool timekeeping_rtc_skipsuspend(void)
|
|
|
* This function should only be called by rtc_resume(), and allows
|
|
|
* a suspend offset to be injected into the timekeeping values.
|
|
|
*/
|
|
|
-void timekeeping_inject_sleeptime64(struct timespec64 *delta)
|
|
|
+void timekeeping_inject_sleeptime64(const struct timespec64 *delta)
|
|
|
{
|
|
|
struct timekeeper *tk = &tk_core.timekeeper;
|
|
|
unsigned long flags;
|
|
@@ -1654,6 +1667,8 @@ void timekeeping_inject_sleeptime64(struct timespec64 *delta)
|
|
|
raw_spin_lock_irqsave(&timekeeper_lock, flags);
|
|
|
write_seqcount_begin(&tk_core.seq);
|
|
|
|
|
|
+ suspend_timing_needed = false;
|
|
|
+
|
|
|
timekeeping_forward_now(tk);
|
|
|
|
|
|
__timekeeping_inject_sleeptime(tk, delta);
|
|
@@ -1677,9 +1692,9 @@ void timekeeping_resume(void)
|
|
|
struct clocksource *clock = tk->tkr_mono.clock;
|
|
|
unsigned long flags;
|
|
|
struct timespec64 ts_new, ts_delta;
|
|
|
- u64 cycle_now;
|
|
|
+ u64 cycle_now, nsec;
|
|
|
+ bool inject_sleeptime = false;
|
|
|
|
|
|
- sleeptime_injected = false;
|
|
|
read_persistent_clock64(&ts_new);
|
|
|
|
|
|
clockevents_resume();
|
|
@@ -1701,22 +1716,19 @@ void timekeeping_resume(void)
|
|
|
* usable source. The rtc part is handled separately in rtc core code.
|
|
|
*/
|
|
|
cycle_now = tk_clock_read(&tk->tkr_mono);
|
|
|
- if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
|
|
|
- cycle_now > tk->tkr_mono.cycle_last) {
|
|
|
- u64 nsec, cyc_delta;
|
|
|
-
|
|
|
- cyc_delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last,
|
|
|
- tk->tkr_mono.mask);
|
|
|
- nsec = mul_u64_u32_shr(cyc_delta, clock->mult, clock->shift);
|
|
|
+ nsec = clocksource_stop_suspend_timing(clock, cycle_now);
|
|
|
+ if (nsec > 0) {
|
|
|
ts_delta = ns_to_timespec64(nsec);
|
|
|
- sleeptime_injected = true;
|
|
|
+ inject_sleeptime = true;
|
|
|
} else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
|
|
|
ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);
|
|
|
- sleeptime_injected = true;
|
|
|
+ inject_sleeptime = true;
|
|
|
}
|
|
|
|
|
|
- if (sleeptime_injected)
|
|
|
+ if (inject_sleeptime) {
|
|
|
+ suspend_timing_needed = false;
|
|
|
__timekeeping_inject_sleeptime(tk, &ts_delta);
|
|
|
+ }
|
|
|
|
|
|
/* Re-base the last cycle value */
|
|
|
tk->tkr_mono.cycle_last = cycle_now;
|
|
@@ -1740,6 +1752,8 @@ int timekeeping_suspend(void)
|
|
|
unsigned long flags;
|
|
|
struct timespec64 delta, delta_delta;
|
|
|
static struct timespec64 old_delta;
|
|
|
+ struct clocksource *curr_clock;
|
|
|
+ u64 cycle_now;
|
|
|
|
|
|
read_persistent_clock64(&timekeeping_suspend_time);
|
|
|
|
|
@@ -1751,11 +1765,22 @@ int timekeeping_suspend(void)
|
|
|
if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
|
|
|
persistent_clock_exists = true;
|
|
|
|
|
|
+ suspend_timing_needed = true;
|
|
|
+
|
|
|
raw_spin_lock_irqsave(&timekeeper_lock, flags);
|
|
|
write_seqcount_begin(&tk_core.seq);
|
|
|
timekeeping_forward_now(tk);
|
|
|
timekeeping_suspended = 1;
|
|
|
|
|
|
+ /*
|
|
|
+ * Since we've called forward_now, cycle_last stores the value
|
|
|
+ * just read from the current clocksource. Save this to potentially
|
|
|
+ * use in suspend timing.
|
|
|
+ */
|
|
|
+ curr_clock = tk->tkr_mono.clock;
|
|
|
+ cycle_now = tk->tkr_mono.cycle_last;
|
|
|
+ clocksource_start_suspend_timing(curr_clock, cycle_now);
|
|
|
+
|
|
|
if (persistent_clock_exists) {
|
|
|
/*
|
|
|
* To avoid drift caused by repeated suspend/resumes,
|
|
@@ -2240,7 +2265,7 @@ ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
|
|
|
/**
|
|
|
* timekeeping_validate_timex - Ensures the timex is ok for use in do_adjtimex
|
|
|
*/
|
|
|
-static int timekeeping_validate_timex(struct timex *txc)
|
|
|
+static int timekeeping_validate_timex(const struct timex *txc)
|
|
|
{
|
|
|
if (txc->modes & ADJ_ADJTIME) {
|
|
|
/* singleshot must not be used with any other mode bits */
|