@@ -138,12 +138,7 @@ static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
 
 static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
 {
-	/* Update both bases so mono and raw stay coupled. */
-	tk->tkr_mono.base += delta;
-	tk->tkr_raw.base += delta;
-
-	/* Accumulate time spent in suspend */
-	tk->time_suspended += delta;
+	tk->offs_boot = ktime_add(tk->offs_boot, delta);
 }
 
 /*
@@ -473,6 +468,36 @@ u64 ktime_get_raw_fast_ns(void)
 }
 EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
 
+/**
+ * ktime_get_boot_fast_ns - NMI safe and fast access to boot clock.
+ *
+ * To keep it NMI safe since we're accessing from tracing, we're not using a
+ * separate timekeeper with updates to monotonic clock and boot offset
+ * protected with seqlocks. This has the following minor side effects:
+ *
+ * (1) It's possible that a timestamp be taken after the boot offset is updated
+ * but before the timekeeper is updated. If this happens, the new boot offset
+ * is added to the old timekeeping making the clock appear to update slightly
+ * earlier:
+ *    CPU 0                                        CPU 1
+ *    timekeeping_inject_sleeptime64()
+ *    __timekeeping_inject_sleeptime(tk, delta);
+ *                                                 timestamp();
+ *    timekeeping_update(tk, TK_CLEAR_NTP...);
+ *
+ * (2) On 32-bit systems, the 64-bit boot offset (tk->offs_boot) may be
+ * partially updated. Since the tk->offs_boot update is a rare event, this
+ * should be a rare occurrence which postprocessing should be able to handle.
+ */
+u64 notrace ktime_get_boot_fast_ns(void)
+{
+	struct timekeeper *tk = &tk_core.timekeeper;
+
+	return (ktime_get_mono_fast_ns() + ktime_to_ns(tk->offs_boot));
+}
+EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns);
+
+
 /*
  * See comment for __ktime_get_fast_ns() vs. timestamp ordering
  */
@@ -764,6 +789,7 @@ EXPORT_SYMBOL_GPL(ktime_get_resolution_ns);
 
 static ktime_t *offsets[TK_OFFS_MAX] = {
 	[TK_OFFS_REAL]	= &tk_core.timekeeper.offs_real,
+	[TK_OFFS_BOOT]	= &tk_core.timekeeper.offs_boot,
 	[TK_OFFS_TAI]	= &tk_core.timekeeper.offs_tai,
 };
 
@@ -860,39 +886,6 @@ void ktime_get_ts64(struct timespec64 *ts)
 }
 EXPORT_SYMBOL_GPL(ktime_get_ts64);
 
-/**
- * ktime_get_active_ts64 - Get the active non-suspended monotonic clock
- * @ts: pointer to timespec variable
- *
- * The function calculates the monotonic clock from the realtime clock and
- * the wall_to_monotonic offset, subtracts the accumulated suspend time and
- * stores the result in normalized timespec64 format in the variable
- * pointed to by @ts.
- */
-void ktime_get_active_ts64(struct timespec64 *ts)
-{
-	struct timekeeper *tk = &tk_core.timekeeper;
-	struct timespec64 tomono, tsusp;
-	u64 nsec, nssusp;
-	unsigned int seq;
-
-	WARN_ON(timekeeping_suspended);
-
-	do {
-		seq = read_seqcount_begin(&tk_core.seq);
-		ts->tv_sec = tk->xtime_sec;
-		nsec = timekeeping_get_ns(&tk->tkr_mono);
-		tomono = tk->wall_to_monotonic;
-		nssusp = tk->time_suspended;
-	} while (read_seqcount_retry(&tk_core.seq, seq));
-
-	ts->tv_sec += tomono.tv_sec;
-	ts->tv_nsec = 0;
-	timespec64_add_ns(ts, nsec + tomono.tv_nsec);
-	tsusp = ns_to_timespec64(nssusp);
-	*ts = timespec64_sub(*ts, tsusp);
-}
-
 /**
  * ktime_get_seconds - Get the seconds portion of CLOCK_MONOTONIC
  *
@@ -1593,6 +1586,7 @@ static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
 		return;
 	}
 	tk_xtime_add(tk, delta);
+	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta));
 	tk_update_sleep_time(tk, timespec64_to_ktime(*delta));
 	tk_debug_account_sleep_time(delta);
 }
@@ -2125,7 +2119,7 @@ out:
 void getboottime64(struct timespec64 *ts)
 {
 	struct timekeeper *tk = &tk_core.timekeeper;
-	ktime_t t = ktime_sub(tk->offs_real, tk->time_suspended);
+	ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);
 
 	*ts = ktime_to_timespec64(t);
 }
@@ -2188,6 +2182,7 @@ void do_timer(unsigned long ticks)
 * ktime_get_update_offsets_now - hrtimer helper
 * @cwsseq: pointer to check and store the clock was set sequence number
 * @offs_real: pointer to storage for monotonic -> realtime offset
+ * @offs_boot: pointer to storage for monotonic -> boottime offset
 * @offs_tai: pointer to storage for monotonic -> clock tai offset
 *
 * Returns current monotonic time and updates the offsets if the
@@ -2197,7 +2192,7 @@ void do_timer(unsigned long ticks)
 * Called from hrtimer_interrupt() or retrigger_next_event()
 */
 ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
-				     ktime_t *offs_tai)
+				     ktime_t *offs_boot, ktime_t *offs_tai)
 {
 	struct timekeeper *tk = &tk_core.timekeeper;
 	unsigned int seq;
@@ -2214,6 +2209,7 @@ ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
 	if (*cwsseq != tk->clock_was_set_seq) {
 		*cwsseq = tk->clock_was_set_seq;
 		*offs_real = tk->offs_real;
+		*offs_boot = tk->offs_boot;
 		*offs_tai = tk->offs_tai;
 	}
 