@@ -280,7 +280,7 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
/* Go back from cycles -> shifted ns */
tk->xtime_interval = interval * clock->mult;
tk->xtime_remainder = ntpinterval - tk->xtime_interval;
- tk->raw_interval = (interval * clock->mult) >> clock->shift;
+ tk->raw_interval = interval * clock->mult;

/* if changing clocks, convert xtime_nsec shift units */
if (old_clock) {
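
The first hunk changes the representation of tk->raw_interval: instead of
truncating (interval * clock->mult) down to whole nanoseconds once at setup
time, the value now stays in shifted nanoseconds, the same units
tk->xtime_interval already uses. Below is a standalone sketch of the
truncation error this avoids; the clocksource numbers are invented for
illustration and are not real kernel values:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t cycle_interval = 10; /* cycles per accumulation step */
		uint32_t mult = 25;           /* ns per cycle, scaled by 2^shift */
		uint32_t shift = 4;           /* each cycle is 25/16 = 1.5625 ns */

		uint64_t truncated = (cycle_interval * mult) >> shift; /* old: 15 */
		uint64_t shifted_ns = cycle_interval * mult;           /* new: 250 */

		/* After 1000 accumulation steps the old form has lost 625 ns. */
		printf("old: %llu ns\n", (unsigned long long)(truncated * 1000));
		printf("new: %llu ns\n",
		       (unsigned long long)((shifted_ns * 1000) >> shift));
		return 0;
	}

With the old representation every accumulation step silently dropped the
sub-nanosecond remainder, so CLOCK_MONOTONIC_RAW could drift relative to
the underlying counter.
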
@@ -1996,7 +1996,7 @@ static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
u32 shift, unsigned int *clock_set)
{
u64 interval = tk->cycle_interval << shift;
- u64 raw_nsecs;
+ u64 snsec_per_sec;

/* If the offset is smaller than a shifted interval, do nothing */
if (offset < interval)
@@ -2011,14 +2011,15 @@ static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
*clock_set |= accumulate_nsecs_to_secs(tk);

/* Accumulate raw time */
- raw_nsecs = (u64)tk->raw_interval << shift;
- raw_nsecs += tk->raw_time.tv_nsec;
- if (raw_nsecs >= NSEC_PER_SEC) {
- u64 raw_secs = raw_nsecs;
- raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
- tk->raw_time.tv_sec += raw_secs;
+ tk->tkr_raw.xtime_nsec += (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift;
+ tk->tkr_raw.xtime_nsec += tk->raw_interval << shift;
+ snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
+ while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) {
+ tk->tkr_raw.xtime_nsec -= snsec_per_sec;
+ tk->raw_time.tv_sec++;
}
- tk->raw_time.tv_nsec = raw_nsecs;
+ tk->raw_time.tv_nsec = tk->tkr_raw.xtime_nsec >> tk->tkr_raw.shift;
+ tk->tkr_raw.xtime_nsec -= (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift;

/* Accumulate error between NTP and clock interval */
tk->ntp_error += tk->ntp_tick << shift;
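
Taken together, the second hunk accumulates raw time in
tk->tkr_raw.xtime_nsec in shifted nanoseconds, rolls whole seconds over
against NSEC_PER_SEC << tkr_raw.shift, and only shifts down to
raw_time.tv_nsec (retaining the shifted sub-nanosecond remainder) when
publishing. The while loop takes the place of the old do_div(); with only
a bounded number of seconds accumulating per call, repeated subtraction
is enough. A standalone sketch of that flow, reusing the invented values
from the example above (not kernel code):

	#include <stdio.h>
	#include <stdint.h>

	#define NSEC_PER_SEC 1000000000ULL

	int main(void)
	{
		uint32_t shift = 4;           /* stand-in for tkr_raw.shift */
		uint64_t xtime_nsec = 0;      /* shifted sub-ns remainder */
		uint64_t tv_sec = 0, tv_nsec = 0;
		uint64_t raw_interval = 250;  /* shifted ns per step (15.625 ns) */
		uint64_t snsec_per_sec = NSEC_PER_SEC << shift;

		for (uint64_t step = 0; step < 100000000ULL; step++) {
			/* Fold the published ns back in, then accumulate. */
			xtime_nsec += tv_nsec << shift;
			xtime_nsec += raw_interval;

			while (xtime_nsec >= snsec_per_sec) {
				xtime_nsec -= snsec_per_sec;
				tv_sec++;
			}
			/* Publish whole ns, keep the shifted remainder. */
			tv_nsec = xtime_nsec >> shift;
			xtime_nsec -= tv_nsec << shift;
		}
		/* 1e8 steps * 15.625 ns = exactly 1.562500000 s. */
		printf("%llu.%09llu\n", (unsigned long long)tv_sec,
		       (unsigned long long)tv_nsec);
		return 0;
	}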