@@ -178,6 +178,7 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
* to counteract clock drifting.
*/
tk->tkr.mult = clock->mult;
+ tk->ntp_err_mult = 0;
}

/* Timekeeper helper functions. */
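The hunks below delete timekeeping_bigadjust() and rework timekeeping_adjust(): timekeeping_freqadjust() derives a power-of-two multiplier step from the current per-tick error, timekeeping_apply_adjustment() applies that step with matching interval/offset scaling, and the slimmed-down timekeeping_adjust() adds a single +/-1 correction (tracked via ntp_err_mult) for accumulated error. As a rough standalone sketch of the step selection only (plain userspace C, not kernel code; mult_step() and the sample values are invented for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Mirror of the decision in timekeeping_freqadjust(): a positive error of
 * at most one cycle interval is left alone; anything else is corrected by
 * +/-(1 << adj), where adj counts how many halvings bring the error back
 * within one interval.  Returns the signed multiplier step to apply.
 */
static int64_t mult_step(int64_t tick_error, int64_t interval)
{
        uint32_t adj;
        bool negative;

        /* Small positive error: not worth correcting. */
        if (tick_error >= 0 && tick_error <= interval)
                return 0;

        negative = tick_error < 0;
        tick_error = llabs(tick_error);

        /* Halve the error until it fits within one interval. */
        for (adj = 0; tick_error > interval; adj++)
                tick_error >>= 1;

        return negative ? -((int64_t)1 << adj) : (int64_t)1 << adj;
}

int main(void)
{
        const int64_t interval = 1000000;       /* made-up cycle_interval */
        const int64_t errors[] = { 400000, -400000, 9000000, -70000000 };

        for (size_t i = 0; i < sizeof(errors) / sizeof(errors[0]); i++)
                printf("per-tick error %lld -> multiplier step %lld\n",
                       (long long)errors[i],
                       (long long)mult_step(errors[i], interval));
        return 0;
}

In the patch itself, timekeeping_apply_adjustment() shifts interval and offset by the same adj_scale, so xtime_interval, xtime_nsec and ntp_error stay consistent with the new mult.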
@@ -1257,125 +1258,34 @@ static int __init timekeeping_init_ops(void)
register_syscore_ops(&timekeeping_syscore_ops);
return 0;
}
-
device_initcall(timekeeping_init_ops);

/*
- * If the error is already larger, we look ahead even further
- * to compensate for late or lost adjustments.
- */
-static __always_inline int timekeeping_bigadjust(struct timekeeper *tk,
- s64 error, s64 *interval,
- s64 *offset)
-{
- s64 tick_error, i;
- u32 look_ahead, adj;
- s32 error2, mult;
-
- /*
- * Use the current error value to determine how much to look ahead.
- * The larger the error the slower we adjust for it to avoid problems
- * with losing too many ticks, otherwise we would overadjust and
- * produce an even larger error. The smaller the adjustment the
- * faster we try to adjust for it, as lost ticks can do less harm
- * here. This is tuned so that an error of about 1 msec is adjusted
- * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
- */
- error2 = tk->ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
- error2 = abs(error2);
- for (look_ahead = 0; error2 > 0; look_ahead++)
- error2 >>= 2;
-
- /*
- * Now calculate the error in (1 << look_ahead) ticks, but first
- * remove the single look ahead already included in the error.
- */
- tick_error = ntp_tick_length() >> (tk->ntp_error_shift + 1);
- tick_error -= tk->xtime_interval >> 1;
- error = ((error - tick_error) >> look_ahead) + tick_error;
-
- /* Finally calculate the adjustment shift value. */
- i = *interval;
- mult = 1;
- if (error < 0) {
- error = -error;
- *interval = -*interval;
- *offset = -*offset;
- mult = -1;
- }
- for (adj = 0; error > i; adj++)
- error >>= 1;
-
- *interval <<= adj;
- *offset <<= adj;
- return mult << adj;
-}
-
-/*
- * Adjust the multiplier to reduce the error value,
- * this is optimized for the most common adjustments of -1,0,1,
- * for other values we can do a bit more work.
+ * Apply a multiplier adjustment to the timekeeper
*/
-static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
+static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk,
+ s64 offset,
+ bool negative,
+ int adj_scale)
{
- s64 error, interval = tk->cycle_interval;
- int adj;
+ s64 interval = tk->cycle_interval;
+ s32 mult_adj = 1;

- /*
- * The point of this is to check if the error is greater than half
- * an interval.
- *
- * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs.
- *
- * Note we subtract one in the shift, so that error is really error*2.
- * This "saves" dividing(shifting) interval twice, but keeps the
- * (error > interval) comparison as still measuring if error is
- * larger than half an interval.
- *
- * Note: It does not "save" on aggravation when reading the code.
- */
- error = tk->ntp_error >> (tk->ntp_error_shift - 1);
- if (error > interval) {
- /*
- * We now divide error by 4(via shift), which checks if
- * the error is greater than twice the interval.
- * If it is greater, we need a bigadjust, if its smaller,
- * we can adjust by 1.
- */
- error >>= 2;
- if (likely(error <= interval))
- adj = 1;
- else
- adj = timekeeping_bigadjust(tk, error, &interval, &offset);
- } else {
- if (error < -interval) {
- /* See comment above, this is just switched for the negative */
- error >>= 2;
- if (likely(error >= -interval)) {
- adj = -1;
- interval = -interval;
- offset = -offset;
- } else {
- adj = timekeeping_bigadjust(tk, error, &interval, &offset);
- }
- } else {
- goto out_adjust;
- }
+ if (negative) {
+ mult_adj = -mult_adj;
+ interval = -interval;
+ offset = -offset;
}
+ mult_adj <<= adj_scale;
+ interval <<= adj_scale;
+ offset <<= adj_scale;

- if (unlikely(tk->tkr.clock->maxadj &&
- (tk->tkr.mult + adj > tk->tkr.clock->mult + tk->tkr.clock->maxadj))) {
- printk_deferred_once(KERN_WARNING
- "Adjusting %s more than 11%% (%ld vs %ld)\n",
- tk->tkr.clock->name, (long)tk->tkr.mult + adj,
- (long)tk->tkr.clock->mult + tk->tkr.clock->maxadj);
- }
/*
* So the following can be confusing.
*
- * To keep things simple, lets assume adj == 1 for now.
+ * To keep things simple, lets assume mult_adj == 1 for now.
*
- * When adj != 1, remember that the interval and offset values
+ * When mult_adj != 1, remember that the interval and offset values
* have been appropriately scaled so the math is the same.
*
* The basic idea here is that we're increasing the multiplier
@@ -1419,12 +1329,76 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
*
* XXX - TODO: Doc ntp_error calculation.
*/
- tk->tkr.mult += adj;
+ tk->tkr.mult += mult_adj;
tk->xtime_interval += interval;
tk->tkr.xtime_nsec -= offset;
tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;
+}
+
+/*
+ * Calculate the multiplier adjustment needed to match the frequency
+ * specified by NTP
+ */
+static __always_inline void timekeeping_freqadjust(struct timekeeper *tk,
+ s64 offset)
+{
+ s64 interval = tk->cycle_interval;
+ s64 xinterval = tk->xtime_interval;
+ s64 tick_error;
+ bool negative;
+ u32 adj;
+
+ /* Remove any current error adj from freq calculation */
+ if (tk->ntp_err_mult)
+ xinterval -= tk->cycle_interval;
+
+ /* Calculate current error per tick */
+ tick_error = ntp_tick_length() >> tk->ntp_error_shift;
+ tick_error -= (xinterval + tk->xtime_remainder);
+
+ /* Don't worry about correcting it if its small */
+ if (likely((tick_error >= 0) && (tick_error <= interval)))
+ return;
+
+ /* preserve the direction of correction */
+ negative = (tick_error < 0);
+
+ /* Sort out the magnitude of the correction */
+ tick_error = abs(tick_error);
+ for (adj = 0; tick_error > interval; adj++)
+ tick_error >>= 1;
+
+ /* scale the corrections */
+ timekeeping_apply_adjustment(tk, offset, negative, adj);
+}
+
+/*
+ * Adjust the timekeeper's multiplier to the correct frequency
+ * and also to reduce the accumulated error value.
+ */
+static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
+{
+ /* Correct for the current frequency error */
+ timekeeping_freqadjust(tk, offset);
+
+ /* Next make a small adjustment to fix any cumulative error */
+ if (!tk->ntp_err_mult && (tk->ntp_error > 0)) {
+ tk->ntp_err_mult = 1;
+ timekeeping_apply_adjustment(tk, offset, 0, 0);
+ } else if (tk->ntp_err_mult && (tk->ntp_error <= 0)) {
+ /* Undo any existing error adjustment */
+ timekeeping_apply_adjustment(tk, offset, 1, 0);
+ tk->ntp_err_mult = 0;
+ }
+
+ if (unlikely(tk->tkr.clock->maxadj &&
+ (tk->tkr.mult > tk->tkr.clock->mult + tk->tkr.clock->maxadj))) {
+ printk_once(KERN_WARNING
+ "Adjusting %s more than 11%% (%ld vs %ld)\n",
+ tk->tkr.clock->name, (long)tk->tkr.mult,
+ (long)tk->tkr.clock->mult + tk->tkr.clock->maxadj);
+ }

-out_adjust:
/*
* It may be possible that when we entered this function, xtime_nsec
* was very small. Further, if we're slightly speeding the clocksource
@@ -1444,7 +1418,6 @@ out_adjust:
tk->tkr.xtime_nsec = 0;
tk->ntp_error += neg << tk->ntp_error_shift;
}
-
}

/**
|