|
@@ -951,7 +951,7 @@ core_initcall(cpufreq_tsc);
|
|
|
static struct clocksource clocksource_tsc;
|
|
|
|
|
|
/*
|
|
|
- * We compare the TSC to the cycle_last value in the clocksource
|
|
|
+ * We used to compare the TSC to the cycle_last value in the clocksource
|
|
|
* structure to avoid a nasty time-warp. This can be observed in a
|
|
|
* very small window right after one CPU updated cycle_last under
|
|
|
* xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
|
|
@@ -961,26 +961,23 @@ static struct clocksource clocksource_tsc;
|
|
|
* due to the unsigned delta calculation of the time keeping core
|
|
|
* code, which is necessary to support wrapping clocksources like pm
|
|
|
* timer.
|
|
|
+ *
|
|
|
+ * This sanity check is now done in the core timekeeping code,
|
|
|
+ * checking the result of read_tsc() - cycle_last for being negative.
|
|
|
+ * That works because CLOCKSOURCE_MASK(64) does not mask out any bit.
|
|
|
*/
|
|
|
static cycle_t read_tsc(struct clocksource *cs)
|
|
|
{
|
|
|
- cycle_t ret = (cycle_t)get_cycles();
|
|
|
-
|
|
|
- return ret >= clocksource_tsc.cycle_last ?
|
|
|
- ret : clocksource_tsc.cycle_last;
|
|
|
-}
|
|
|
-
|
|
|
-static void resume_tsc(struct clocksource *cs)
|
|
|
-{
|
|
|
- if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
|
|
|
- clocksource_tsc.cycle_last = 0;
|
|
|
+ return (cycle_t)get_cycles();
|
|
|
}
|
|
|
|
|
|
+/*
|
|
|
+ * .mask MUST be CLOCKSOURCE_MASK(64). See comment above read_tsc().
|
|
|
+ */
|
|
|
static struct clocksource clocksource_tsc = {
|
|
|
.name = "tsc",
|
|
|
.rating = 300,
|
|
|
.read = read_tsc,
|
|
|
- .resume = resume_tsc,
|
|
|
.mask = CLOCKSOURCE_MASK(64),
|
|
|
.flags = CLOCK_SOURCE_IS_CONTINUOUS |
|
|
|
CLOCK_SOURCE_MUST_VERIFY,
|