@@ -26,41 +26,26 @@
 
 #define gtod (&VVAR(vsyscall_gtod_data))
 
-notrace static cycle_t vread_tsc(void)
+static notrace cycle_t vread_hpet(void)
 {
-	cycle_t ret;
-	u64 last;
-
-	/*
-	 * Empirically, a fence (of type that depends on the CPU)
-	 * before rdtsc is enough to ensure that rdtsc is ordered
-	 * with respect to loads. The various CPU manuals are unclear
-	 * as to whether rdtsc can be reordered with later loads,
-	 * but no one has ever seen it happen.
-	 */
-	rdtsc_barrier();
-	ret = (cycle_t)vget_cycles();
-
-	last = VVAR(vsyscall_gtod_data).clock.cycle_last;
-
-	if (likely(ret >= last))
-		return ret;
+	return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + HPET_COUNTER);
+}
 
-	/*
-	 * GCC likes to generate cmov here, but this branch is extremely
-	 * predictable (it's just a funciton of time and the likely is
-	 * very likely) and there's a data dependence, so force GCC
-	 * to generate a branch instead. I don't barrier() because
-	 * we don't actually need a barrier, and if this function
-	 * ever gets inlined it will generate worse code.
-	 */
-	asm volatile ("");
-	return last;
+notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
+{
+	long ret;
+	asm("syscall" : "=a" (ret) :
+	    "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
+	return ret;
 }
 
-static notrace cycle_t vread_hpet(void)
+notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
 {
-	return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + HPET_COUNTER);
+	long ret;
+
+	asm("syscall" : "=a" (ret) :
+	    "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
+	return ret;
 }
 
 #ifdef CONFIG_PARAVIRT_CLOCK
@@ -133,23 +118,37 @@ static notrace cycle_t vread_pvclock(int *mode)
 }
 #endif
 
-notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
+notrace static cycle_t vread_tsc(void)
 {
-	long ret;
-	asm("syscall" : "=a" (ret) :
-	    "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
-	return ret;
-}
+	cycle_t ret;
+	u64 last;
 
-notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
-{
-	long ret;
+	/*
+	 * Empirically, a fence (of type that depends on the CPU)
+	 * before rdtsc is enough to ensure that rdtsc is ordered
+	 * with respect to loads. The various CPU manuals are unclear
+	 * as to whether rdtsc can be reordered with later loads,
+	 * but no one has ever seen it happen.
+	 */
+	rdtsc_barrier();
+	ret = (cycle_t)vget_cycles();
 
-	asm("syscall" : "=a" (ret) :
-	    "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
-	return ret;
-}
+	last = VVAR(vsyscall_gtod_data).clock.cycle_last;
 
+	if (likely(ret >= last))
+		return ret;
+
+	/*
+	 * GCC likes to generate cmov here, but this branch is extremely
+	 * predictable (it's just a funciton of time and the likely is
+	 * very likely) and there's a data dependence, so force GCC
+	 * to generate a branch instead. I don't barrier() because
+	 * we don't actually need a barrier, and if this function
+	 * ever gets inlined it will generate worse code.
+	 */
+	asm volatile ("");
+	return last;
+}
 
 notrace static inline u64 vgetsns(int *mode)
 {