vclock_gettime.c

/*
 * Copyright 2006 Andi Kleen, SUSE Labs.
 * Subject to the GNU Public License, v.2
 *
 * Fast user context implementation of clock_gettime, gettimeofday, and time.
 *
 * 32 Bit compat layer by Stefani Seibold <stefani@seibold.net>
 * sponsored by Rohde & Schwarz GmbH & Co. KG Munich/Germany
 *
 * The code should have no internal unresolved relocations.
 * Check with readelf after changing.
 */

#include <uapi/linux/time.h>
#include <asm/vgtod.h>
#include <asm/vvar.h>
#include <asm/unistd.h>
#include <asm/msr.h>
#include <asm/pvclock.h>
#include <asm/mshyperv.h>
#include <linux/math64.h>
#include <linux/time.h>
#include <linux/kernel.h>
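
/*
 * Shorthand for the vsyscall_gtod_data instance that the kernel exports
 * read-only to userspace via the VVAR page.
 */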
#define gtod (&VVAR(vsyscall_gtod_data))

extern int __vdso_clock_gettime(clockid_t clock, struct timespec *ts);
extern int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz);
extern time_t __vdso_time(time_t *t);

#ifdef CONFIG_PARAVIRT_CLOCK
extern u8 pvclock_page
        __attribute__((visibility("hidden")));
#endif

#ifdef CONFIG_HYPERV_TSCPAGE
extern u8 hvclock_page
        __attribute__((visibility("hidden")));
#endif

#ifndef BUILD_VDSO32
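/*
 * Fallback path: when the vclock cannot be read from userspace (or the
 * clock ID is not handled here), issue the real clock_gettime() syscall.
 */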
notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
        long ret;

        asm ("syscall" : "=a" (ret), "=m" (*ts) :
             "0" (__NR_clock_gettime), "D" (clock), "S" (ts) :
             "rcx", "r11");
        return ret;
}

#else
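/*
 * 32-bit fallback: clock_gettime() via __kernel_vsyscall.  The clock ID
 * belongs in %ebx, which the asm saves in %edx and restores afterwards
 * rather than clobbering it directly (the vDSO is built as PIC code,
 * where %ebx conventionally holds the GOT pointer).
 */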
notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
        long ret;

        asm (
                "mov %%ebx, %%edx \n"
                "mov %[clock], %%ebx \n"
                "call __kernel_vsyscall \n"
                "mov %%edx, %%ebx \n"
                : "=a" (ret), "=m" (*ts)
                : "0" (__NR_clock_gettime), [clock] "g" (clock), "c" (ts)
                : "edx");
        return ret;
}
#endif

#ifdef CONFIG_PARAVIRT_CLOCK
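/*
 * The pvclock page is mapped into the process alongside the vDSO;
 * get_pvti0() returns vCPU 0's time info record from it.
 */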
static notrace const struct pvclock_vsyscall_time_info *get_pvti0(void)
{
        return (const struct pvclock_vsyscall_time_info *)&pvclock_page;
}

static notrace u64 vread_pvclock(void)
{
        const struct pvclock_vcpu_time_info *pvti = &get_pvti0()->pvti;
        u32 version;
        u64 ret;

        /*
         * Note: The kernel and hypervisor must guarantee that cpu ID
         * number maps 1:1 to per-CPU pvclock time info.
         *
         * Because the hypervisor is entirely unaware of guest userspace
         * preemption, it cannot guarantee that per-CPU pvclock time
         * info is updated if the underlying CPU changes or that its
         * version is increased whenever the underlying CPU changes.
         *
         * On KVM, we are guaranteed that pvti updates for any vCPU are
         * atomic as seen by *all* vCPUs.  This is an even stronger
         * guarantee than we get with a normal seqlock.
         *
         * On Xen, we don't appear to have that guarantee, but Xen still
         * supplies a valid seqlock using the version field.
         *
         * We only do pvclock vdso timing at all if
         * PVCLOCK_TSC_STABLE_BIT is set, and we interpret that bit to
         * mean that all vCPUs have matching pvti and that the TSC is
         * synced, so we can just look at vCPU 0's pvti.
         */
        do {
                version = pvclock_read_begin(pvti);

                if (unlikely(!(pvti->flags & PVCLOCK_TSC_STABLE_BIT)))
                        return U64_MAX;

                ret = __pvclock_read_cycles(pvti, rdtsc_ordered());
        } while (pvclock_read_retry(pvti, version));

        return ret;
}
#endif

#ifdef CONFIG_HYPERV_TSCPAGE
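/*
 * Read the Hyper-V TSC reference page; hv_read_tsc_page() converts the
 * raw TSC reading using the scale and offset the hypervisor publishes
 * in that page.
 */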
static notrace u64 vread_hvclock(void)
{
        const struct ms_hyperv_tsc_page *tsc_pg =
                (const struct ms_hyperv_tsc_page *)&hvclock_page;

        return hv_read_tsc_page(tsc_pg);
}
#endif
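
/*
 * Read the clocksource selected by the kernel.  A return value of
 * U64_MAX tells the caller to fall back to the real syscall.
 */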
notrace static inline u64 vgetcyc(int mode)
{
        if (mode == VCLOCK_TSC)
                return (u64)rdtsc_ordered();
#ifdef CONFIG_PARAVIRT_CLOCK
        else if (mode == VCLOCK_PVCLOCK)
                return vread_pvclock();
#endif
#ifdef CONFIG_HYPERV_TSCPAGE
        else if (mode == VCLOCK_HVCLOCK)
                return vread_hvclock();
#endif
        return U64_MAX;
}
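
/*
 * High resolution read: sample the clocksource inside the gtod seqcount
 * loop and scale the delta since gtod->cycle_last by mult/shift to get
 * nanoseconds on top of base->sec/base->nsec.  A negative cycle value
 * (U64_MAX from vgetcyc()) means the vclock is unusable, so punt to the
 * syscall.
 */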
notrace static int do_hres(clockid_t clk, struct timespec *ts)
{
        struct vgtod_ts *base = &gtod->basetime[clk];
        u64 cycles, last, sec, ns;
        unsigned int seq;

        do {
                seq = gtod_read_begin(gtod);
                cycles = vgetcyc(gtod->vclock_mode);
                ns = base->nsec;
                last = gtod->cycle_last;
                if (unlikely((s64)cycles < 0))
                        return vdso_fallback_gettime(clk, ts);
                if (cycles > last)
                        ns += (cycles - last) * gtod->mult;
                ns >>= gtod->shift;
                sec = base->sec;
        } while (unlikely(gtod_read_retry(gtod, seq)));

        /*
         * Do this outside the loop: a race inside the loop could result
         * in __iter_div_u64_rem() being extremely slow.
         */
        ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
        ts->tv_nsec = ns;

        return 0;
}
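
/*
 * Coarse clocks simply copy the kernel-updated coarse timestamp under
 * the gtod seqcount; no clocksource read is involved, so this path
 * never needs the syscall fallback.
 */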
notrace static void do_coarse(clockid_t clk, struct timespec *ts)
{
        struct vgtod_ts *base = &gtod->basetime[clk];
        unsigned int seq;

        do {
                seq = gtod_read_begin(gtod);
                ts->tv_sec = base->sec;
                ts->tv_nsec = base->nsec;
        } while (unlikely(gtod_read_retry(gtod, seq)));
}
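
/*
 * Main entry point: dispatch the clock ID to the high resolution or
 * coarse path, and fall back to the syscall for anything not handled
 * in the vDSO (including negative CPU/FD clock IDs).
 */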
notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
{
        unsigned int msk;

        /* Sort out negative (CPU/FD) and invalid clocks */
        if (unlikely((unsigned int) clock >= MAX_CLOCKS))
                return vdso_fallback_gettime(clock, ts);

        /*
         * Convert the clockid to a bitmask and use it to check which
         * clocks are handled in the VDSO directly.
         */
        msk = 1U << clock;
        if (likely(msk & VGTOD_HRES)) {
                return do_hres(clock, ts);
        } else if (msk & VGTOD_COARSE) {
                do_coarse(clock, ts);
                return 0;
        }
        return vdso_fallback_gettime(clock, ts);
}

int clock_gettime(clockid_t, struct timespec *)
        __attribute__((weak, alias("__vdso_clock_gettime")));
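
/*
 * gettimeofday() reuses do_hres(): it relies on struct timeval having
 * the same layout as struct timespec, fills it in place for
 * CLOCK_REALTIME, and then converts the nanoseconds to microseconds.
 */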
notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
        if (likely(tv != NULL)) {
                struct timespec *ts = (struct timespec *) tv;

                do_hres(CLOCK_REALTIME, ts);
                tv->tv_usec /= 1000;
        }
        if (unlikely(tz != NULL)) {
                tz->tz_minuteswest = gtod->tz_minuteswest;
                tz->tz_dsttime = gtod->tz_dsttime;
        }

        return 0;
}
int gettimeofday(struct timeval *, struct timezone *)
        __attribute__((weak, alias("__vdso_gettimeofday")));

/*
 * This will break when the xtime seconds get inaccurate, but that is
 * unlikely.
 */
notrace time_t __vdso_time(time_t *t)
{
        /* This is atomic on x86 so we don't need any locks. */
        time_t result = READ_ONCE(gtod->basetime[CLOCK_REALTIME].sec);

        if (t)
                *t = result;
        return result;
}
time_t time(time_t *t)
        __attribute__((weak, alias("__vdso_time")));