@@ -1443,10 +1443,10 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 	struct kvm *kvm = vcpu->kvm;
 	u64 offset, ns, elapsed;
 	unsigned long flags;
-	s64 usdiff;
 	bool matched;
 	bool already_matched;
 	u64 data = msr->data;
+	bool synchronizing = false;
 
 	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
 	offset = kvm_compute_tsc_offset(vcpu, data);
@@ -1454,51 +1454,25 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 	elapsed = ns - kvm->arch.last_tsc_nsec;
 
 	if (vcpu->arch.virtual_tsc_khz) {
-		int faulted = 0;
-
-		/* n.b - signed multiplication and division required */
-		usdiff = data - kvm->arch.last_tsc_write;
-#ifdef CONFIG_X86_64
-		usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz;
-#else
-		/* do_div() only does unsigned */
-		asm("1: idivl %[divisor]\n"
-		    "2: xor %%edx, %%edx\n"
-		    "   movl $0, %[faulted]\n"
-		    "3:\n"
-		    ".section .fixup,\"ax\"\n"
-		    "4: movl $1, %[faulted]\n"
-		    "   jmp  3b\n"
-		    ".previous\n"
-
-		    _ASM_EXTABLE(1b, 4b)
-
-		    : "=A"(usdiff), [faulted] "=r" (faulted)
-		    : "A"(usdiff * 1000), [divisor] "rm"(vcpu->arch.virtual_tsc_khz));
-
-#endif
-		do_div(elapsed, 1000);
-		usdiff -= elapsed;
-		if (usdiff < 0)
-			usdiff = -usdiff;
-
-		/* idivl overflow => difference is larger than USEC_PER_SEC */
-		if (faulted)
-			usdiff = USEC_PER_SEC;
-	} else
-		usdiff = USEC_PER_SEC; /* disable TSC match window below */
+		u64 tsc_exp = kvm->arch.last_tsc_write +
+					nsec_to_cycles(vcpu, elapsed);
+		u64 tsc_hz = vcpu->arch.virtual_tsc_khz * 1000LL;
+		/*
+		 * Special case: TSC write with a small delta (1 second)
+		 * of virtual cycle time against real time is
+		 * interpreted as an attempt to synchronize the CPU.
+		 */
+		synchronizing = data < tsc_exp + tsc_hz &&
+				data + tsc_hz > tsc_exp;
+	}
 
 	/*
-	 * Special case: TSC write with a small delta (1 second) of virtual
-	 * cycle time against real time is interpreted as an attempt to
-	 * synchronize the CPU.
-	 *
 	 * For a reliable TSC, we can match TSC offsets, and for an unstable
 	 * TSC, we add elapsed time in this computation. We could let the
 	 * compensation code attempt to catch up if we fall behind, but
 	 * it's better to try to match offsets from the beginning.
 	 */
-	if (usdiff < USEC_PER_SEC &&
+	if (synchronizing &&
 	    vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
 		if (!check_tsc_unstable()) {
 			offset = kvm->arch.cur_tsc_offset;