@@ -1215,6 +1215,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 	unsigned long flags;
 	s64 usdiff;
 	bool matched;
+	bool already_matched;
 	u64 data = msr->data;
 
 	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
@@ -1279,6 +1280,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 			pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
 		}
 		matched = true;
+		already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation);
 	} else {
 		/*
 		 * We split periods of matched TSC writes into generations.
@@ -1294,7 +1296,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 		kvm->arch.cur_tsc_write = data;
 		kvm->arch.cur_tsc_offset = offset;
 		matched = false;
-		pr_debug("kvm: new tsc generation %u, clock %llu\n",
+		pr_debug("kvm: new tsc generation %llu, clock %llu\n",
 			 kvm->arch.cur_tsc_generation, data);
 	}
 
@@ -1319,10 +1321,11 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
 
 	spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
-	if (matched)
-		kvm->arch.nr_vcpus_matched_tsc++;
-	else
+	if (!matched) {
 		kvm->arch.nr_vcpus_matched_tsc = 0;
+	} else if (!already_matched) {
+		kvm->arch.nr_vcpus_matched_tsc++;
+	}
 
 	kvm_track_tsc_matching(vcpu);
 	spin_unlock(&kvm->arch.pvclock_gtod_sync_lock);
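
For context outside the kernel tree: the hunks above make a vCPU bump nr_vcpus_matched_tsc only on its first matching write within the current TSC generation, rather than on every matching write, while any non-matching write still resets the counter. The standalone C sketch below reproduces just that counting rule; tsc_state, account_tsc_write and the local field names are illustrative stand-ins, not KVM code, and the generation bump is condensed into the same helper for brevity.

#include <stdbool.h>
#include <stdio.h>

struct tsc_state {
	unsigned long long cur_gen;	/* stands in for kvm->arch.cur_tsc_generation */
	unsigned int nr_matched;	/* stands in for kvm->arch.nr_vcpus_matched_tsc */
};

/* One TSC write from a vCPU whose last-seen generation is *vcpu_gen. */
static void account_tsc_write(struct tsc_state *s, unsigned long long *vcpu_gen,
			      bool matched)
{
	/* true if this vCPU was already counted for the current generation */
	bool already_matched = (*vcpu_gen == s->cur_gen);

	if (!matched) {
		s->cur_gen++;		/* a non-matching write opens a new generation */
		s->nr_matched = 0;
	} else if (!already_matched) {
		s->nr_matched++;	/* count each vCPU at most once per generation */
	}
	*vcpu_gen = s->cur_gen;		/* stands in for vcpu->arch.this_tsc_generation */
}

int main(void)
{
	struct tsc_state s = { .cur_gen = 1, .nr_matched = 0 };
	unsigned long long vcpu0_gen = 0, vcpu1_gen = 0;

	account_tsc_write(&s, &vcpu0_gen, true);	/* counted: 1 */
	account_tsc_write(&s, &vcpu0_gen, true);	/* repeat match, not recounted: 1 */
	account_tsc_write(&s, &vcpu1_gen, true);	/* second vCPU counted: 2 */
	printf("matched vcpus: %u\n", s.nr_matched);
	return 0;
}

Without the already_matched check, the second write from vcpu0 in this example would push the counter to 3 with only two vCPUs present, which is the over-count the patch removes.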