@@ -2122,6 +2122,12 @@ static void kvmclock_reset(struct kvm_vcpu *vcpu)
 	vcpu->arch.pv_time_enabled = false;
 }
 
+static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
+{
+	++vcpu->stat.tlb_flush;
+	kvm_x86_ops->tlb_flush(vcpu, invalidate_gpa);
+}
+
 static void record_steal_time(struct kvm_vcpu *vcpu)
 {
 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
@@ -2131,7 +2137,12 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
 		return;
 
-	vcpu->arch.st.steal.preempted = 0;
+	/*
+	 * Doing a TLB flush here, on the guest's behalf, can avoid
+	 * expensive IPIs.
+	 */
+	if (xchg(&vcpu->arch.st.steal.preempted, 0) & KVM_VCPU_FLUSH_TLB)
+		kvm_vcpu_flush_tlb(vcpu, false);
 
 	if (vcpu->arch.st.steal.version & 1)
 		vcpu->arch.st.steal.version += 1; /* first time write, random junk */
@@ -6781,12 +6792,6 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
 	kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap);
 }
 
-static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
-{
-	++vcpu->stat.tlb_flush;
-	kvm_x86_ops->tlb_flush(vcpu, invalidate_gpa);
-}
-
 void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
 					    unsigned long start, unsigned long end)
 {
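For context, the xchg() added to record_steal_time() is the host half of a paravirtual TLB-shootdown handshake: the guest requests a deferred flush by setting KVM_VCPU_FLUSH_TLB in a preempted vCPU's steal-time `preempted` byte, and the host performs the flush when that vCPU is scheduled back in. Below is a minimal sketch of what the guest-side counterpart could look like; it is not part of this patch. It assumes a KVM_VCPU_PREEMPTED flag that the host sets in `preempted` when it preempts a vCPU, the per-cpu `steal_time` area shared with the host via MSR_KVM_STEAL_TIME, and a preallocated per-cpu scratch cpumask (`__pv_tlb_mask` is an assumed name).

/*
 * Guest-side sketch (assumption, not part of this patch): skip the
 * IPI for any destination vCPU the host has marked preempted, and
 * instead queue a flush that the host's record_steal_time() will
 * perform when the vCPU next runs.
 */
static void kvm_flush_tlb_others(const struct cpumask *cpumask,
				 const struct flush_tlb_info *info)
{
	u8 state;
	int cpu;
	struct kvm_steal_time *src;
	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_tlb_mask);

	cpumask_copy(flushmask, cpumask);
	for_each_cpu(cpu, flushmask) {
		src = &per_cpu(steal_time, cpu);
		state = READ_ONCE(src->preempted);
		if (state & KVM_VCPU_PREEMPTED) {
			/*
			 * Queue the flush for the host to run; only
			 * drop the CPU from the IPI mask if the flag
			 * was set while the vCPU was still preempted.
			 */
			if (cmpxchg(&src->preempted, state,
				    state | KVM_VCPU_FLUSH_TLB) == state)
				__cpumask_clear_cpu(cpu, flushmask);
		}
	}

	native_flush_tlb_others(flushmask, info);
}

Using cmpxchg() against the snapshotted state keeps the guest's update atomic with respect to the host's xchg() above: if the vCPU is rescheduled in between, the cmpxchg() fails, the CPU stays in the mask, and the guest falls back to the ordinary IPI for it, so a requested flush is never lost.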