@@ -4544,12 +4544,6 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
 	__vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa);
 }
 
-static void vmx_flush_tlb_ept_only(struct kvm_vcpu *vcpu)
-{
-	if (enable_ept)
-		vmx_flush_tlb(vcpu, true);
-}
-
 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
 {
 	ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
@@ -9278,7 +9272,7 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
 	} else {
 		sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
 		sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
-		vmx_flush_tlb_ept_only(vcpu);
+		vmx_flush_tlb(vcpu, true);
 	}
 	vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
 
@@ -9306,7 +9300,7 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
 	    !nested_cpu_has2(get_vmcs12(&vmx->vcpu),
 			     SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
 		vmcs_write64(APIC_ACCESS_ADDR, hpa);
-		vmx_flush_tlb_ept_only(vcpu);
+		vmx_flush_tlb(vcpu, true);
 	}
 }
 
@@ -11220,7 +11214,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 		}
 	} else if (nested_cpu_has2(vmcs12,
 				   SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
-		vmx_flush_tlb_ept_only(vcpu);
+		vmx_flush_tlb(vcpu, true);
 	}
 
 	/*
@@ -12073,7 +12067,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 	} else if (!nested_cpu_has_ept(vmcs12) &&
 		   nested_cpu_has2(vmcs12,
 				   SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
-		vmx_flush_tlb_ept_only(vcpu);
+		vmx_flush_tlb(vcpu, true);
 	}
 
 	/* This is needed for same reason as it was needed in prepare_vmcs02 */
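
Note on the shape of the change: the `enable_ept` guard in the removed wrapper can be dropped at the call sites only because the flush path behind vmx_flush_tlb() is expected to dispatch on the EPT/VPID configuration itself. The kernel's actual __vmx_flush_tlb() body is not part of this diff, so the standalone C sketch below uses hypothetical names (flush_tlb, flush_tlb_ept_only, enable_ept as a local flag) purely to illustrate the pattern of inlining a thin wrapper whose guard is subsumed by the callee; it is not kernel code.

	/*
	 * Illustrative only: a wrapper whose guard adds nothing the callee
	 * cannot decide for itself can be removed and its call sites inlined,
	 * which is the shape of this patch. All names are stand-ins.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	static bool enable_ept = true;	/* stand-in for the module feature flag */

	/* The callee already dispatches on the feature flag. */
	static void flush_tlb(bool invalidate_gpa)
	{
		if (enable_ept && invalidate_gpa)
			printf("EPT-tagged flush\n");
		else
			printf("fallback (non-EPT) flush\n");
	}

	/* Redundant wrapper, analogous to the removed vmx_flush_tlb_ept_only(). */
	static void flush_tlb_ept_only(void)
	{
		if (enable_ept)
			flush_tlb(true);
	}

	int main(void)
	{
		flush_tlb_ept_only();	/* old call site */
		flush_tlb(true);	/* new call site after the patch */
		return 0;
	}

One behavioral nuance the sketch makes visible: when the flag is clear, the old wrapper skipped the flush entirely, while the direct call performs the fallback flush. Whether that extra flush is a no-op or an acceptable behavior change depends on the callee's implementation, which lies outside this diff.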