@@ -1239,6 +1239,11 @@ static inline bool cpu_has_vmx_invvpid_global(void)
 	return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
 }
 
+static inline bool cpu_has_vmx_invvpid(void)
+{
+	return vmx_capability.vpid & VMX_VPID_INVVPID_BIT;
+}
+
 static inline bool cpu_has_vmx_ept(void)
 {
 	return vmcs_config.cpu_based_2nd_exec_ctrl &
@@ -2753,7 +2758,6 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
 		SECONDARY_EXEC_RDTSCP |
 		SECONDARY_EXEC_DESC |
 		SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
-		SECONDARY_EXEC_ENABLE_VPID |
 		SECONDARY_EXEC_APIC_REGISTER_VIRT |
 		SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
 		SECONDARY_EXEC_WBINVD_EXITING |
@@ -2781,10 +2785,12 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
 	 * though it is treated as global context. The alternative is
 	 * not failing the single-context invvpid, and it is worse.
 	 */
-	if (enable_vpid)
+	if (enable_vpid) {
+		vmx->nested.nested_vmx_secondary_ctls_high |=
+			SECONDARY_EXEC_ENABLE_VPID;
 		vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT |
 			VMX_VPID_EXTENT_SUPPORTED_MASK;
-	else
+	} else
 		vmx->nested.nested_vmx_vpid_caps = 0;
 
 	if (enable_unrestricted_guest)
@@ -4024,6 +4030,12 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
 	__vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid);
 }
 
+static void vmx_flush_tlb_ept_only(struct kvm_vcpu *vcpu)
+{
+	if (enable_ept)
+		vmx_flush_tlb(vcpu);
+}
+
 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
 {
 	ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
@@ -6517,8 +6529,10 @@ static __init int hardware_setup(void)
 	if (boot_cpu_has(X86_FEATURE_NX))
 		kvm_enable_efer_bits(EFER_NX);
 
-	if (!cpu_has_vmx_vpid())
+	if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() ||
+	    !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
 		enable_vpid = 0;
+
 	if (!cpu_has_vmx_shadow_vmcs())
 		enable_shadow_vmcs = 0;
 	if (enable_shadow_vmcs)
@@ -8501,7 +8515,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
 	    && kvm_vmx_exit_handlers[exit_reason])
 		return kvm_vmx_exit_handlers[exit_reason](vcpu);
 	else {
-		WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_reason);
+		vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
+				exit_reason);
 		kvm_queue_exception(vcpu, UD_VECTOR);
 		return 1;
 	}
@@ -8547,6 +8562,7 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
 	} else {
 		sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
 		sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+		vmx_flush_tlb_ept_only(vcpu);
 	}
 	vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
 
@@ -8572,8 +8588,10 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
 	 */
 	if (!is_guest_mode(vcpu) ||
 	    !nested_cpu_has2(get_vmcs12(&vmx->vcpu),
-			     SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
+			     SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
 		vmcs_write64(APIC_ACCESS_ADDR, hpa);
+		vmx_flush_tlb_ept_only(vcpu);
+	}
 }
 
 static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
@@ -9974,7 +9992,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u32 exec_control;
-	bool nested_ept_enabled = false;
 
 	vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
 	vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
@@ -10121,8 +10138,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 				vmcs12->guest_intr_status);
 	}
 
-	nested_ept_enabled = (exec_control & SECONDARY_EXEC_ENABLE_EPT) != 0;
-
 	/*
 	 * Write an illegal value to APIC_ACCESS_ADDR. Later,
 	 * nested_get_vmcs12_pages will either fix it up or
@@ -10255,6 +10270,9 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 	if (nested_cpu_has_ept(vmcs12)) {
 		kvm_mmu_unload(vcpu);
 		nested_ept_init_mmu_context(vcpu);
+	} else if (nested_cpu_has2(vmcs12,
+				   SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
+		vmx_flush_tlb_ept_only(vcpu);
 	}
 
 	/*
@@ -10282,12 +10300,10 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 	vmx_set_efer(vcpu, vcpu->arch.efer);
 
 	/* Shadow page tables on either EPT or shadow page tables. */
-	if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_ept_enabled,
+	if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
 				entry_failure_code))
 		return 1;
 
-	kvm_mmu_reset_context(vcpu);
-
 	if (!enable_ept)
 		vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
 
@@ -11056,6 +11072,10 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 		vmx->nested.change_vmcs01_virtual_x2apic_mode = false;
 		vmx_set_virtual_x2apic_mode(vcpu,
 				vcpu->arch.apic_base & X2APIC_ENABLE);
+	} else if (!nested_cpu_has_ept(vmcs12) &&
+		   nested_cpu_has2(vmcs12,
+				   SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
+		vmx_flush_tlb_ept_only(vcpu);
 	}
 
 	/* This is needed for same reason as it was needed in prepare_vmcs02 */
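
Note on the hardware_setup() hunk: VPID is now refused unless the CPU also advertises the INVVPID instruction and at least one of the single-context or global-context invalidation extents, since a VPID that can never be invalidated would leave stale guest-tagged TLB entries behind. Below is a minimal user-space sketch of that gating, not kernel code: the bit layout mirrors arch/x86/include/asm/vmx.h (vmx_capability.vpid holds the high 32 bits of the IA32_VMX_EPT_VPID_CAP MSR), while vpid_usable() and the sample capability value are hypothetical, and the pre-existing cpu_has_vmx_vpid() check on the secondary exec controls is unchanged and omitted here.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors arch/x86/include/asm/vmx.h; MSR 0x48C bit numbers minus 32. */
#define VMX_VPID_INVVPID_BIT			(1u << 0)  /* MSR bit 32 */
#define VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT	(1u << 8)  /* MSR bit 40 */
#define VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT	(1u << 9)  /* MSR bit 41 */
#define VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT	(1u << 10) /* MSR bit 42 */

/* Same predicate the patch adds to hardware_setup(). */
static bool vpid_usable(uint32_t vpid_caps)
{
	if (!(vpid_caps & VMX_VPID_INVVPID_BIT))
		return false;	/* no INVVPID instruction at all */
	/* Need a scope broad enough to drop every entry tagged with a vpid. */
	return vpid_caps & (VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |
			    VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT);
}

int main(void)
{
	/* Hypothetical CPU: INVVPID present but only the individual-address
	 * extent; the patched hardware_setup() forces enable_vpid = 0 here. */
	uint32_t caps = VMX_VPID_INVVPID_BIT | VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT;

	printf("enable_vpid = %d\n", vpid_usable(caps));
	return 0;
}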
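
The vmx_flush_tlb_ept_only() call sites all guard the same hazard: with EPT enabled, the translation of the APIC-access page can be cached from the EPT paging structures, so moving APIC_ACCESS_ADDR or switching between x2APIC mode and APIC-access virtualization must invalidate those cached mappings, or a guest access may bypass the APIC-access VM exit; without EPT there is nothing EPT-derived to flush, which is why the helper is a no-op then. A hypothetical stand-alone rendering with stubbed types, just to make the guard's shape explicit:

#include <stdbool.h>
#include <stdio.h>

struct kvm_vcpu { int id; };		/* stub of the kernel type */
static bool enable_ept = true;		/* stand-in for the module parameter */

static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
{
	/* The real code calls __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid). */
	printf("vcpu%d: invalidate combined/guest-physical mappings\n", vcpu->id);
}

/* Same shape as the helper the patch adds to vmx.c. */
static void vmx_flush_tlb_ept_only(struct kvm_vcpu *vcpu)
{
	if (enable_ept)
		vmx_flush_tlb(vcpu);
}

int main(void)
{
	struct kvm_vcpu vcpu = { 0 };

	vmx_flush_tlb_ept_only(&vcpu);	/* flushes only when enable_ept */
	return 0;
}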