@@ -3134,9 +3134,17 @@ static __init int hardware_setup(void)
 	if (!cpu_has_vmx_unrestricted_guest())
 		enable_unrestricted_guest = 0;
 
-	if (!cpu_has_vmx_flexpriority())
+	if (!cpu_has_vmx_flexpriority()) {
 		flexpriority_enabled = 0;
 
+		/*
+		 * set_apic_access_page_addr() is used to reload apic access
+		 * page upon invalidation.  No need to do anything if the
+		 * processor does not have the APIC_ACCESS_ADDR VMCS field.
+		 */
+		kvm_x86_ops->set_apic_access_page_addr = NULL;
+	}
+
 	if (!cpu_has_vmx_tpr_shadow())
 		kvm_x86_ops->update_cr8_intercept = NULL;
 
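Nulling the hook makes it optional, so the common x86 code has to check it before every call. A minimal sketch of that guard, assuming the arch-neutral caller reaches the hook through kvm_x86_ops (the common-code side is not part of this hunk):

	if (kvm_x86_ops->set_apic_access_page_addr)
		kvm_x86_ops->set_apic_access_page_addr(vcpu, hpa);

Without flexpriority there is no APIC_ACCESS_ADDR field to rewrite, so skipping the callback is the correct no-op.
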
@@ -4557,9 +4565,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 		vmcs_write32(TPR_THRESHOLD, 0);
 	}
 
-	if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
-		vmcs_write64(APIC_ACCESS_ADDR,
-			     page_to_phys(vmx->vcpu.kvm->arch.apic_access_page));
+	kvm_vcpu_reload_apic_access_page(vcpu);
 
 	if (vmx_vm_has_apicv(vcpu->kvm))
 		memset(&vmx->pi_desc, 0, sizeof(struct pi_desc));
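kvm_vcpu_reload_apic_access_page() is the common-code helper this series adds; only its call sites appear in this file. A sketch of its expected shape, reconstructed from those call sites (the gfn_to_page()-based re-pinning is an assumption taken from the companion patches, not from this hunk):

static void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
{
	struct page *page;

	/* Hook is NULL when the CPU lacks flexpriority; see hardware_setup(). */
	if (!kvm_x86_ops->set_apic_access_page_addr)
		return;

	/*
	 * Re-translate the fixed APIC gfn.  If the backing page is being
	 * migrated, gfn_to_page() blocks until the new pte is in place,
	 * so the hpa handed to the hook is the post-migration one.
	 */
	page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
	kvm_x86_ops->set_apic_access_page_addr(vcpu, page_to_phys(page));
}

Compared with the removed lines, the gain is that vmx_vcpu_reset() no longer bakes in a possibly stale kvm->arch.apic_access_page pointer; the address is recomputed on every reload.
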
@@ -7198,6 +7204,29 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
 	vmx_set_msr_bitmap(vcpu);
 }
 
+static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	/*
+	 * Currently we do not handle the nested case where L2 has an
+	 * APIC access page of its own; that page is still pinned.
+	 * Hence, we skip the case where the VCPU is in guest mode _and_
+	 * L1 prepared an APIC access page for L2.
+	 *
+	 * For the case where L1 and L2 share the same APIC access page
+	 * (flexpriority=Y but SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES clear
+	 * in the vmcs12), this function will only update either the vmcs01
+	 * or the vmcs02.  If the former, the vmcs02 will be updated by
+	 * prepare_vmcs02.  If the latter, the vmcs01 will be updated in
+	 * the next L2->L1 exit.
+	 */
+	if (!is_guest_mode(vcpu) ||
+	    !nested_cpu_has2(vmx->nested.current_vmcs12,
+			     SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
+		vmcs_write64(APIC_ACCESS_ADDR, hpa);
+}
+
 static void vmx_hwapic_isr_update(struct kvm *kvm, int isr)
 {
 	u16 status;
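The guard above leans on nested_cpu_has2() to test a secondary execution control in the vmcs12. That predicate already exists in vmx.c and amounts to roughly:

static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
{
	return (vmcs12->cpu_based_vm_exec_control &
			CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
		(vmcs12->secondary_vm_exec_control & bit);
}

So the hpa is written only when the currently loaded VMCS maps the L0-managed page: either the vCPU is in L1 (vmcs01), or L2 has no APIC access page of its own and shares L1's through the vmcs02. When L1 supplied its own page for L2, that page stays pinned and nothing is rewritten here.
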
@@ -8140,8 +8169,7 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 	} else if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm)) {
 		exec_control |=
 			SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
-		vmcs_write64(APIC_ACCESS_ADDR,
-			     page_to_phys(vcpu->kvm->arch.apic_access_page));
+		kvm_vcpu_reload_apic_access_page(vcpu);
 	}
 
 	vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
@@ -8949,6 +8977,12 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 		vmx->nested.virtual_apic_page = NULL;
 	}
 
+	/*
+	 * While we ran in L2, mmu_notifier-forced reloads of the page's
+	 * hpa hit the L2 vmcs only.  Reload it for L1 before entering L1.
+	 */
+	kvm_vcpu_reload_apic_access_page(vcpu);
+
 	/*
 	 * Exiting from L2 to L1, we're now back to L1 which thinks it just
 	 * finished a VMLAUNCH or VMRESUME instruction, so we need to set the
|
|
|
.enable_irq_window = enable_irq_window,
|
|
|
.update_cr8_intercept = update_cr8_intercept,
|
|
|
.set_virtual_x2apic_mode = vmx_set_virtual_x2apic_mode,
|
|
|
+ .set_apic_access_page_addr = vmx_set_apic_access_page_addr,
|
|
|
.vm_has_apicv = vmx_vm_has_apicv,
|
|
|
.load_eoi_exitmap = vmx_load_eoi_exitmap,
|
|
|
.hwapic_irr_update = vmx_hwapic_irr_update,
|
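
Registering the callback completes the vmx side; a backend that leaves .set_apic_access_page_addr out of its kvm_x86_ops initializer gets NULL there, the same state hardware_setup() forces on flexpriority-less CPUs. On the servicing side, the common vcpu_enter_guest() loop is expected to consume the request bit roughly like this (a sketch; the request name and its placement are assumptions from the companion patches):

	if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu))
		kvm_vcpu_reload_apic_access_page(vcpu);

With that in place, the reload always runs in vCPU context with the right VMCS loaded, which is what lets vmx_set_apic_access_page_addr() use a plain vmcs_write64().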