@@ -8630,17 +8630,20 @@ static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)
 
 static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
 {
-	u32 exit_intr_info;
+	u32 exit_intr_info = 0;
+	u16 basic_exit_reason = (u16)vmx->exit_reason;
 
-	if (!(vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY
-	      || vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI))
+	if (!(basic_exit_reason == EXIT_REASON_MCE_DURING_VMENTRY
+	      || basic_exit_reason == EXIT_REASON_EXCEPTION_NMI))
 		return;
 
-	vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
-	exit_intr_info = vmx->exit_intr_info;
+	if (!(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
+		exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+	vmx->exit_intr_info = exit_intr_info;
 
 	/* Handle machine checks before interrupts are enabled */
-	if (is_machine_check(exit_intr_info))
+	if (basic_exit_reason == EXIT_REASON_MCE_DURING_VMENTRY ||
+	    is_machine_check(exit_intr_info))
 		kvm_machine_check();
 
 	/* We need to handle NMIs before interrupts are enabled */