@@ -4631,22 +4631,8 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
 
 static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
 {
-	if (is_guest_mode(vcpu)) {
-		if (to_vmx(vcpu)->nested.nested_run_pending)
-			return 0;
-		if (nested_exit_on_nmi(vcpu)) {
-			nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
-					  NMI_VECTOR | INTR_TYPE_NMI_INTR |
-					  INTR_INFO_VALID_MASK, 0);
-			/*
-			 * The NMI-triggered VM exit counts as injection:
-			 * clear this one and block further NMIs.
-			 */
-			vcpu->arch.nmi_pending = 0;
-			vmx_set_nmi_mask(vcpu, true);
-			return 0;
-		}
-	}
+	if (to_vmx(vcpu)->nested.nested_run_pending)
+		return 0;
 
 	if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked)
 		return 0;
@@ -4658,19 +4644,8 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
 
 static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
 {
-	if (is_guest_mode(vcpu)) {
-		if (to_vmx(vcpu)->nested.nested_run_pending)
-			return 0;
-		if (nested_exit_on_intr(vcpu)) {
-			nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT,
-					  0, 0);
-			/*
-			 * fall through to normal code, but now in L1, not L2
-			 */
-		}
-	}
-
-	return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
+	return (!to_vmx(vcpu)->nested.nested_run_pending &&
+		vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
 		!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
 		  (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
 }
@@ -8172,6 +8147,35 @@ static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
 	}
 }
 
+static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) {
+		if (vmx->nested.nested_run_pending)
+			return -EBUSY;
+		nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
+				  NMI_VECTOR | INTR_TYPE_NMI_INTR |
+				  INTR_INFO_VALID_MASK, 0);
+		/*
+		 * The NMI-triggered VM exit counts as injection:
+		 * clear this one and block further NMIs.
+		 */
+		vcpu->arch.nmi_pending = 0;
+		vmx_set_nmi_mask(vcpu, true);
+		return 0;
+	}
+
+	if ((kvm_cpu_has_interrupt(vcpu) || external_intr) &&
+	    nested_exit_on_intr(vcpu)) {
+		if (vmx->nested.nested_run_pending)
+			return -EBUSY;
+		nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
+	}
+
+	return 0;
+}
+
 /*
  * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
  * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
@@ -8512,6 +8516,9 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 	nested_vmx_succeed(vcpu);
 	if (enable_shadow_vmcs)
 		vmx->nested.sync_shadow_vmcs = true;
+
+	/* in case we halted in L2 */
+	vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 }
 
 /*
@@ -8652,6 +8659,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.check_intercept = vmx_check_intercept,
 	.handle_external_intr = vmx_handle_external_intr,
 	.mpx_supported = vmx_mpx_supported,
+
+	.check_nested_events = vmx_check_nested_events,
 };
 
 static int __init vmx_init(void)