@@ -198,7 +198,8 @@ struct loaded_vmcs {
 	struct vmcs *vmcs;
 	struct vmcs *shadow_vmcs;
 	int cpu;
-	int launched;
+	bool launched;
+	bool nmi_known_unmasked;
 	struct list_head loaded_vmcss_on_cpu_link;
 };
 
@@ -5510,10 +5511,8 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-	if (!is_guest_mode(vcpu)) {
-		++vcpu->stat.nmi_injections;
-		vmx->nmi_known_unmasked = false;
-	}
+	++vcpu->stat.nmi_injections;
+	vmx->loaded_vmcs->nmi_known_unmasked = false;
 
 	if (vmx->rmode.vm86_active) {
 		if (kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0) != EMULATE_DONE)
@@ -5527,16 +5526,21 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
 
 static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
 {
-	if (to_vmx(vcpu)->nmi_known_unmasked)
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	bool masked;
+
+	if (vmx->loaded_vmcs->nmi_known_unmasked)
 		return false;
-	return vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
+	masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
+	vmx->loaded_vmcs->nmi_known_unmasked = !masked;
+	return masked;
 }
 
 static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-	vmx->nmi_known_unmasked = !masked;
+	vmx->loaded_vmcs->nmi_known_unmasked = !masked;
 	if (masked)
 		vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
 			      GUEST_INTR_STATE_NMI);
@@ -8736,7 +8740,7 @@ static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
 
 	idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
 
-	if (vmx->nmi_known_unmasked)
+	if (vmx->loaded_vmcs->nmi_known_unmasked)
 		return;
 	/*
 	 * Can't use vmx->exit_intr_info since we're not sure what
@@ -8760,7 +8764,7 @@ static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
 		vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
 			      GUEST_INTR_STATE_NMI);
 	else
-		vmx->nmi_known_unmasked =
+		vmx->loaded_vmcs->nmi_known_unmasked =
 			!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
 			  & GUEST_INTR_STATE_NMI);
 }
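
For readers skimming the diff: the patch moves the nmi_known_unmasked hint from struct vcpu_vmx into struct loaded_vmcs, so every access now goes through vmx->loaded_vmcs and the cached value always refers to the VMCS that is currently loaded. Below is a minimal user-space sketch (not kernel code) of the read-through caching pattern that the patched vmx_get_nmi_mask() follows: when the hint says NMIs are known to be unmasked, the field read is skipped, and any real read refreshes the hint. struct fake_vmcs and its hw_nmi_blocking member are illustrative stand-ins, not KVM identifiers.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for a loaded VMCS; not a KVM type. */
struct fake_vmcs {
	bool hw_nmi_blocking;		/* models GUEST_INTERRUPTIBILITY_INFO & GUEST_INTR_STATE_NMI */
	bool nmi_known_unmasked;	/* per-VMCS cached hint, as in struct loaded_vmcs */
};

/* Follows the shape of the patched vmx_get_nmi_mask(). */
static bool get_nmi_mask(struct fake_vmcs *vmcs)
{
	bool masked;

	if (vmcs->nmi_known_unmasked)		/* fast path: skip the "VMCS" read */
		return false;

	masked = vmcs->hw_nmi_blocking;		/* slow path: read the blocking state */
	vmcs->nmi_known_unmasked = !masked;	/* refresh the cached hint */
	return masked;
}

int main(void)
{
	struct fake_vmcs vmcs = { .hw_nmi_blocking = false, .nmi_known_unmasked = false };

	printf("masked=%d\n", get_nmi_mask(&vmcs));	/* reads the field, learns "unmasked" */
	printf("masked=%d\n", get_nmi_mask(&vmcs));	/* answered from the cached hint */
	return 0;
}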