@@ -3114,6 +3114,12 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 	    (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR))
 		return -EINVAL;
 
+	/* INITs are latched while in SMM */
+	if (events->flags & KVM_VCPUEVENT_VALID_SMM &&
+	    (events->smi.smm || events->smi.pending) &&
+	    vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED)
+		return -EINVAL;
+
 	process_nmi(vcpu);
 	vcpu->arch.exception.pending = events->exception.injected;
 	vcpu->arch.exception.nr = events->exception.nr;
@@ -7342,6 +7348,12 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 	    mp_state->mp_state != KVM_MP_STATE_RUNNABLE)
 		return -EINVAL;
 
+	/* INITs are latched while in SMM */
+	if ((is_smm(vcpu) || vcpu->arch.smi_pending) &&
+	    (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED ||
+	     mp_state->mp_state == KVM_MP_STATE_INIT_RECEIVED))
+		return -EINVAL;
+
 	if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
 		vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
 		set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events);