@@ -3101,6 +3101,11 @@ static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+static int kvm_vcpu_ioctl_smi(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
+
 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
 					   struct kvm_tpr_access_ctl *tac)
 {
@@ -3206,8 +3211,15 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 
 	events->sipi_vector = 0; /* never valid when reporting to user space */
 
+	events->smi.smm = is_smm(vcpu);
+	events->smi.pending = vcpu->arch.smi_pending;
+	events->smi.smm_inside_nmi =
+		!!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK);
+	events->smi.latched_init = kvm_lapic_latched_init(vcpu);
+
 	events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
-			 | KVM_VCPUEVENT_VALID_SHADOW);
+			 | KVM_VCPUEVENT_VALID_SHADOW
+			 | KVM_VCPUEVENT_VALID_SMM);
 	memset(&events->reserved, 0, sizeof(events->reserved));
 }
 
@@ -3216,7 +3228,8 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 {
 	if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
 			      | KVM_VCPUEVENT_VALID_SIPI_VECTOR
-			      | KVM_VCPUEVENT_VALID_SHADOW))
+			      | KVM_VCPUEVENT_VALID_SHADOW
+			      | KVM_VCPUEVENT_VALID_SMM))
 		return -EINVAL;
 
 	process_nmi(vcpu);
@@ -3241,6 +3254,24 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 	    kvm_vcpu_has_lapic(vcpu))
 		vcpu->arch.apic->sipi_vector = events->sipi_vector;
 
+	if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
+		if (events->smi.smm)
+			vcpu->arch.hflags |= HF_SMM_MASK;
+		else
+			vcpu->arch.hflags &= ~HF_SMM_MASK;
+		vcpu->arch.smi_pending = events->smi.pending;
+		if (events->smi.smm_inside_nmi)
+			vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
+		else
+			vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK;
+		if (kvm_vcpu_has_lapic(vcpu)) {
+			if (events->smi.latched_init)
+				set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
+			else
+				clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
+		}
+	}
+
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
 
 	return 0;
@@ -3500,6 +3531,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		r = kvm_vcpu_ioctl_nmi(vcpu);
 		break;
 	}
+	case KVM_SMI: {
+		r = kvm_vcpu_ioctl_smi(vcpu);
+		break;
+	}
 	case KVM_SET_CPUID: {
 		struct kvm_cpuid __user *cpuid_arg = argp;
 		struct kvm_cpuid cpuid;
@@ -6182,6 +6217,7 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
 	struct kvm_run *kvm_run = vcpu->run;
 
 	kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
+	kvm_run->flags = is_smm(vcpu) ? KVM_RUN_X86_SMM : 0;
 	kvm_run->cr8 = kvm_get_cr8(vcpu);
 	kvm_run->apic_base = kvm_get_apic_base(vcpu);
 	if (irqchip_in_kernel(vcpu->kvm))
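
For context, a minimal userspace sketch of how the new API surface fits
together: the KVM_SMI ioctl (whose kernel handler is still a stub at this
point in the series), the KVM_VCPUEVENT_VALID_SMM flag for saving and
restoring SMM state through KVM_GET_VCPU_EVENTS / KVM_SET_VCPU_EVENTS, and
the KVM_RUN_X86_SMM bit reported in kvm_run->flags. The vcpu_fd descriptor,
the mapped run structure, and the smm_roundtrip() helper name are
assumptions for illustration, not part of the patch.

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

/* vcpu_fd and run are assumed to come from the usual KVM_CREATE_VCPU
 * and mmap() setup done elsewhere; errors collapse to -1 here. */
static int smm_roundtrip(int vcpu_fd, struct kvm_run *run)
{
	struct kvm_vcpu_events events;

	/* Request an SMI on this vCPU; KVM_SMI takes no argument
	 * (kvm_vcpu_ioctl_smi() above does nothing yet). */
	if (ioctl(vcpu_fd, KVM_SMI) < 0)
		return -1;

	/* Save: the smi.* fields are now filled in, and the kernel
	 * sets KVM_VCPUEVENT_VALID_SMM in events.flags. */
	memset(&events, 0, sizeof(events));
	if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0)
		return -1;

	/* Restore: KVM_VCPUEVENT_VALID_SMM must be present in flags or
	 * the smi.* fields are ignored; an unrecognized flag makes the
	 * ioctl fail with EINVAL, as per the check in the patch. */
	if (ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events) < 0)
		return -1;

	/* After each exit, kvm_run->flags reports whether the vCPU is
	 * currently in System Management Mode. */
	if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
		return -1;
	return !!(run->flags & KVM_RUN_X86_SMM);
}

A real VMM would gate this on the corresponding capability check advertised
elsewhere in the series rather than probing the EINVAL path.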
|