@@ -91,6 +91,7 @@ static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
 
 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
 static void process_nmi(struct kvm_vcpu *vcpu);
+static void process_smi(struct kvm_vcpu *vcpu);
 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
 
 struct kvm_x86_ops *kvm_x86_ops __read_mostly;
@@ -5302,13 +5303,8 @@ static void kvm_smm_changed(struct kvm_vcpu *vcpu)
 		/* This is a good place to trace that we are exiting SMM. */
 		trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, false);
 
-		if (unlikely(vcpu->arch.smi_pending)) {
-			kvm_make_request(KVM_REQ_SMI, vcpu);
-			vcpu->arch.smi_pending = 0;
-		} else {
-			/* Process a latched INIT, if any. */
-			kvm_make_request(KVM_REQ_EVENT, vcpu);
-		}
+		/* Process a latched INIT or SMI, if any. */
+		kvm_make_request(KVM_REQ_EVENT, vcpu);
 	}
 
 	kvm_mmu_reset_context(vcpu);
@@ -6108,7 +6104,10 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
 	}
 
 	/* try to inject new event if pending */
-	if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
+	if (vcpu->arch.smi_pending && !is_smm(vcpu)) {
+		vcpu->arch.smi_pending = false;
+		process_smi(vcpu);
+	} else if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
 		--vcpu->arch.nmi_pending;
 		vcpu->arch.nmi_injected = true;
 		kvm_x86_ops->set_nmi(vcpu);
@@ -6318,11 +6317,6 @@ static void process_smi(struct kvm_vcpu *vcpu)
 	char buf[512];
 	u32 cr0;
 
-	if (is_smm(vcpu)) {
-		vcpu->arch.smi_pending = true;
-		return;
-	}
-
 	trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
 	vcpu->arch.hflags |= HF_SMM_MASK;
 	memset(buf, 0, 512);
@@ -6385,6 +6379,12 @@ static void process_smi(struct kvm_vcpu *vcpu)
 	kvm_mmu_reset_context(vcpu);
 }
 
+static void process_smi_request(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.smi_pending = true;
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
+}
+
 void kvm_make_scan_ioapic_request(struct kvm *kvm)
 {
 	kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC);
@@ -6506,7 +6506,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
 			record_steal_time(vcpu);
 		if (kvm_check_request(KVM_REQ_SMI, vcpu))
-			process_smi(vcpu);
+			process_smi_request(vcpu);
 		if (kvm_check_request(KVM_REQ_NMI, vcpu))
 			process_nmi(vcpu);
 		if (kvm_check_request(KVM_REQ_PMU, vcpu))
@@ -6579,8 +6579,18 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
 	if (inject_pending_event(vcpu, req_int_win) != 0)
 		req_immediate_exit = true;
-	/* enable NMI/IRQ window open exits if needed */
 	else {
+		/* Enable NMI/IRQ window open exits if needed.
+		 *
+		 * SMIs have two cases: 1) they can be nested, and
+		 * then there is nothing to do here because RSM will
+		 * cause a vmexit anyway; 2) or the SMI can be pending
+		 * because inject_pending_event has completed the
+		 * injection of an IRQ or NMI from the previous vmexit,
+		 * and then we request an immediate exit to inject the SMI.
+		 */
+		if (vcpu->arch.smi_pending && !is_smm(vcpu))
+			req_immediate_exit = true;
 		if (vcpu->arch.nmi_pending)
 			kvm_x86_ops->enable_nmi_window(vcpu);
 		if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
@@ -6631,8 +6641,10 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
 	kvm_load_guest_xcr0(vcpu);
 
-	if (req_immediate_exit)
+	if (req_immediate_exit) {
+		kvm_make_request(KVM_REQ_EVENT, vcpu);
 		smp_send_reschedule(vcpu->cpu);
+	}
 
 	trace_kvm_entry(vcpu->vcpu_id);
 	wait_lapic_expire(vcpu);
@@ -7433,6 +7445,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 {
 	vcpu->arch.hflags = 0;
 
+	vcpu->arch.smi_pending = 0;
 	atomic_set(&vcpu->arch.nmi_queued, 0);
 	vcpu->arch.nmi_pending = 0;
 	vcpu->arch.nmi_injected = false;
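
The comment added to vcpu_enter_guest above summarizes the two SMI cases this patch distinguishes. As a stand-alone illustration of the resulting flow, the following is a toy user-space C model, not kernel code; every type and function name in it is invented for this sketch. It shows the mechanism the patch introduces: KVM_REQ_SMI now only latches smi_pending and raises KVM_REQ_EVENT, and the actual SMM entry happens at event-injection time, ahead of NMIs, and only when the vCPU is not already in SMM.

/*
 * Toy user-space model of the latched-SMI flow from this patch.
 * All names are illustrative; this is NOT the kernel implementation.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_vcpu {
	bool in_smm;        /* models is_smm(vcpu) / HF_SMM_MASK */
	bool smi_pending;   /* models vcpu->arch.smi_pending */
	int  nmi_pending;   /* models vcpu->arch.nmi_pending */
};

/* Models process_smi_request(): latch the SMI, let the event code inject it. */
static void toy_request_smi(struct toy_vcpu *v)
{
	v->smi_pending = true;
}

/* Models process_smi(): actually enter SMM. */
static void toy_enter_smm(struct toy_vcpu *v)
{
	v->in_smm = true;
	printf("entered SMM\n");
}

/* Models the new ordering in inject_pending_event(): SMI before NMI,
 * and never while already in SMM. */
static void toy_inject_pending_event(struct toy_vcpu *v)
{
	if (v->smi_pending && !v->in_smm) {
		v->smi_pending = false;
		toy_enter_smm(v);
	} else if (v->nmi_pending) {
		--v->nmi_pending;
		printf("injected NMI\n");
	}
}

/* Models RSM/kvm_smm_changed(): leave SMM; a latched SMI stays pending. */
static void toy_leave_smm(struct toy_vcpu *v)
{
	v->in_smm = false;
	printf("left SMM\n");
}

int main(void)
{
	struct toy_vcpu v = { 0 };

	toy_request_smi(&v);            /* SMI arrives while outside SMM */
	toy_inject_pending_event(&v);   /* -> entered SMM */

	toy_request_smi(&v);            /* second SMI arrives while in SMM */
	toy_inject_pending_event(&v);   /* nothing: SMIs stay latched in SMM */

	toy_leave_smm(&v);              /* RSM */
	toy_inject_pending_event(&v);   /* -> entered SMM again */
	return 0;
}

Run as written, the model prints "entered SMM", stays silent for the nested SMI, then "left SMM" and "entered SMM" again once the first SMM exit completes, mirroring the latch-then-inject behaviour the patch adds.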