|
@@ -5281,6 +5281,11 @@ static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_fla
 	kvm_set_hflags(emul_to_vcpu(ctxt), emul_flags);
 }
 
+static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt, u64 smbase)
+{
+	return kvm_x86_ops->pre_leave_smm(emul_to_vcpu(ctxt), smbase);
+}
+
 static const struct x86_emulate_ops emulate_ops = {
 	.read_gpr            = emulator_read_gpr,
 	.write_gpr           = emulator_write_gpr,
|
@@ -5322,6 +5327,7 @@ static const struct x86_emulate_ops emulate_ops = {
 	.set_nmi_mask        = emulator_set_nmi_mask,
 	.get_hflags          = emulator_get_hflags,
 	.set_hflags          = emulator_set_hflags,
+	.pre_leave_smm       = emulator_pre_leave_smm,
 };
 
 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
|
@@ -6647,13 +6653,20 @@ static void enter_smm(struct kvm_vcpu *vcpu)
 	u32 cr0;
 
 	trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
-	vcpu->arch.hflags |= HF_SMM_MASK;
 	memset(buf, 0, 512);
 	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
 		enter_smm_save_state_64(vcpu, buf);
 	else
 		enter_smm_save_state_32(vcpu, buf);
 
+	/*
+	 * Give pre_enter_smm() a chance to make ISA-specific changes to the
+	 * vCPU state (e.g. leave guest mode) after we've saved the state into
+	 * the SMM state-save area.
+	 */
+	kvm_x86_ops->pre_enter_smm(vcpu, buf);
+
+	vcpu->arch.hflags |= HF_SMM_MASK;
 	kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf));
 
 	if (kvm_x86_ops->get_nmi_mask(vcpu))