@@ -5479,6 +5479,9 @@ static int complete_emulated_pio(struct kvm_vcpu *vcpu);
 
 static void kvm_smm_changed(struct kvm_vcpu *vcpu)
 {
         if (!(vcpu->arch.hflags & HF_SMM_MASK)) {
+                /* This is a good place to trace that we are exiting SMM. */
+                trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, false);
+
                 if (unlikely(vcpu->arch.smi_pending)) {
                         kvm_make_request(KVM_REQ_SMI, vcpu);
                         vcpu->arch.smi_pending = 0;
@@ -6390,14 +6393,231 @@ static void process_nmi(struct kvm_vcpu *vcpu)
         kvm_make_request(KVM_REQ_EVENT, vcpu);
 }
 
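+/*
+ * The 512-byte buffer used below shadows the top of the SMRAM state-save
+ * area, SMBASE + 0xfe00 .. SMBASE + 0xffff.  Offsets follow the SDM
+ * convention of being relative to SMBASE + 0x8000, hence the 0x7e00 bias.
+ */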
+#define put_smstate(type, buf, offset, val) \
+        *(type *)((buf) + (offset) - 0x7e00) = val
+
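+/*
+ * Build the 32-bit "access rights" image of a segment: the descriptor
+ * attribute bits (type, S, DPL, P, AVL, L, D/B, G) shifted left by 8.
+ */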
+static u32 process_smi_get_segment_flags(struct kvm_segment *seg)
+{
+        u32 flags = 0;
+        flags |= seg->g << 23;
+        flags |= seg->db << 22;
+        flags |= seg->l << 21;
+        flags |= seg->avl << 20;
+        flags |= seg->present << 15;
+        flags |= seg->dpl << 13;
+        flags |= seg->s << 12;
+        flags |= seg->type << 8;
+        return flags;
+}
+
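+/*
+ * In the 32-bit map the six selectors live in one array, but the cached
+ * descriptors are split: ES/CS/SS (n < 3) sit above DS/FS/GS.
+ */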
+static void process_smi_save_seg_32(struct kvm_vcpu *vcpu, char *buf, int n)
+{
+        struct kvm_segment seg;
+        int offset;
+
+        kvm_get_segment(vcpu, &seg, n);
+        put_smstate(u32, buf, 0x7fa8 + n * 4, seg.selector);
+
+        if (n < 3)
+                offset = 0x7f84 + n * 12;
+        else
+                offset = 0x7f2c + (n - 3) * 12;
+
+        put_smstate(u32, buf, offset + 8, seg.base);
+        put_smstate(u32, buf, offset + 4, seg.limit);
+        put_smstate(u32, buf, offset, process_smi_get_segment_flags(&seg));
+}
+
+static void process_smi_save_seg_64(struct kvm_vcpu *vcpu, char *buf, int n)
+{
+        struct kvm_segment seg;
+        int offset;
+        u16 flags;
+
+        kvm_get_segment(vcpu, &seg, n);
+        offset = 0x7e00 + n * 16;
+
+        flags = process_smi_get_segment_flags(&seg) >> 8;
+        put_smstate(u16, buf, offset, seg.selector);
+        put_smstate(u16, buf, offset + 2, flags);
+        put_smstate(u32, buf, offset + 4, seg.limit);
+        put_smstate(u64, buf, offset + 8, seg.base);
+}
+
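+/*
+ * KVM's GPR numbering (RAX, RCX, RDX, RBX, RSP, RBP, RSI, RDI, then
+ * R8-R15) matches the order of both save-state maps, so the loops below
+ * can store the registers by index.
+ */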
+static void process_smi_save_state_32(struct kvm_vcpu *vcpu, char *buf)
+{
+        struct desc_ptr dt;
+        struct kvm_segment seg;
+        unsigned long val;
+        int i;
+
+        put_smstate(u32, buf, 0x7ffc, kvm_read_cr0(vcpu));
+        put_smstate(u32, buf, 0x7ff8, kvm_read_cr3(vcpu));
+        put_smstate(u32, buf, 0x7ff4, kvm_get_rflags(vcpu));
+        put_smstate(u32, buf, 0x7ff0, kvm_rip_read(vcpu));
+
+        for (i = 0; i < 8; i++)
+                put_smstate(u32, buf, 0x7fd0 + i * 4, kvm_register_read(vcpu, i));
+
+        kvm_get_dr(vcpu, 6, &val);
+        put_smstate(u32, buf, 0x7fcc, (u32)val);
+        kvm_get_dr(vcpu, 7, &val);
+        put_smstate(u32, buf, 0x7fc8, (u32)val);
+
+        kvm_get_segment(vcpu, &seg, VCPU_SREG_TR);
+        put_smstate(u32, buf, 0x7fc4, seg.selector);
+        put_smstate(u32, buf, 0x7f64, seg.base);
+        put_smstate(u32, buf, 0x7f60, seg.limit);
+        put_smstate(u32, buf, 0x7f5c, process_smi_get_segment_flags(&seg));
+
+        kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR);
+        put_smstate(u32, buf, 0x7fc0, seg.selector);
+        put_smstate(u32, buf, 0x7f80, seg.base);
+        put_smstate(u32, buf, 0x7f7c, seg.limit);
+        put_smstate(u32, buf, 0x7f78, process_smi_get_segment_flags(&seg));
+
+        kvm_x86_ops->get_gdt(vcpu, &dt);
+        put_smstate(u32, buf, 0x7f74, dt.address);
+        put_smstate(u32, buf, 0x7f70, dt.size);
+
+        kvm_x86_ops->get_idt(vcpu, &dt);
+        put_smstate(u32, buf, 0x7f58, dt.address);
+        put_smstate(u32, buf, 0x7f54, dt.size);
+
+        for (i = 0; i < 6; i++)
+                process_smi_save_seg_32(vcpu, buf, i);
+
+        put_smstate(u32, buf, 0x7f14, kvm_read_cr4(vcpu));
+
+        /* revision id */
+        put_smstate(u32, buf, 0x7efc, 0x00020000);
+        put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase);
+}
+
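+/*
+ * The 64-bit map (revision id 0x00020064) follows the AMD64 layout; it
+ * is only used when the guest CPUID reports long mode, hence the
+ * CONFIG_X86_64 guard.
+ */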
+static void process_smi_save_state_64(struct kvm_vcpu *vcpu, char *buf)
+{
+#ifdef CONFIG_X86_64
+        struct desc_ptr dt;
+        struct kvm_segment seg;
+        unsigned long val;
+        int i;
+
+        for (i = 0; i < 16; i++)
+                put_smstate(u64, buf, 0x7ff8 - i * 8, kvm_register_read(vcpu, i));
+
+        put_smstate(u64, buf, 0x7f78, kvm_rip_read(vcpu));
+        put_smstate(u32, buf, 0x7f70, kvm_get_rflags(vcpu));
+
+        kvm_get_dr(vcpu, 6, &val);
+        put_smstate(u64, buf, 0x7f68, val);
+        kvm_get_dr(vcpu, 7, &val);
+        put_smstate(u64, buf, 0x7f60, val);
+
+        put_smstate(u64, buf, 0x7f58, kvm_read_cr0(vcpu));
+        put_smstate(u64, buf, 0x7f50, kvm_read_cr3(vcpu));
+        put_smstate(u64, buf, 0x7f48, kvm_read_cr4(vcpu));
+
+        put_smstate(u32, buf, 0x7f00, vcpu->arch.smbase);
+
+        /* revision id */
+        put_smstate(u32, buf, 0x7efc, 0x00020064);
+
+        put_smstate(u64, buf, 0x7ed0, vcpu->arch.efer);
+
+        kvm_get_segment(vcpu, &seg, VCPU_SREG_TR);
+        put_smstate(u16, buf, 0x7e90, seg.selector);
+        put_smstate(u16, buf, 0x7e92, process_smi_get_segment_flags(&seg) >> 8);
+        put_smstate(u32, buf, 0x7e94, seg.limit);
+        put_smstate(u64, buf, 0x7e98, seg.base);
+
+        kvm_x86_ops->get_idt(vcpu, &dt);
+        put_smstate(u32, buf, 0x7e84, dt.size);
+        put_smstate(u64, buf, 0x7e88, dt.address);
+
+        kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR);
+        put_smstate(u16, buf, 0x7e70, seg.selector);
+        put_smstate(u16, buf, 0x7e72, process_smi_get_segment_flags(&seg) >> 8);
+        put_smstate(u32, buf, 0x7e74, seg.limit);
+        put_smstate(u64, buf, 0x7e78, seg.base);
+
+        kvm_x86_ops->get_gdt(vcpu, &dt);
+        put_smstate(u32, buf, 0x7e64, dt.size);
+        put_smstate(u64, buf, 0x7e68, dt.address);
+
+        for (i = 0; i < 6; i++)
+                process_smi_save_seg_64(vcpu, buf, i);
+#else
+        WARN_ON_ONCE(1);
+#endif
+}
+
 static void process_smi(struct kvm_vcpu *vcpu)
 {
+        struct kvm_segment cs, ds;
+        char buf[512];
+        u32 cr0;
+
         if (is_smm(vcpu)) {
                 vcpu->arch.smi_pending = true;
                 return;
         }
 
-        printk_once(KERN_DEBUG "Ignoring guest SMI\n");
+        trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
+        vcpu->arch.hflags |= HF_SMM_MASK;
+        memset(buf, 0, 512);
+        if (guest_cpuid_has_longmode(vcpu))
+                process_smi_save_state_64(vcpu, buf);
+        else
+                process_smi_save_state_32(vcpu, buf);
+
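+        /* The save-state map occupies the top 512 bytes of SMRAM. */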
+        kvm_write_guest(vcpu->kvm, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf));
+
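+        /*
+         * Entering SMM blocks NMIs.  If one was already blocked, remember
+         * that in a hflag so that RSM does not unblock it too early.
+         */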
+        if (kvm_x86_ops->get_nmi_mask(vcpu))
+                vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
+        else
+                kvm_x86_ops->set_nmi_mask(vcpu, true);
+
+        kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
+        kvm_rip_write(vcpu, 0x8000);
+
+        cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG);
+        kvm_x86_ops->set_cr0(vcpu, cr0);
+        vcpu->arch.cr0 = cr0;
+
+        kvm_x86_ops->set_cr4(vcpu, 0);
+
+        __kvm_set_dr(vcpu, 7, DR7_FIXED_1);
+
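+        /*
+         * The handler starts in a big-real-mode-like environment: CS.base =
+         * SMBASE with selector SMBASE >> 4, flat 4 GiB segments, and
+         * execution resuming at SMBASE + 0x8000 (set above).
+         */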
+        cs.selector = (vcpu->arch.smbase >> 4) & 0xffff;
+        cs.base = vcpu->arch.smbase;
+
+        ds.selector = 0;
+        ds.base = 0;
+
+        cs.limit = ds.limit = 0xffffffff;
+        cs.type = ds.type = 0x3;
+        cs.dpl = ds.dpl = 0;
+        cs.db = ds.db = 0;
+        cs.s = ds.s = 1;
+        cs.l = ds.l = 0;
+        cs.g = ds.g = 1;
+        cs.avl = ds.avl = 0;
+        cs.present = ds.present = 1;
+        cs.unusable = ds.unusable = 0;
+        cs.padding = ds.padding = 0;
+
+        kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
+        kvm_set_segment(vcpu, &ds, VCPU_SREG_DS);
+        kvm_set_segment(vcpu, &ds, VCPU_SREG_ES);
+        kvm_set_segment(vcpu, &ds, VCPU_SREG_FS);
+        kvm_set_segment(vcpu, &ds, VCPU_SREG_GS);
+        kvm_set_segment(vcpu, &ds, VCPU_SREG_SS);
+
+        if (guest_cpuid_has_longmode(vcpu))
+                kvm_x86_ops->set_efer(vcpu, 0);
+
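+        /* CR0/CR4/EFER changed: refresh CPUID-derived state and the MMU. */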
+        kvm_update_cpuid(vcpu);
+        kvm_mmu_reset_context(vcpu);
 }
 
 static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)