@@ -87,6 +87,7 @@ static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
 
 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
 static void process_nmi(struct kvm_vcpu *vcpu);
+static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
 
 struct kvm_x86_ops *kvm_x86_ops;
 EXPORT_SYMBOL_GPL(kvm_x86_ops);
@@ -4868,8 +4869,11 @@ static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
	 */
	if (int_shadow & mask)
		mask = 0;
-	if (unlikely(int_shadow || mask))
+	if (unlikely(int_shadow || mask)) {
		kvm_x86_ops->set_interrupt_shadow(vcpu, mask);
+		if (!mask)
+			kvm_make_request(KVM_REQ_EVENT, vcpu);
+	}
 }
 
 static void inject_emulated_exception(struct kvm_vcpu *vcpu)
@@ -5095,20 +5099,18 @@ static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
	return dr6;
 }
 
-static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, int *r)
+static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r)
 {
	struct kvm_run *kvm_run = vcpu->run;
 
	/*
-	 * Use the "raw" value to see if TF was passed to the processor.
-	 * Note that the new value of the flags has not been saved yet.
+	 * rflags is the old, "raw" value of the flags. The new value has
+	 * not been saved yet.
	 *
	 * This is correct even for TF set by the guest, because "the
	 * processor will not generate this exception after the instruction
	 * that sets the TF flag".
	 */
-	unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
-
	if (unlikely(rflags & X86_EFLAGS_TF)) {
		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
			kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1;
@@ -5275,13 +5277,22 @@ restart:
		r = EMULATE_DONE;
 
	if (writeback) {
+		unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
		toggle_interruptibility(vcpu, ctxt->interruptibility);
-		kvm_make_request(KVM_REQ_EVENT, vcpu);
		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
		kvm_rip_write(vcpu, ctxt->eip);
		if (r == EMULATE_DONE)
-			kvm_vcpu_check_singlestep(vcpu, &r);
-		kvm_set_rflags(vcpu, ctxt->eflags);
+			kvm_vcpu_check_singlestep(vcpu, rflags, &r);
+		__kvm_set_rflags(vcpu, ctxt->eflags);
+
+		/*
+		 * For STI, interrupts are shadowed; so KVM_REQ_EVENT will
+		 * do nothing, and it will be requested again as soon as
+		 * the shadow expires. But we still need to check here,
+		 * because POPF has no interrupt shadow.
+		 */
+		if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF))
+			kvm_make_request(KVM_REQ_EVENT, vcpu);
	} else
		vcpu->arch.emulate_regs_need_sync_to_vcpu = true;
 
@@ -7406,12 +7417,17 @@ unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_get_rflags);
 
-void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
+static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 {
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
	    kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
		rflags |= X86_EFLAGS_TF;
	kvm_x86_ops->set_rflags(vcpu, rflags);
+}
+
+void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
+{
+	__kvm_set_rflags(vcpu, rflags);
	kvm_make_request(KVM_REQ_EVENT, vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_set_rflags);
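
The subtle part of the writeback hunk is the test (ctxt->eflags & ~rflags) & X86_EFLAGS_IF, which fires only when emulation has just turned the interrupt flag on (the POPF case), letting the patch drop the unconditional KVM_REQ_EVENT from the writeback path. The standalone program below is a minimal sketch of that bit logic, not kernel code; flag_newly_set(), old_rflags, and new_rflags are names invented here for illustration.

/*
 * Sketch of the IF 0 -> 1 detection used in the writeback path above.
 * flag_newly_set() is a hypothetical helper, not part of KVM.
 */
#include <stdio.h>

#define X86_EFLAGS_IF (1UL << 9)	/* interrupt enable flag, bit 9 of RFLAGS */

/* True only for bits set in new_rflags but clear in old_rflags,
 * i.e. a 0 -> 1 transition; a bit that was already set does not match. */
static int flag_newly_set(unsigned long old_rflags, unsigned long new_rflags,
			  unsigned long bit)
{
	return ((new_rflags & ~old_rflags) & bit) != 0;
}

int main(void)
{
	/* POPF that sets IF: an event must be requested. */
	printf("%d\n", flag_newly_set(0, X86_EFLAGS_IF, X86_EFLAGS_IF));		/* 1 */
	/* IF already set before emulation: no new request needed. */
	printf("%d\n", flag_newly_set(X86_EFLAGS_IF, X86_EFLAGS_IF, X86_EFLAGS_IF));	/* 0 */
	/* IF being cleared: interrupts are disabled, nothing to inject. */
	printf("%d\n", flag_newly_set(X86_EFLAGS_IF, 0, X86_EFLAGS_IF));		/* 0 */
	return 0;
}

As the patch's new comment notes, STI does not need this check to be exhaustive: it raises an interrupt shadow, so toggle_interruptibility() requests KVM_REQ_EVENT again when the shadow expires; only the shadow-less POPF depends on the transition test.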