@@ -5887,6 +5887,18 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
 			kvm_x86_ops->set_nmi(vcpu);
 		}
 	} else if (kvm_cpu_has_injectable_intr(vcpu)) {
+		/*
+		 * Because interrupts can be injected asynchronously, we are
+		 * calling check_nested_events again here to avoid a race condition.
+		 * See https://lkml.org/lkml/2014/7/2/60 for discussion about this
+		 * proposal and current concerns.  Perhaps we should be setting
+		 * KVM_REQ_EVENT only on certain events and not unconditionally?
+		 */
+		if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
+			r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
+			if (r != 0)
+				return r;
+		}
 		if (kvm_x86_ops->interrupt_allowed(vcpu)) {
			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
					    false);
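To make the race the comment describes concrete, here is a minimal standalone C model of it. This is not kernel code: struct vcpu_model, want_l2_exit, and every other name in the sketch are invented for illustration. It demonstrates the ordering the hunk enforces: when an interrupt becomes pending asynchronously while the vCPU is in guest (L2) mode, the nested-event check must run again immediately before injection; otherwise the interrupt would be delivered into L2 instead of causing the L2->L1 vmexit that L1 asked for.

/* Minimal standalone model of the ordering fixed above; not kernel code.
 * All types and names here are illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

struct vcpu_model {
	bool guest_mode;	/* vCPU is currently running a nested (L2) guest */
	bool pending_intr;	/* an interrupt became pending, possibly from
				 * another thread, after the earlier check */
	bool want_l2_exit;	/* L1 asked to intercept external interrupts */
};

/* Stand-in for kvm_x86_ops->check_nested_events(): if L1 intercepts
 * external interrupts, a pending interrupt must force an L2->L1 exit
 * rather than be injected straight into L2. */
static int check_nested_events(struct vcpu_model *v)
{
	if (v->guest_mode && v->pending_intr && v->want_l2_exit) {
		v->guest_mode = false;	/* emulate the nested vmexit */
		printf("nested vmexit to L1\n");
	}
	return 0;
}

static int inject_pending_event(struct vcpu_model *v)
{
	if (v->pending_intr) {
		/* Re-check right before injecting, mirroring the hunk:
		 * the interrupt may have become pending asynchronously,
		 * so without this call it could be injected into L2. */
		if (v->guest_mode) {
			int r = check_nested_events(v);
			if (r != 0)
				return r;
		}
		if (!v->guest_mode)
			printf("interrupt delivered to L1\n");
		v->pending_intr = false;
	}
	return 0;
}

int main(void)
{
	struct vcpu_model v = { .guest_mode = true, .pending_intr = true,
				.want_l2_exit = true };
	return inject_pending_event(&v);
}

The kernel change has the same shape: consult kvm_x86_ops->check_nested_events() again immediately before kvm_queue_interrupt(), so an interrupt that arrived asynchronously triggers a nested vmexit instead of being injected into the wrong level.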