@@ -389,15 +389,28 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
 
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
 
-	if (!vcpu->arch.exception.pending) {
+	if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) {
 	queue:
 		if (has_error && !is_protmode(vcpu))
 			has_error = false;
-		vcpu->arch.exception.pending = true;
+		if (reinject) {
+			/*
+			 * On vmentry, vcpu->arch.exception.pending is only
+			 * true if an event injection was blocked by
+			 * nested_run_pending.  In that case, however,
+			 * vcpu_enter_guest requests an immediate exit,
+			 * and the guest shouldn't proceed far enough to
+			 * need reinjection.
+			 */
+			WARN_ON_ONCE(vcpu->arch.exception.pending);
+			vcpu->arch.exception.injected = true;
+		} else {
+			vcpu->arch.exception.pending = true;
+			vcpu->arch.exception.injected = false;
+		}
 		vcpu->arch.exception.has_error_code = has_error;
 		vcpu->arch.exception.nr = nr;
 		vcpu->arch.exception.error_code = error_code;
-		vcpu->arch.exception.reinject = reinject;
 		return;
 	}
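
The hunk above splits the old single "pending" flag into a two-flag queue: "pending" marks an exception that still has to run through the nested-vmexit logic, while "injected" marks one that has already been written into the VMCS/VMCB and only needs to be replayed after an aborted vmentry. A minimal standalone sketch of that state machine, with simplified names and a toy struct rather than the real kvm_vcpu:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy model of vcpu->arch.exception after this patch. */
struct exception_queue {
	bool pending;	/* queued; nested vmexit not yet considered */
	bool injected;	/* already in the VMCS; replay if vmentry failed */
	int nr;
};

/* Mirrors the reinject/else split in kvm_multiple_exception. */
void queue_exception(struct exception_queue *q, int nr, bool reinject)
{
	if (reinject) {
		/* Replaying an event the CPU failed to deliver: a
		 * still-pending exception here would mean we lost one. */
		assert(!q->pending);
		q->injected = true;
	} else {
		q->pending = true;
		q->injected = false;
	}
	q->nr = nr;
}

int main(void)
{
	struct exception_queue q = {0};

	queue_exception(&q, 14, false);	/* fresh #PF: pending */
	printf("pending=%d injected=%d\n", q.pending, q.injected);
	q.pending = false;		/* delivered to the CPU... */
	queue_exception(&q, 14, true);	/* ...vmexit mid-delivery: replay */
	printf("pending=%d injected=%d\n", q.pending, q.injected);
	return 0;
}
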
@@ -412,8 +425,13 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
 	class2 = exception_class(nr);
 	if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
 		|| (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
-		/* generate double fault per SDM Table 5-5 */
+		/*
+		 * Generate double fault per SDM Table 5-5.  Set
+		 * exception.pending = true so that the double fault
+		 * can trigger a nested vmexit.
+		 */
 		vcpu->arch.exception.pending = true;
+		vcpu->arch.exception.injected = false;
 		vcpu->arch.exception.has_error_code = true;
 		vcpu->arch.exception.nr = DF_VECTOR;
 		vcpu->arch.exception.error_code = 0;
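
For reference, the class check guarding this branch follows the SDM's double-fault rule: two contributory exceptions, or a #PF followed by anything non-benign, collapse into #DF. A self-contained rendering of that rule; the vector-to-class mapping is meant to match what exception_class() does in x86.c, but this is an illustrative sketch, not the kernel source:

#include <stdbool.h>

enum { EXCPT_BENIGN, EXCPT_CONTRIBUTORY, EXCPT_PF };

/* Illustrative vector-to-class mapping per SDM Table 5-5. */
int exception_class(int nr)
{
	switch (nr) {
	case 14:				/* #PF */
		return EXCPT_PF;
	case 0:  case 10: case 11:		/* #DE, #TS, #NP */
	case 12: case 13:			/* #SS, #GP */
		return EXCPT_CONTRIBUTORY;
	default:
		return EXCPT_BENIGN;
	}
}

/* True when the second exception must be folded into a #DF. */
bool merges_to_double_fault(int first, int second)
{
	int class1 = exception_class(first);
	int class2 = exception_class(second);

	return (class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY) ||
	       (class1 == EXCPT_PF && class2 != EXCPT_BENIGN);
}
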
@@ -3072,8 +3090,14 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 					       struct kvm_vcpu_events *events)
 {
 	process_nmi(vcpu);
+	/*
+	 * FIXME: pass injected and pending separately.  This is only
+	 * needed for nested virtualization, whose state cannot be
+	 * migrated yet.  For now we can combine them.
+	 */
 	events->exception.injected =
-		vcpu->arch.exception.pending &&
+		(vcpu->arch.exception.pending ||
+		 vcpu->arch.exception.injected) &&
 		!kvm_exception_is_soft(vcpu->arch.exception.nr);
 	events->exception.nr = vcpu->arch.exception.nr;
 	events->exception.has_error_code = vcpu->arch.exception.has_error_code;
@@ -3128,6 +3152,7 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 		return -EINVAL;
 
 	process_nmi(vcpu);
+	vcpu->arch.exception.injected = false;
 	vcpu->arch.exception.pending = events->exception.injected;
 	vcpu->arch.exception.nr = events->exception.nr;
 	vcpu->arch.exception.has_error_code = events->exception.has_error_code;
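
Taken together, the two ioctl hunks above make the save/restore path deliberately lossy: KVM_GET_VCPU_EVENTS ORs both flags into the single "injected" bit userspace knows about, and KVM_SET_VCPU_EVENTS brings everything back as "pending", so the nested-vmexit checks run again after migration. A compressed sketch of that round trip, using toy structs rather than the real uapi:

#include <stdbool.h>

struct vcpu_exception { bool pending, injected; };	/* kernel side (toy) */
struct uapi_exception { bool injected; };		/* userspace view (toy) */

/* Save: fold both flags into the one bit userspace has. */
void get_vcpu_events(const struct vcpu_exception *e, struct uapi_exception *ev)
{
	ev->injected = e->pending || e->injected;
}

/* Restore: everything comes back as "pending", which is the safe
 * direction -- re-running the nested checks is harmless, whereas
 * skipping them could swallow a vmexit that L1 is owed. */
void set_vcpu_events(struct vcpu_exception *e, const struct uapi_exception *ev)
{
	e->injected = false;
	e->pending = ev->injected;
}
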
@@ -6344,11 +6369,42 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
 	int r;
 
 	/* try to reinject previous events if any */
+	if (vcpu->arch.exception.injected) {
+		kvm_x86_ops->queue_exception(vcpu);
+		return 0;
+	}
+
+	/*
+	 * Exceptions must be injected immediately, or the exception
+	 * frame will have the address of the NMI or interrupt handler.
+	 */
+	if (!vcpu->arch.exception.pending) {
+		if (vcpu->arch.nmi_injected) {
+			kvm_x86_ops->set_nmi(vcpu);
+			return 0;
+		}
+
+		if (vcpu->arch.interrupt.pending) {
+			kvm_x86_ops->set_irq(vcpu);
+			return 0;
+		}
+	}
+
+	if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
+		r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
+		if (r != 0)
+			return r;
+	}
+
+	/* try to inject new event if pending */
 	if (vcpu->arch.exception.pending) {
 		trace_kvm_inj_exception(vcpu->arch.exception.nr,
 					vcpu->arch.exception.has_error_code,
 					vcpu->arch.exception.error_code);
 
+		vcpu->arch.exception.pending = false;
+		vcpu->arch.exception.injected = true;
+
 		if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT)
 			__kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) |
 					     X86_EFLAGS_RF);
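
The control flow introduced here is easier to see flattened into a priority ladder: replay an already-injected exception first; replay NMI/IRQ only when no exception is pending (the pending exception belongs to the current instruction, the replayed event to an earlier one); give a nested hypervisor the chance to turn the event into a vmexit; only then commit the pending exception, flipping it to injected. A condensed model under those assumptions, with toy state rather than the kernel function:

#include <stdbool.h>

struct event_state {
	bool exc_pending, exc_injected;
	bool nmi_injected, irq_injected;
};

enum action {
	REPLAY_EXC, REPLAY_NMI, REPLAY_IRQ,
	NESTED_VMEXIT, INJECT_EXC, NOTHING,
};

/* One pass of the ladder that inject_pending_event() now walks. */
enum action pick_event(struct event_state *s, bool nested_claims_event)
{
	if (s->exc_injected)		/* 1. finish an aborted delivery */
		return REPLAY_EXC;

	if (!s->exc_pending) {		/* 2. pending exception outranks replay */
		if (s->nmi_injected)
			return REPLAY_NMI;
		if (s->irq_injected)
			return REPLAY_IRQ;
	}

	if (nested_claims_event)	/* 3. L1 may want a vmexit instead */
		return NESTED_VMEXIT;

	if (s->exc_pending) {		/* 4. commit: pending -> injected */
		s->exc_pending = false;
		s->exc_injected = true;
		return INJECT_EXC;
	}
	return NOTHING;
}
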
@@ -6360,27 +6416,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
 		}
 
 		kvm_x86_ops->queue_exception(vcpu);
-		return 0;
-	}
-
-	if (vcpu->arch.nmi_injected) {
-		kvm_x86_ops->set_nmi(vcpu);
-		return 0;
-	}
-
-	if (vcpu->arch.interrupt.pending) {
-		kvm_x86_ops->set_irq(vcpu);
-		return 0;
-	}
-
-	if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
-		r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
-		if (r != 0)
-			return r;
-	}
-
-	/* try to inject new event if pending */
-	if (vcpu->arch.smi_pending && !is_smm(vcpu)) {
+	} else if (vcpu->arch.smi_pending && !is_smm(vcpu)) {
 		vcpu->arch.smi_pending = false;
 		enter_smm(vcpu);
 	} else if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
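
The "} else if" conversion chains exception, SMI, NMI and IRQ injection into one mutually exclusive decision, so at most one event is delivered per vmentry; anything still outstanding waits for the next entry (KVM_REQ_EVENT brings the loop back around). A toy loop showing that behaviour:

#include <stdbool.h>
#include <stdio.h>

struct due { bool exc, smi, nmi, irq; };

/* At most one event per simulated vmentry, highest priority first. */
const char *deliver_one(struct due *d)
{
	if (d->exc)      { d->exc = false; return "exception"; }
	else if (d->smi) { d->smi = false; return "SMI"; }
	else if (d->nmi) { d->nmi = false; return "NMI"; }
	else if (d->irq) { d->irq = false; return "IRQ"; }
	return NULL;
}

int main(void)
{
	struct due d = { .exc = true, .smi = true };
	const char *what;

	/* two events due -> two simulated vmentries */
	while ((what = deliver_one(&d)))
		printf("vmentry delivers: %s\n", what);
	return 0;
}
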
@@ -6856,6 +6892,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 			kvm_x86_ops->enable_nmi_window(vcpu);
 		if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
 			kvm_x86_ops->enable_irq_window(vcpu);
+		WARN_ON(vcpu->arch.exception.pending);
 	}
 
 	if (kvm_lapic_enabled(vcpu)) {
@@ -7730,6 +7767,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 	vcpu->arch.nmi_injected = false;
 	kvm_clear_interrupt_queue(vcpu);
 	kvm_clear_exception_queue(vcpu);
+	vcpu->arch.exception.pending = false;
 
 	memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
 	kvm_update_dr0123(vcpu);
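
The added "exception.pending = false" only makes sense if kvm_clear_exception_queue() on the line above now clears just the "injected" flag; that helper lives in x86.h and is presumably updated by a companion hunk of this series that is not shown here (an assumption, flagged again in the sketch). A toy rendering of the pair:

#include <stdbool.h>

struct exception_queue { bool pending, injected; };

/* ASSUMPTION: models an x86.h companion change not shown in this diff,
 * where kvm_clear_exception_queue() clears only the injected flag. */
void clear_exception_queue(struct exception_queue *q)
{
	q->injected = false;
}

/* Reset path: the hunk above then has to zap "pending" by hand. */
void reset_exception_state(struct exception_queue *q)
{
	clear_exception_queue(q);
	q->pending = false;
}
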