@@ -408,12 +408,14 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
 }
 EXPORT_SYMBOL_GPL(kvm_inject_page_fault);
 
-void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
+static bool kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
 {
 	if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
 		vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
 	else
 		vcpu->arch.mmu.inject_page_fault(vcpu, fault);
+
+	return fault->nested_page_fault;
 }
 
 void kvm_inject_nmi(struct kvm_vcpu *vcpu)
@@ -4929,16 +4931,18 @@ static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
 	}
 }
 
-static void inject_emulated_exception(struct kvm_vcpu *vcpu)
+static bool inject_emulated_exception(struct kvm_vcpu *vcpu)
 {
 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
 	if (ctxt->exception.vector == PF_VECTOR)
-		kvm_propagate_fault(vcpu, &ctxt->exception);
-	else if (ctxt->exception.error_code_valid)
+		return kvm_propagate_fault(vcpu, &ctxt->exception);
+
+	if (ctxt->exception.error_code_valid)
 		kvm_queue_exception_e(vcpu, ctxt->exception.vector,
 				      ctxt->exception.error_code);
 	else
 		kvm_queue_exception(vcpu, ctxt->exception.vector);
+	return false;
 }
 
 static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
@@ -5300,8 +5304,9 @@ restart:
 	}
 
 	if (ctxt->have_exception) {
-		inject_emulated_exception(vcpu);
 		r = EMULATE_DONE;
+		if (inject_emulated_exception(vcpu))
+			return r;
 	} else if (vcpu->arch.pio.count) {
 		if (!vcpu->arch.pio.in) {
 			/* FIXME: return into emulator if single-stepping. */