@@ -178,6 +178,12 @@ static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
 		}
 	} else if (KVM_GUEST_KERNEL_MODE(vcpu)
 		   && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
+		/* A code fetch fault doesn't count as an MMIO */
+		if (!store && kvm_is_ifetch_fault(&vcpu->arch)) {
+			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+			return RESUME_HOST;
+		}
+
 		/*
 		 * With EVA we may get a TLB exception instead of an address
 		 * error when the guest performs MMIO to KSeg1 addresses.
@@ -255,6 +261,12 @@ static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
 	int ret = RESUME_GUEST;
 
 	if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
+		/* A code fetch fault doesn't count as an MMIO */
+		if (kvm_is_ifetch_fault(&vcpu->arch)) {
+			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+			return RESUME_HOST;
+		}
+
 		kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
 		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
 		if (er == EMULATE_FAIL) {