@@ -3273,7 +3273,7 @@ static bool is_shadow_zero_bits_set(struct kvm_mmu *mmu, u64 spte, int level)
 	return __is_rsvd_bits_set(&mmu->shadow_zero_check, spte, level);
 }
 
-static bool quickly_check_mmio_pf(struct kvm_vcpu *vcpu, u64 addr, bool direct)
+static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 {
 	if (direct)
 		return vcpu_match_mmio_gpa(vcpu, addr);
@@ -3332,7 +3332,7 @@ int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 	u64 spte;
 	bool reserved;
 
-	if (quickly_check_mmio_pf(vcpu, addr, direct))
+	if (mmio_info_in_cache(vcpu, addr, direct))
 		return RET_MMIO_PF_EMULATE;
 
 	reserved = walk_shadow_page_get_mmio_spte(vcpu, addr, &spte);
@@ -4354,19 +4354,12 @@ static void make_mmu_pages_available(struct kvm_vcpu *vcpu)
 	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
 }
 
-static bool is_mmio_page_fault(struct kvm_vcpu *vcpu, gva_t addr)
-{
-	if (vcpu->arch.mmu.direct_map || mmu_is_nested(vcpu))
-		return vcpu_match_mmio_gpa(vcpu, addr);
-
-	return vcpu_match_mmio_gva(vcpu, addr);
-}
-
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
 		       void *insn, int insn_len)
 {
 	int r, emulation_type = EMULTYPE_RETRY;
 	enum emulation_result er;
+	bool direct = vcpu->arch.mmu.direct_map || mmu_is_nested(vcpu);
 
 	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
 	if (r < 0)
@@ -4377,7 +4370,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
 		goto out;
 	}
 
-	if (is_mmio_page_fault(vcpu, cr2))
+	if (mmio_info_in_cache(vcpu, cr2, direct))
 		emulation_type = 0;
 
 	er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len);
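
For orientation, a minimal sketch of the surviving helper as it reads after this patch; the direct branch is shown in the first hunk above, and the non-direct branch is assumed to carry over unchanged from the removed is_mmio_page_fault():

/* Sketch, not the full upstream source. The MMIO info cache is keyed by
 * GPA when the MMU is direct (TDP) or nested, and by GVA under shadow
 * paging. Callers such as kvm_mmu_page_fault() now compute 'direct' once
 * and pass it in instead of keeping a second, near-duplicate helper.
 */
static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
{
	if (direct)
		return vcpu_match_mmio_gpa(vcpu, addr);	/* cached MMIO GPA hit? */

	return vcpu_match_mmio_gva(vcpu, addr);		/* cached MMIO GVA hit? */
}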