|
@@ -2479,6 +2479,14 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
 	return 0;
 }
 
+static bool kvm_is_mmio_pfn(pfn_t pfn)
+{
+	if (pfn_valid(pfn))
+		return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn));
+
+	return true;
+}
+
 static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		    unsigned pte_access, int level,
 		    gfn_t gfn, pfn_t pfn, bool speculative,
@@ -2506,7 +2514,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		spte |= PT_PAGE_SIZE_MASK;
 	if (tdp_enabled)
 		spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
-				kvm_is_reserved_pfn(pfn));
+				kvm_is_mmio_pfn(pfn));
 
 	if (host_writable)
 		spte |= SPTE_HOST_WRITEABLE;