KVM: x86: reintroduce kvm_is_mmio_pfn

The call to get_mt_mask was really using kvm_is_reserved_pfn to
detect an MMIO-backed page.  The zero page, however, is marked
PageReserved without being MMIO, so the predicate should return
"false" for it.

Reintroduce a separate kvm_is_mmio_pfn predicate for this use
only.
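
For reference, the generic predicate that set_spte was using looks
roughly like this (a sketch of kvm_is_reserved_pfn from
virt/kvm/kvm_main.c of the same era; since the zero page is
PageReserved, it returns true for it):

	bool kvm_is_reserved_pfn(pfn_t pfn)
	{
		if (pfn_valid(pfn))
			return PageReserved(pfn_to_page(pfn));

		/* Invalid pfns are assumed to be MMIO or otherwise reserved. */
		return true;
	}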

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Paolo Bonzini, 10 years ago
commit d1fe921955
1 changed file with 9 additions and 1 deletion:
    arch/x86/kvm/mmu.c (+9 -1)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c

@@ -2479,6 +2479,14 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
 	return 0;
 }
 
+static bool kvm_is_mmio_pfn(pfn_t pfn)
+{
+	if (pfn_valid(pfn))
+		return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn));
+
+	return true;
+}
+
 static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		    unsigned pte_access, int level,
 		    gfn_t gfn, pfn_t pfn, bool speculative,
@@ -2506,7 +2514,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		spte |= PT_PAGE_SIZE_MASK;
 	if (tdp_enabled)
 		spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
-			kvm_is_reserved_pfn(pfn));
+			kvm_is_mmio_pfn(pfn));
 
 	if (host_writable)
 		spte |= SPTE_HOST_WRITEABLE;
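
Why the MMIO classification matters here: on VMX, get_mt_mask uses
the is_mmio argument to pick the EPT memory type, and MMIO pages are
mapped uncacheable. A simplified sketch of that decision (condensed
from vmx_get_mt_mask in arch/x86/kvm/vmx.c of the same era; details
such as the noncoherent-DMA case are elided):

	static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
	{
		/* MMIO must be mapped uncacheable. */
		if (is_mmio)
			return MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;

		/* Ordinary RAM: write-back, and ignore guest PAT (IPAT). */
		return (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT;
	}

With kvm_is_reserved_pfn, the zero page would take the is_mmio path
and be mapped UC; the new kvm_is_mmio_pfn predicate keeps it
write-back.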