
KVM: x86: MMU: Move mapping_level_dirty_bitmap() call in mapping_level()

This is necessary to eliminate an extra memory slot search later.
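In outline, mapping_level() now takes an in/out force_pt_level flag and performs the dirty-bitmap check itself, so each caller makes a single call instead of the mapping_level_dirty_bitmap()/mapping_level() pair. The sketch below condenses the patch (unrelated locals and error handling omitted): tdp_page_fault() seeds the flag from its hugepage-cache check, while the other callers seed it with false.

	static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn,
				 bool *force_pt_level)
	{
		/* Skip the memslot search if the caller already forced 4K. */
		if (likely(!*force_pt_level))
			*force_pt_level = mapping_level_dirty_bitmap(vcpu, large_gfn);
		if (unlikely(*force_pt_level))
			return PT_PAGE_TABLE_LEVEL;
		/* ... otherwise pick the largest level the host mapping allows ... */
	}

	/* Caller pattern, e.g. nonpaging_map(): */
	bool force_pt_level = false;
	level = mapping_level(vcpu, gfn, &force_pt_level);
	if (likely(!force_pt_level)) {
		/* huge-page path: clamp level, align gfn to the level boundary */
	}

With the dirty-bitmap check inside mapping_level(), the memslot found there can presumably be passed down by a follow-up patch instead of being looked up a second time, which is the extra search the message refers to.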

Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit fd13690218
 arch/x86/kvm/mmu.c         | 29 ++++++++++++++---------------
 arch/x86/kvm/paging_tmpl.h |  6 +++---
 2 files changed, 17 insertions(+), 18 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c

@@ -870,10 +870,16 @@ static bool mapping_level_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t large_gfn)
 	return !gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true);
 }
 
-static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
+static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn,
+			 bool *force_pt_level)
 {
 	int host_level, level, max_level;
 
+	if (likely(!*force_pt_level))
+		*force_pt_level = mapping_level_dirty_bitmap(vcpu, large_gfn);
+	if (unlikely(*force_pt_level))
+		return PT_PAGE_TABLE_LEVEL;
+
 	host_level = host_mapping_level(vcpu->kvm, large_gfn);
 
 	if (host_level == PT_PAGE_TABLE_LEVEL)
@@ -2962,14 +2968,13 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
 {
 	int r;
 	int level;
-	bool force_pt_level;
+	bool force_pt_level = false;
 	pfn_t pfn;
 	unsigned long mmu_seq;
 	bool map_writable, write = error_code & PFERR_WRITE_MASK;
 
-	force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn);
+	level = mapping_level(vcpu, gfn, &force_pt_level);
 	if (likely(!force_pt_level)) {
-		level = mapping_level(vcpu, gfn);
 		/*
 		 * This path builds a PAE pagetable - so we can map
 		 * 2mb pages at maximum. Therefore check if the level
@@ -2979,8 +2984,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
 			level = PT_DIRECTORY_LEVEL;
 
 		gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
-	} else
-		level = PT_PAGE_TABLE_LEVEL;
+	}
 
 	if (fast_page_fault(vcpu, v, level, error_code))
 		return 0;
@@ -3495,20 +3499,15 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 	if (r)
 		return r;
 
-	if (mapping_level_dirty_bitmap(vcpu, gfn) ||
-	    !check_hugepage_cache_consistency(vcpu, gfn, PT_DIRECTORY_LEVEL))
-		force_pt_level = true;
-	else
-		force_pt_level = false;
-
+	force_pt_level = !check_hugepage_cache_consistency(vcpu, gfn,
+							   PT_DIRECTORY_LEVEL);
+	level = mapping_level(vcpu, gfn, &force_pt_level);
 	if (likely(!force_pt_level)) {
-		level = mapping_level(vcpu, gfn);
 		if (level > PT_DIRECTORY_LEVEL &&
 		    !check_hugepage_cache_consistency(vcpu, gfn, level))
 			level = PT_DIRECTORY_LEVEL;
 		gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
-	} else
-		level = PT_PAGE_TABLE_LEVEL;
+	}
 
 	if (fast_page_fault(vcpu, gpa, level, error_code))
 		return 0;

diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h

@@ -744,9 +744,9 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 	      &walker, user_fault, &vcpu->arch.write_fault_to_shadow_pgtable);
 
 	if (walker.level >= PT_DIRECTORY_LEVEL && !is_self_change_mapping) {
-		force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn);
-		if (!force_pt_level) {
-			level = min(walker.level, mapping_level(vcpu, walker.gfn));
+		level = mapping_level(vcpu, walker.gfn, &force_pt_level);
+		if (likely(!force_pt_level)) {
+			level = min(walker.level, level);
 			walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
 		}
 	} else