Browse Source

KVM: MMU: Segregate shadow pages with different cr0.wp

When cr0.wp=0, we may shadow a gpte having u/s=1 and r/w=0 with an spte
having u/s=0 and r/w=1.  This allows excessive access if the guest sets
cr0.wp=1 and accesses through this spte.

Fix by making cr0.wp part of the base role; we'll have different sptes for
the two cases and the problem disappears.

Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Avi Kivity 15 years ago
parent
commit
3dbe141595
3 changed files with 5 additions and 1 deletions
  1. 2 0
      Documentation/kvm/mmu.txt
  2. 1 0
      arch/x86/include/asm/kvm_host.h
  3. 2 1
      arch/x86/kvm/mmu.c

+ 2 - 0
Documentation/kvm/mmu.txt

@@ -163,6 +163,8 @@ Shadow pages contain the following information:
     32-bit or 64-bit gptes are in use).
   role.cr4_nxe:
     Contains the value of efer.nxe for which the page is valid.
+  role.cr0_wp:
+    Contains the value of cr0.wp for which the page is valid.
   gfn:
     Either the guest page table containing the translations shadowed by this
     page, or the base page frame for linear translations.  See role.direct.

+ 1 - 0
arch/x86/include/asm/kvm_host.h

@@ -179,6 +179,7 @@ union kvm_mmu_page_role {
 		unsigned access:3;
 		unsigned invalid:1;
 		unsigned nxe:1;
+		unsigned cr0_wp:1;
 	};
 };
 

+ 2 - 1
arch/x86/kvm/mmu.c

@@ -217,7 +217,7 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
 
-static int is_write_protection(struct kvm_vcpu *vcpu)
+static bool is_write_protection(struct kvm_vcpu *vcpu)
 {
 	return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
 }
@@ -2432,6 +2432,7 @@ static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
 		r = paging32_init_context(vcpu);
 
 	vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu);
+	vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu);
 
 	return r;
 }