@@ -3763,7 +3763,7 @@ static void paging32E_init_context(struct kvm_vcpu *vcpu,
 
 static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 {
-	struct kvm_mmu *context = vcpu->arch.walk_mmu;
+	struct kvm_mmu *context = &vcpu->arch.mmu;
 
 	context->base_role.word = 0;
 	context->page_fault = tdp_page_fault;
@@ -3803,11 +3803,13 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 	update_last_pte_bitmap(vcpu, context);
 }
 
-void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
+void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
 {
 	bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
+	struct kvm_mmu *context = &vcpu->arch.mmu;
+
 	ASSERT(vcpu);
-	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
+	ASSERT(!VALID_PAGE(context->root_hpa));
 
 	if (!is_paging(vcpu))
 		nonpaging_init_context(vcpu, context);
@@ -3818,19 +3820,20 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
 	else
 		paging32_init_context(vcpu, context);
 
-	vcpu->arch.mmu.base_role.nxe = is_nx(vcpu);
-	vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu);
-	vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu);
-	vcpu->arch.mmu.base_role.smep_andnot_wp
+	context->base_role.nxe = is_nx(vcpu);
+	context->base_role.cr4_pae = !!is_pae(vcpu);
+	context->base_role.cr0_wp = is_write_protection(vcpu);
+	context->base_role.smep_andnot_wp
 		= smep && !is_write_protection(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
 
-void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
-		bool execonly)
+void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly)
 {
+	struct kvm_mmu *context = &vcpu->arch.mmu;
+
 	ASSERT(vcpu);
-	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
+	ASSERT(!VALID_PAGE(context->root_hpa));
 
 	context->shadow_root_level = kvm_x86_ops->get_tdp_level();
 
@@ -3851,11 +3854,13 @@ EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
 
 static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
 {
-	kvm_init_shadow_mmu(vcpu, vcpu->arch.walk_mmu);
-	vcpu->arch.walk_mmu->set_cr3 = kvm_x86_ops->set_cr3;
-	vcpu->arch.walk_mmu->get_cr3 = get_cr3;
-	vcpu->arch.walk_mmu->get_pdptr = kvm_pdptr_read;
-	vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
+	struct kvm_mmu *context = &vcpu->arch.mmu;
+
+	kvm_init_shadow_mmu(vcpu);
+	context->set_cr3 = kvm_x86_ops->set_cr3;
+	context->get_cr3 = get_cr3;
+	context->get_pdptr = kvm_pdptr_read;
+	context->inject_page_fault = kvm_inject_page_fault;
 }
 
 static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)