@@ -2181,7 +2181,7 @@ static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 				   struct list_head *invalid_list)
 {
 	if (sp->role.cr4_pae != !!is_pae(vcpu)
-	    || vcpu->arch.mmu.sync_page(vcpu, sp) == 0) {
+	    || vcpu->arch.mmu->sync_page(vcpu, sp) == 0) {
 		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
 		return false;
 	}
@@ -2375,14 +2375,14 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	int collisions = 0;
 	LIST_HEAD(invalid_list);
 
-	role = vcpu->arch.mmu.base_role;
+	role = vcpu->arch.mmu->base_role;
 	role.level = level;
 	role.direct = direct;
 	if (role.direct)
 		role.cr4_pae = 0;
 	role.access = access;
-	if (!vcpu->arch.mmu.direct_map
-	    && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
+	if (!vcpu->arch.mmu->direct_map
+	    && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) {
 		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
 		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
 		role.quadrant = quadrant;
@@ -2457,11 +2457,11 @@ static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterato
 {
 	iterator->addr = addr;
 	iterator->shadow_addr = root;
-	iterator->level = vcpu->arch.mmu.shadow_root_level;
+	iterator->level = vcpu->arch.mmu->shadow_root_level;
 
 	if (iterator->level == PT64_ROOT_4LEVEL &&
-	    vcpu->arch.mmu.root_level < PT64_ROOT_4LEVEL &&
-	    !vcpu->arch.mmu.direct_map)
+	    vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL &&
+	    !vcpu->arch.mmu->direct_map)
 		--iterator->level;
 
 	if (iterator->level == PT32E_ROOT_LEVEL) {
@@ -2469,10 +2469,10 @@ static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterato
 		 * prev_root is currently only used for 64-bit hosts. So only
 		 * the active root_hpa is valid here.
 		 */
-		BUG_ON(root != vcpu->arch.mmu.root_hpa);
+		BUG_ON(root != vcpu->arch.mmu->root_hpa);
 
 		iterator->shadow_addr
-			= vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
+			= vcpu->arch.mmu->pae_root[(addr >> 30) & 3];
 		iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
 		--iterator->level;
 		if (!iterator->shadow_addr)
@@ -2483,7 +2483,7 @@ static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterato
 static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
 			     struct kvm_vcpu *vcpu, u64 addr)
 {
-	shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu.root_hpa,
+	shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root_hpa,
 				    addr);
 }
 
@@ -3095,7 +3095,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, int write, int map_writable,
 	int emulate = 0;
 	gfn_t pseudo_gfn;
 
-	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
 		return 0;
 
 	for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
@@ -3310,7 +3310,7 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
 	u64 spte = 0ull;
 	uint retry_count = 0;
 
-	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
 		return false;
 
 	if (!page_fault_can_be_fast(error_code))
@@ -3484,7 +3484,7 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, ulong roots_to_free)
 {
 	int i;
 	LIST_HEAD(invalid_list);
-	struct kvm_mmu *mmu = &vcpu->arch.mmu;
+	struct kvm_mmu *mmu = vcpu->arch.mmu;
 	bool free_active_root = roots_to_free & KVM_MMU_ROOT_CURRENT;
 
 	BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG);
@@ -3544,20 +3544,20 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
 	struct kvm_mmu_page *sp;
 	unsigned i;
 
-	if (vcpu->arch.mmu.shadow_root_level >= PT64_ROOT_4LEVEL) {
+	if (vcpu->arch.mmu->shadow_root_level >= PT64_ROOT_4LEVEL) {
 		spin_lock(&vcpu->kvm->mmu_lock);
 		if(make_mmu_pages_available(vcpu) < 0) {
 			spin_unlock(&vcpu->kvm->mmu_lock);
 			return -ENOSPC;
 		}
 		sp = kvm_mmu_get_page(vcpu, 0, 0,
-				vcpu->arch.mmu.shadow_root_level, 1, ACC_ALL);
+				vcpu->arch.mmu->shadow_root_level, 1, ACC_ALL);
 		++sp->root_count;
 		spin_unlock(&vcpu->kvm->mmu_lock);
-		vcpu->arch.mmu.root_hpa = __pa(sp->spt);
-	} else if (vcpu->arch.mmu.shadow_root_level == PT32E_ROOT_LEVEL) {
+		vcpu->arch.mmu->root_hpa = __pa(sp->spt);
+	} else if (vcpu->arch.mmu->shadow_root_level == PT32E_ROOT_LEVEL) {
 		for (i = 0; i < 4; ++i) {
-			hpa_t root = vcpu->arch.mmu.pae_root[i];
+			hpa_t root = vcpu->arch.mmu->pae_root[i];
 
 			MMU_WARN_ON(VALID_PAGE(root));
 			spin_lock(&vcpu->kvm->mmu_lock);
@@ -3570,9 +3570,9 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
 			root = __pa(sp->spt);
 			++sp->root_count;
 			spin_unlock(&vcpu->kvm->mmu_lock);
-			vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
+			vcpu->arch.mmu->pae_root[i] = root | PT_PRESENT_MASK;
 		}
-		vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
+		vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
 	} else
 		BUG();
 
@@ -3586,7 +3586,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 	gfn_t root_gfn;
 	int i;
 
-	root_gfn = vcpu->arch.mmu.get_cr3(vcpu) >> PAGE_SHIFT;
+	root_gfn = vcpu->arch.mmu->get_cr3(vcpu) >> PAGE_SHIFT;
 
 	if (mmu_check_root(vcpu, root_gfn))
 		return 1;
@@ -3595,8 +3595,8 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 	 * Do we shadow a long mode page table? If so we need to
 	 * write-protect the guests page table root.
 	 */
-	if (vcpu->arch.mmu.root_level >= PT64_ROOT_4LEVEL) {
-		hpa_t root = vcpu->arch.mmu.root_hpa;
+	if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
+		hpa_t root = vcpu->arch.mmu->root_hpa;
 
 		MMU_WARN_ON(VALID_PAGE(root));
 
@@ -3606,11 +3606,11 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 			return -ENOSPC;
 		}
 		sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
-				vcpu->arch.mmu.shadow_root_level, 0, ACC_ALL);
+				vcpu->arch.mmu->shadow_root_level, 0, ACC_ALL);
 		root = __pa(sp->spt);
 		++sp->root_count;
 		spin_unlock(&vcpu->kvm->mmu_lock);
-		vcpu->arch.mmu.root_hpa = root;
+		vcpu->arch.mmu->root_hpa = root;
 		return 0;
 	}
 
@@ -3620,17 +3620,17 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 	 * the shadow page table may be a PAE or a long mode page table.
 	 */
 	pm_mask = PT_PRESENT_MASK;
-	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_4LEVEL)
+	if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL)
 		pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
 
 	for (i = 0; i < 4; ++i) {
-		hpa_t root = vcpu->arch.mmu.pae_root[i];
+		hpa_t root = vcpu->arch.mmu->pae_root[i];
 
 		MMU_WARN_ON(VALID_PAGE(root));
-		if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
-			pdptr = vcpu->arch.mmu.get_pdptr(vcpu, i);
+		if (vcpu->arch.mmu->root_level == PT32E_ROOT_LEVEL) {
+			pdptr = vcpu->arch.mmu->get_pdptr(vcpu, i);
 			if (!(pdptr & PT_PRESENT_MASK)) {
-				vcpu->arch.mmu.pae_root[i] = 0;
+				vcpu->arch.mmu->pae_root[i] = 0;
 				continue;
 			}
 			root_gfn = pdptr >> PAGE_SHIFT;
@@ -3648,16 +3648,16 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 		++sp->root_count;
 		spin_unlock(&vcpu->kvm->mmu_lock);
 
-		vcpu->arch.mmu.pae_root[i] = root | pm_mask;
+		vcpu->arch.mmu->pae_root[i] = root | pm_mask;
 	}
-	vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
+	vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
 
 	/*
 	 * If we shadow a 32 bit page table with a long mode page
 	 * table we enter this path.
 	 */
-	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_4LEVEL) {
-		if (vcpu->arch.mmu.lm_root == NULL) {
+	if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
+		if (vcpu->arch.mmu->lm_root == NULL) {
 			/*
 			 * The additional page necessary for this is only
 			 * allocated on demand.
@@ -3669,12 +3669,12 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 			if (lm_root == NULL)
 				return 1;
 
-			lm_root[0] = __pa(vcpu->arch.mmu.pae_root) | pm_mask;
+			lm_root[0] = __pa(vcpu->arch.mmu->pae_root) | pm_mask;
 
-			vcpu->arch.mmu.lm_root = lm_root;
+			vcpu->arch.mmu->lm_root = lm_root;
 		}
 
-		vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.lm_root);
+		vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root);
 	}
 
 	return 0;
@@ -3682,7 +3682,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 
 static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
 {
-	if (vcpu->arch.mmu.direct_map)
+	if (vcpu->arch.mmu->direct_map)
 		return mmu_alloc_direct_roots(vcpu);
 	else
 		return mmu_alloc_shadow_roots(vcpu);
@@ -3693,17 +3693,16 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
 	int i;
 	struct kvm_mmu_page *sp;
 
-	if (vcpu->arch.mmu.direct_map)
+	if (vcpu->arch.mmu->direct_map)
 		return;
 
-	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
 		return;
 
 	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
 
-	if (vcpu->arch.mmu.root_level >= PT64_ROOT_4LEVEL) {
-		hpa_t root = vcpu->arch.mmu.root_hpa;
-
+	if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
+		hpa_t root = vcpu->arch.mmu->root_hpa;
 		sp = page_header(root);
 
 		/*
@@ -3734,7 +3733,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
 	kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
 
 	for (i = 0; i < 4; ++i) {
-		hpa_t root = vcpu->arch.mmu.pae_root[i];
+		hpa_t root = vcpu->arch.mmu->pae_root[i];
 
 		if (root && VALID_PAGE(root)) {
 			root &= PT64_BASE_ADDR_MASK;
@@ -3808,7 +3807,7 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
 	int root, leaf;
 	bool reserved = false;
 
-	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
 		goto exit;
 
 	walk_shadow_page_lockless_begin(vcpu);
@@ -3825,7 +3824,7 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
 		if (!is_shadow_present_pte(spte))
 			break;
 
-		reserved |= is_shadow_zero_bits_set(&vcpu->arch.mmu, spte,
+		reserved |= is_shadow_zero_bits_set(vcpu->arch.mmu, spte,
 						    iterator.level);
 	}
 
@@ -3904,7 +3903,7 @@ static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
 	struct kvm_shadow_walk_iterator iterator;
 	u64 spte;
 
-	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
 		return;
 
 	walk_shadow_page_lockless_begin(vcpu);
@@ -3931,7 +3930,7 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 	if (r)
 		return r;
 
-	MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
+	MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa));
 
 
 	return nonpaging_map(vcpu, gva & PAGE_MASK,
@@ -3944,8 +3943,8 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
 
 	arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
 	arch.gfn = gfn;
-	arch.direct_map = vcpu->arch.mmu.direct_map;
-	arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu);
+	arch.direct_map = vcpu->arch.mmu->direct_map;
+	arch.cr3 = vcpu->arch.mmu->get_cr3(vcpu);
 
 	return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
 }
@@ -4051,7 +4050,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 	int write = error_code & PFERR_WRITE_MASK;
 	bool map_writable;
 
-	MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
+	MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa));
 
 	if (page_fault_handle_page_track(vcpu, error_code, gfn))
 		return RET_PF_EMULATE;
@@ -4127,7 +4126,7 @@ static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3,
 {
 	uint i;
 	struct kvm_mmu_root_info root;
-	struct kvm_mmu *mmu = &vcpu->arch.mmu;
+	struct kvm_mmu *mmu = vcpu->arch.mmu;
 
 	root.cr3 = mmu->get_cr3(vcpu);
 	root.hpa = mmu->root_hpa;
@@ -4150,7 +4149,7 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
 			    union kvm_mmu_page_role new_role,
 			    bool skip_tlb_flush)
 {
-	struct kvm_mmu *mmu = &vcpu->arch.mmu;
+	struct kvm_mmu *mmu = vcpu->arch.mmu;
 
 	/*
 	 * For now, limit the fast switch to 64-bit hosts+VMs in order to avoid
@@ -4219,7 +4218,7 @@ static unsigned long get_cr3(struct kvm_vcpu *vcpu)
 static void inject_page_fault(struct kvm_vcpu *vcpu,
 			      struct x86_exception *fault)
 {
-	vcpu->arch.mmu.inject_page_fault(vcpu, fault);
+	vcpu->arch.mmu->inject_page_fault(vcpu, fault);
 }
 
 static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
@@ -4740,7 +4739,7 @@ kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu)
 
 static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 {
-	struct kvm_mmu *context = &vcpu->arch.mmu;
+	struct kvm_mmu *context = vcpu->arch.mmu;
 
 	context->base_role.word = mmu_base_role_mask.word &
 				  kvm_calc_tdp_mmu_root_page_role(vcpu).word;
@@ -4812,7 +4811,7 @@ kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu)
 
 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
 {
-	struct kvm_mmu *context = &vcpu->arch.mmu;
+	struct kvm_mmu *context = vcpu->arch.mmu;
 
 	if (!is_paging(vcpu))
 		nonpaging_init_context(vcpu, context);
@@ -4832,7 +4831,7 @@ EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
 static union kvm_mmu_page_role
 kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty)
 {
-	union kvm_mmu_page_role role = vcpu->arch.mmu.base_role;
+	union kvm_mmu_page_role role = vcpu->arch.mmu->base_role;
 
 	role.level = PT64_ROOT_4LEVEL;
 	role.direct = false;
@@ -4846,7 +4845,7 @@ kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty)
 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 			     bool accessed_dirty, gpa_t new_eptp)
 {
-	struct kvm_mmu *context = &vcpu->arch.mmu;
+	struct kvm_mmu *context = vcpu->arch.mmu;
 	union kvm_mmu_page_role root_page_role =
 		kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty);
 
@@ -4873,7 +4872,7 @@ EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
 
 static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
 {
-	struct kvm_mmu *context = &vcpu->arch.mmu;
+	struct kvm_mmu *context = vcpu->arch.mmu;
 
 	kvm_init_shadow_mmu(vcpu);
 	context->set_cr3 = kvm_x86_ops->set_cr3;
@@ -4891,7 +4890,7 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
 	g_context->inject_page_fault = kvm_inject_page_fault;
 
 	/*
-	 * Note that arch.mmu.gva_to_gpa translates l2_gpa to l1_gpa using
+	 * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using
 	 * L1's nested page tables (e.g. EPT12). The nested translation
 	 * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using
 	 * L2's page tables as the first level of translation and L1's
@@ -4930,10 +4929,10 @@ void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots)
 	if (reset_roots) {
 		uint i;
 
-		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
+		vcpu->arch.mmu->root_hpa = INVALID_PAGE;
 
 		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
-			vcpu->arch.mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
+			vcpu->arch.mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
 	}
 
 	if (mmu_is_nested(vcpu))
@@ -4982,7 +4981,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_load);
 void kvm_mmu_unload(struct kvm_vcpu *vcpu)
 {
 	kvm_mmu_free_roots(vcpu, KVM_MMU_ROOTS_ALL);
-	WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa));
+	WARN_ON(VALID_PAGE(vcpu->arch.mmu->root_hpa));
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_unload);
 
@@ -4996,7 +4995,7 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
 	}
 
 	++vcpu->kvm->stat.mmu_pte_updated;
-	vcpu->arch.mmu.update_pte(vcpu, sp, spte, new);
+	vcpu->arch.mmu->update_pte(vcpu, sp, spte, new);
 }
 
 static bool need_remote_flush(u64 old, u64 new)
@@ -5176,7 +5175,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		entry = *spte;
 		mmu_page_zap_pte(vcpu->kvm, sp, spte);
 		if (gentry &&
-		      !((sp->role.word ^ vcpu->arch.mmu.base_role.word)
+		      !((sp->role.word ^ vcpu->arch.mmu->base_role.word)
 		      & mmu_base_role_mask.word) && rmap_can_add(vcpu))
 			mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
 		if (need_remote_flush(entry, *spte))
@@ -5194,7 +5193,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 	gpa_t gpa;
 	int r;
 
-	if (vcpu->arch.mmu.direct_map)
+	if (vcpu->arch.mmu->direct_map)
 		return 0;
 
 	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
@@ -5230,10 +5229,10 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
 {
 	int r, emulation_type = 0;
 	enum emulation_result er;
-	bool direct = vcpu->arch.mmu.direct_map;
+	bool direct = vcpu->arch.mmu->direct_map;
 
 	/* With shadow page tables, fault_address contains a GVA or nGPA. */
-	if (vcpu->arch.mmu.direct_map) {
+	if (vcpu->arch.mmu->direct_map) {
 		vcpu->arch.gpa_available = true;
 		vcpu->arch.gpa_val = cr2;
 	}
@@ -5246,8 +5245,9 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
 	}
 
 	if (r == RET_PF_INVALID) {
-		r = vcpu->arch.mmu.page_fault(vcpu, cr2, lower_32_bits(error_code),
-					      false);
+		r = vcpu->arch.mmu->page_fault(vcpu, cr2,
+					       lower_32_bits(error_code),
+					       false);
 		WARN_ON(r == RET_PF_INVALID);
 	}
 
@@ -5263,7 +5263,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
 	 * paging in both guests. If true, we simply unprotect the page
 	 * and resume the guest.
 	 */
-	if (vcpu->arch.mmu.direct_map &&
+	if (vcpu->arch.mmu->direct_map &&
 	    (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) {
 		kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2));
 		return 1;
@@ -5311,7 +5311,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
 
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
 {
-	struct kvm_mmu *mmu = &vcpu->arch.mmu;
+	struct kvm_mmu *mmu = vcpu->arch.mmu;
 	int i;
 
 	/* INVLPG on a * non-canonical address is a NOP according to the SDM. */
@@ -5342,7 +5342,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
 
 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
 {
-	struct kvm_mmu *mmu = &vcpu->arch.mmu;
+	struct kvm_mmu *mmu = vcpu->arch.mmu;
 	bool tlb_flush = false;
 	uint i;
 
@@ -5386,8 +5386,8 @@ EXPORT_SYMBOL_GPL(kvm_disable_tdp);
 
 static void free_mmu_pages(struct kvm_vcpu *vcpu)
 {
-	free_page((unsigned long)vcpu->arch.mmu.pae_root);
-	free_page((unsigned long)vcpu->arch.mmu.lm_root);
+	free_page((unsigned long)vcpu->arch.mmu->pae_root);
+	free_page((unsigned long)vcpu->arch.mmu->lm_root);
 }
 
 static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
@@ -5407,9 +5407,9 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
 	if (!page)
 		return -ENOMEM;
 
-	vcpu->arch.mmu.pae_root = page_address(page);
+	vcpu->arch.mmu->pae_root = page_address(page);
 	for (i = 0; i < 4; ++i)
-		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
+		vcpu->arch.mmu->pae_root[i] = INVALID_PAGE;
 
 	return 0;
 }
@@ -5418,20 +5418,21 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
 {
 	uint i;
 
-	vcpu->arch.walk_mmu = &vcpu->arch.mmu;
-	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
-	vcpu->arch.mmu.translate_gpa = translate_gpa;
+	vcpu->arch.mmu = &vcpu->arch.root_mmu;
+	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
+	vcpu->arch.root_mmu.root_hpa = INVALID_PAGE;
+	vcpu->arch.root_mmu.translate_gpa = translate_gpa;
 	vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
 
 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
-		vcpu->arch.mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
+		vcpu->arch.root_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
 
 	return alloc_mmu_pages(vcpu);
 }
 
 void kvm_mmu_setup(struct kvm_vcpu *vcpu)
 {
-	MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa));
+	MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->root_hpa));
 
 	/*
 	 * kvm_mmu_setup() is called only on vCPU initialization.
|