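account_shadowed() and unaccount_shadowed() take the shadow page itself instead of a raw gfn; both helpers now read sp->gfn internally, and the two call sites are updated to match. As far as these hunks show, behaviour is unchanged; the point is presumably to give the helpers access to the whole kvm_mmu_page rather than just its gfn.
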
@@ -804,12 +804,14 @@ static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
 	return &slot->arch.lpage_info[level - 2][idx];
 }
 
-static void account_shadowed(struct kvm *kvm, gfn_t gfn)
+static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	struct kvm_memory_slot *slot;
 	struct kvm_lpage_info *linfo;
+	gfn_t gfn;
 	int i;
 
+	gfn = sp->gfn;
 	slot = gfn_to_memslot(kvm, gfn);
 	for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
 		linfo = lpage_info_slot(gfn, slot, i);
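The hunk above is a pure signature change: the caller stops extracting the gfn and hands over the descriptor, and the callee unpacks it. A minimal self-contained sketch of the same refactoring pattern, with all names invented for illustration (none of them are kernel API):

#include <stdio.h>

typedef unsigned long long gfn_t;

struct mmu_page {
	gfn_t gfn;
	int level;	/* extra state the callee can now reach */
};

/* Before: account(kvm, sp->gfn) -- the callee saw only the gfn.
 * After:  account(kvm, sp)      -- the callee unpacks what it needs,
 * so later changes can use other sp fields without touching callers.
 */
static void account(struct mmu_page *sp)
{
	gfn_t gfn = sp->gfn;

	printf("accounting gfn 0x%llx at level %d\n", gfn, sp->level);
}

int main(void)
{
	struct mmu_page sp = { .gfn = 0x804, .level = 1 };

	account(&sp);
	return 0;
}
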
@@ -818,12 +820,14 @@ static void account_shadowed(struct kvm *kvm, gfn_t gfn)
 	kvm->arch.indirect_shadow_pages++;
 }
 
-static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
+static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	struct kvm_memory_slot *slot;
 	struct kvm_lpage_info *linfo;
+	gfn_t gfn;
 	int i;
 
+	gfn = sp->gfn;
 	slot = gfn_to_memslot(kvm, gfn);
 	for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
 		linfo = lpage_info_slot(gfn, slot, i);
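unaccount_shadowed() gets the mirror-image change, which keeps the two helpers symmetric: the lpage_info counts and the arch.indirect_shadow_pages increment that account_shadowed() performs are reversed here starting from the same sp->gfn.
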
@@ -2131,7 +2135,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		if (level > PT_PAGE_TABLE_LEVEL && need_sync)
 			kvm_sync_pages(vcpu, gfn);
 
-		account_shadowed(vcpu->kvm, gfn);
+		account_shadowed(vcpu->kvm, sp);
 	}
 	sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
 	init_shadow_page_table(sp);
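At this call site kvm_mmu_get_page() already has sp in hand, so passing it instead of gfn costs nothing; gfn itself is still used just above for kvm_sync_pages().
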
@@ -2312,7 +2316,7 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
 	kvm_mmu_unlink_parents(kvm, sp);
 
 	if (!sp->role.invalid && !sp->role.direct)
-		unaccount_shadowed(kvm, sp->gfn);
+		unaccount_shadowed(kvm, sp);
 
 	if (sp->unsync)
 		kvm_unlink_unsync_page(kvm, sp);
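
In the zap path the guard stays where it was: only pages that are still valid and not direct-mapped, i.e. the ones account_shadowed() counted, get unaccounted. A toy model of that balanced-counter invariant, again with invented names rather than kernel API:

#include <assert.h>

struct page_role { int invalid; int direct; };

static int indirect_shadow_pages;

static void account(void)
{
	indirect_shadow_pages++;
}

static void unaccount(const struct page_role *role)
{
	/* Mirrors the !sp->role.invalid && !sp->role.direct guard above,
	 * so the counter only moves for pages that were accounted. */
	if (!role->invalid && !role->direct) {
		assert(indirect_shadow_pages > 0);
		indirect_shadow_pages--;
	}
}

int main(void)
{
	struct page_role role = { .invalid = 0, .direct = 0 };

	account();
	unaccount(&role);
	assert(indirect_shadow_pages == 0);
	return 0;
}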