@@ -1726,8 +1726,7 @@ static void drop_parent_pte(struct kvm_mmu_page *sp,
 	mmu_spte_clear_no_track(parent_pte);
 }
 
-static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
-					       u64 *parent_pte, int direct)
+static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct)
 {
 	struct kvm_mmu_page *sp;
 
@@ -1743,8 +1742,6 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
	 * this feature. See the comments in kvm_zap_obsolete_pages().
	 */
 	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
-	sp->parent_ptes.val = 0;
-	mmu_page_add_parent_pte(vcpu, sp, parent_pte);
 	kvm_mod_used_mmu_pages(vcpu->kvm, +1);
 	return sp;
 }
@@ -2133,10 +2130,13 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		trace_kvm_mmu_get_page(sp, false);
 		return sp;
 	}
+
 	++vcpu->kvm->stat.mmu_cache_miss;
-	sp = kvm_mmu_alloc_page(vcpu, parent_pte, direct);
-	if (!sp)
-		return sp;
+
+	sp = kvm_mmu_alloc_page(vcpu, direct);
+
+	mmu_page_add_parent_pte(vcpu, sp, parent_pte);
+
 	sp->gfn = gfn;
 	sp->role = role;
 	hlist_add_head(&sp->hash_link,
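
Taken together, the hunks above move parent-PTE linking out of kvm_mmu_alloc_page() and into the caller, and drop the if (!sp) check, consistent with the allocator drawing from pre-topped-up per-vCPU memory caches rather than returning NULL. Below is a minimal, self-contained C sketch of the same shape; it is not kernel code: struct mmu_page_sketch, alloc_sketch() and get_page_sketch() are invented stand-ins, and the single parent_pte pointer stands in for the real rmap-style parent_ptes chain.

#include <stdint.h>
#include <stdlib.h>

/* Toy stand-in for struct kvm_mmu_page; one parent pointer, not an rmap chain. */
struct mmu_page_sketch {
	uint64_t gfn;
	uint64_t *parent_pte;
};

/* After the patch: the allocator knows nothing about parent PTEs. */
static struct mmu_page_sketch *alloc_sketch(int direct)
{
	(void)direct;	/* kept only to mirror kvm_mmu_alloc_page(vcpu, direct) */
	return calloc(1, sizeof(struct mmu_page_sketch));
}

/* The caller links the parent itself, as kvm_mmu_get_page() now does. */
static struct mmu_page_sketch *get_page_sketch(uint64_t gfn, uint64_t *parent_pte)
{
	struct mmu_page_sketch *sp = alloc_sketch(1);

	sp->parent_pte = parent_pte;	/* formerly done inside the allocator */
	sp->gfn = gfn;
	return sp;
}

int main(void)
{
	uint64_t fake_pte = 0;
	struct mmu_page_sketch *sp = get_page_sketch(0x1000, &fake_pte);

	free(sp);
	return 0;
}

The point of the split is that allocation and parent linking become independent steps, so callers that create parentless pages (such as roots) no longer need to thread a NULL parent_pte through the allocator.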