@@ -2802,9 +2802,9 @@ static bool page_fault_can_be_fast(u32 error_code)
 }
 
 static bool
-fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 spte)
+fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+			u64 *sptep, u64 spte)
 {
-	struct kvm_mmu_page *sp = page_header(__pa(sptep));
 	gfn_t gfn;
 
 	WARN_ON(!sp->role.direct);
@@ -2830,6 +2830,7 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
 			    u32 error_code)
 {
 	struct kvm_shadow_walk_iterator iterator;
+	struct kvm_mmu_page *sp;
 	bool ret = false;
 	u64 spte = 0ull;
 
@@ -2853,7 +2854,8 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
 		goto exit;
 	}
 
-	if (!is_last_spte(spte, level))
+	sp = page_header(__pa(iterator.sptep));
+	if (!is_last_spte(spte, sp->role.level))
 		goto exit;
 
 	/*
@@ -2879,7 +2881,7 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
 	 * the gfn is not stable for indirect shadow page.
 	 * See Documentation/virtual/kvm/locking.txt to get more detail.
 	 */
-	ret = fast_pf_fix_direct_spte(vcpu, iterator.sptep, spte);
+	ret = fast_pf_fix_direct_spte(vcpu, sp, iterator.sptep, spte);
 exit:
 	trace_fast_page_fault(vcpu, gva, error_code, iterator.sptep,
 			      spte, ret);
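
The hunks above do two related things: fast_page_fault() now resolves the shadow page once with page_header(__pa(iterator.sptep)) and reuses it both for the is_last_spte() check (via sp->role.level) and as an argument to fast_pf_fix_direct_spte(), which previously re-derived the page from sptep itself. Below is a minimal, self-contained C sketch of that caller-resolves-once pattern; struct mmu_page, lookup_page(), and fix_spte() are hypothetical stand-ins for illustration, not kernel APIs.

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-in for struct kvm_mmu_page: only the fields used here. */
	struct mmu_page {
		int  level;
		bool direct;
	};

	/*
	 * Hypothetical stand-in for page_header(__pa(sptep)): resolves the page
	 * containing a given spte pointer.  A real implementation would mask the
	 * pointer down to its page frame; one static page suffices for a sketch.
	 */
	static struct mmu_page *lookup_page(unsigned long *sptep)
	{
		static struct mmu_page page = { .level = 1, .direct = true };
		(void)sptep;
		return &page;
	}

	/*
	 * As in the patched fast_pf_fix_direct_spte(): the shadow page arrives as
	 * a parameter, so the callee no longer recomputes it from sptep.
	 */
	static bool fix_spte(struct mmu_page *sp, unsigned long *sptep,
			     unsigned long spte)
	{
		if (!sp->direct)		/* mirrors WARN_ON(!sp->role.direct) */
			return false;
		*sptep = spte | 0x1;		/* toy fix-up: set a present-style bit */
		return true;
	}

	int main(void)
	{
		unsigned long spte = 0x8;
		struct mmu_page *sp = lookup_page(&spte);	/* resolved once, in the caller */

		/* The same sp serves both the level check and the fix-up. */
		if (sp->level == 1 && fix_spte(sp, &spte, spte))
			printf("fixed spte: %#lx\n", spte);
		return 0;
	}

The design point mirrors the diff: the callee's behaviour depends on which shadow page the spte belongs to, so having the caller resolve that page once and pass it down guarantees the level check and the fix-up agree on the same page, instead of deriving it independently in two places.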