@@ -3088,14 +3088,16 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
 		return false;
 
 	walk_shadow_page_lockless_begin(vcpu);
-	for_each_shadow_entry_lockless(vcpu, gva, iterator, spte)
-		if (!is_shadow_present_pte(spte) || iterator.level < level)
-			break;
 
 	do {
 		bool remove_write_prot = false;
 		bool remove_acc_track;
 
+		for_each_shadow_entry_lockless(vcpu, gva, iterator, spte)
+			if (!is_shadow_present_pte(spte) ||
+			    iterator.level < level)
+				break;
+
 		sp = page_header(__pa(iterator.sptep));
 		if (!is_last_spte(spte, sp->role.level))
 			break;
@@ -3176,8 +3178,6 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
 			break;
 		}
 
-		spte = mmu_spte_get_lockless(iterator.sptep);
-
 	} while (true);
 
 	trace_fast_page_fault(vcpu, gva, error_code, iterator.sptep,