@@ -2832,6 +2832,9 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
 	bool ret = false;
 	u64 spte = 0ull;
 
+	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+		return false;
+
 	if (!page_fault_can_be_fast(error_code))
 		return false;
 
@@ -3227,6 +3230,9 @@ static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr)
 	struct kvm_shadow_walk_iterator iterator;
 	u64 spte = 0ull;
 
+	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+		return spte;
+
 	walk_shadow_page_lockless_begin(vcpu);
 	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
 		if (!is_shadow_present_pte(spte))
@@ -4513,6 +4519,9 @@ int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
 	u64 spte;
 	int nr_sptes = 0;
 
+	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+		return nr_sptes;
+
 	walk_shadow_page_lockless_begin(vcpu);
 	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
 		sptes[iterator.level-1] = spte;