@@ -292,11 +292,18 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
 	phys_addr_t addr = start, end = start + size;
 	phys_addr_t next;
 
+	assert_spin_locked(&kvm->mmu_lock);
 	pgd = kvm->arch.pgd + stage2_pgd_index(addr);
 	do {
 		next = stage2_pgd_addr_end(addr, end);
 		if (!stage2_pgd_none(*pgd))
 			unmap_stage2_puds(kvm, pgd, addr, next);
+		/*
+		 * If the range is too large, release the kvm->mmu_lock
+		 * to prevent starvation and lockup detector warnings.
+		 */
+		if (next != end)
+			cond_resched_lock(&kvm->mmu_lock);
 	} while (pgd++, addr = next, addr != end);
 }
 
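
Taken on its own, the first hunk hardens unmap_stage2_range() in two ways: assert_spin_locked() turns the implicit "caller holds kvm->mmu_lock" assumption into an explicit, checkable precondition, and cond_resched_lock() yields the lock between PGD entries so that unmapping a very large range cannot starve other mmu_lock waiters or trip the lockup detector. The `next != end` test skips the yield on the final iteration, where the caller is about to release the lock anyway. Below is a minimal sketch of the same lock-break pattern; the helper name and parameters are hypothetical, but assert_spin_locked() and cond_resched_lock() are the kernel APIs used in the hunk.

	#include <linux/sched.h>	/* cond_resched_lock() */
	#include <linux/spinlock.h>	/* spinlock_t, assert_spin_locked() */

	/*
	 * Hypothetical helper illustrating the lock-break pattern: do a
	 * long walk under a spinlock, yielding the lock between chunks so
	 * other waiters can make progress.
	 */
	static void walk_range_locked(spinlock_t *lock, unsigned long start,
				      unsigned long end, unsigned long step)
	{
		unsigned long addr;

		assert_spin_locked(lock);	/* caller must hold 'lock' */

		for (addr = start; addr < end; addr += step) {
			/* ... process one chunk while holding the lock ... */

			/*
			 * cond_resched_lock() may drop 'lock', reschedule,
			 * and retake it; skip the yield on the last chunk,
			 * as the hunk above does with 'next != end'.
			 */
			if (addr + step < end)
				cond_resched_lock(lock);
		}
	}

cond_resched_lock() always returns with the lock held again, so the invariant "mmu_lock is held" still holds at every iteration boundary; what a caller gives up is atomicity of the walk as a whole.
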
@@ -831,7 +838,10 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
 	if (kvm->arch.pgd == NULL)
 		return;
 
+	spin_lock(&kvm->mmu_lock);
 	unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+	spin_unlock(&kvm->mmu_lock);
+
 	/* Free the HW pgd, one page at a time */
 	free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE);
 	kvm->arch.pgd = NULL;
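
The second hunk supplies the caller side of that contract: kvm_free_stage2_pgd() now takes kvm->mmu_lock around the full-range unmap, which both satisfies the new assertion and serializes this teardown against any other path walking the stage-2 tables under the same lock. Note that the lock is released before free_pages_exact(), so the actual page freeing happens outside the critical section. For readability, here is the function as it reads with the hunk applied, reassembled from the hunk's context and added lines (the braces are restored; anything else outside the hunk is elided):

	void kvm_free_stage2_pgd(struct kvm *kvm)
	{
		if (kvm->arch.pgd == NULL)
			return;

		spin_lock(&kvm->mmu_lock);
		unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
		spin_unlock(&kvm->mmu_lock);

		/* Free the HW pgd, one page at a time */
		free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE);
		kvm->arch.pgd = NULL;
	}

One consequence worth noting: because unmap_stage2_range() can now yield mmu_lock internally, holding the lock across the call serializes the walk against other lock holders but does not make it atomic with respect to them.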