@@ -292,11 +292,18 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
         phys_addr_t addr = start, end = start + size;
         phys_addr_t next;
 
+        assert_spin_locked(&kvm->mmu_lock);
         pgd = kvm->arch.pgd + stage2_pgd_index(addr);
         do {
                 next = stage2_pgd_addr_end(addr, end);
                 if (!stage2_pgd_none(*pgd))
                         unmap_stage2_puds(kvm, pgd, addr, next);
+                /*
+                 * If the range is too large, release the kvm->mmu_lock
+                 * to prevent starvation and lockup detector warnings.
+                 */
+                if (next != end)
+                        cond_resched_lock(&kvm->mmu_lock);
         } while (pgd++, addr = next, addr != end);
 }
 
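The pair added above is a generic long-walk locking pattern: the caller must already hold kvm->mmu_lock (now documented by assert_spin_locked()), and the walker periodically yields the lock via cond_resched_lock() so that unmapping a very large range cannot starve other lock users or trip the soft-lockup detector. Below is a minimal sketch of the same pattern with a hypothetical, chunked teardown loop; the function name, the 2MiB stride and SZ_2M (from <linux/sizes.h>) are illustrative choices, not taken from this patch.

    /* Illustrative only: yield a spinlock periodically during a long walk. */
    static void teardown_range(struct kvm *kvm, phys_addr_t start, u64 size)
    {
            phys_addr_t addr = start, end = start + size, next;

            assert_spin_locked(&kvm->mmu_lock);     /* caller holds the lock */
            do {
                    /* Process at most 2MiB per iteration (arbitrary chunk size). */
                    next = (end - addr > SZ_2M) ? addr + SZ_2M : end;

                    /* ... tear down stage-2 mappings for [addr, next) here ... */

                    /*
                     * Between chunks, but not after the last one, drop and
                     * retake the lock so that contenders and the scheduler
                     * get a chance to run.
                     */
                    if (next != end)
                            cond_resched_lock(&kvm->mmu_lock);
            } while (addr = next, addr != end);
    }
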
@@ -803,6 +810,7 @@ void stage2_unmap_vm(struct kvm *kvm)
         int idx;
 
         idx = srcu_read_lock(&kvm->srcu);
+        down_read(&current->mm->mmap_sem);
         spin_lock(&kvm->mmu_lock);
 
         slots = kvm_memslots(kvm);
@@ -810,6 +818,7 @@ void stage2_unmap_vm(struct kvm *kvm)
                 stage2_unmap_memslot(kvm, memslot);
 
         spin_unlock(&kvm->mmu_lock);
+        up_read(&current->mm->mmap_sem);
         srcu_read_unlock(&kvm->srcu, idx);
 }
 
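Taken together, these two hunks establish the lock ordering for stage2_unmap_vm(): mmap_sem (read) is the outer lock, kvm->mmu_lock the inner one, and they are released in reverse order. The memslot unmap looks up VMAs in the calling task's mm, so the VMA layout has to stay stable for the whole traversal. Reduced to just the lock calls that appear in the hunks (the memslot walk itself is elided):

    idx = srcu_read_lock(&kvm->srcu);       /* memslots are SRCU-protected  */
    down_read(&current->mm->mmap_sem);      /* outer: freeze the VMA layout */
    spin_lock(&kvm->mmu_lock);              /* inner: stage-2 page tables   */

    /* ... kvm_for_each_memslot(): stage2_unmap_memslot(kvm, memslot) ... */

    spin_unlock(&kvm->mmu_lock);
    up_read(&current->mm->mmap_sem);        /* release in reverse order     */
    srcu_read_unlock(&kvm->srcu, idx);
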
@@ -829,7 +838,10 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
         if (kvm->arch.pgd == NULL)
                 return;
 
+        spin_lock(&kvm->mmu_lock);
         unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+        spin_unlock(&kvm->mmu_lock);
+
         /* Free the HW pgd, one page at a time */
         free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE);
         kvm->arch.pgd = NULL;
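
kvm_free_stage2_pgd() used to call unmap_stage2_range() without mmu_lock, which the new assert_spin_locked() in the first hunk is there to catch. For reference, the function as it reads with this hunk applied (reassembled from the context and added lines above; the signature comes from the hunk header):

    void kvm_free_stage2_pgd(struct kvm *kvm)
    {
            if (kvm->arch.pgd == NULL)
                    return;

            spin_lock(&kvm->mmu_lock);
            unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
            spin_unlock(&kvm->mmu_lock);

            /* Free the HW pgd, one page at a time */
            free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE);
            kvm->arch.pgd = NULL;
    }

Only the unmap itself runs under mmu_lock; the cond_resched_lock() added to unmap_stage2_range() keeps even this full-IPA-range teardown from holding the lock for too long at a stretch.
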
@@ -1801,6 +1813,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
             (KVM_PHYS_SIZE >> PAGE_SHIFT))
                 return -EFAULT;
 
+        down_read(&current->mm->mmap_sem);
         /*
          * A memory region could potentially cover multiple VMAs, and any holes
          * between them, so iterate over all of them to find out if we can map
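
The down_read() needs to come before the VMA walk that this comment introduces: VMA lookups in current->mm (find_vma() and friends) are only safe while mmap_sem is held for read. The fragment below is a standalone illustration of that requirement; the loop body is paraphrased, and in the patched function the semaphore actually stays held from here down to the new out: label rather than around a local block.

    down_read(&current->mm->mmap_sem);
    do {
            struct vm_area_struct *vma = find_vma(current->mm, hva);

            /* Stop if no VMA covers the rest of [hva, reg_end). */
            if (!vma || vma->vm_start >= reg_end)
                    break;

            /* ... validate / pre-map the part of the slot in this VMA ... */
            hva = vma->vm_end;
    } while (hva < reg_end);
    up_read(&current->mm->mmap_sem);
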
@@ -1844,8 +1857,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
                         pa += vm_start - vma->vm_start;
 
                         /* IO region dirty page logging not allowed */
-                        if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
-                                return -EINVAL;
+                        if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
+                                ret = -EINVAL;
+                                goto out;
+                        }
 
                         ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
                                                     vm_end - vm_start,
@@ -1857,7 +1872,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
         } while (hva < reg_end);
 
         if (change == KVM_MR_FLAGS_ONLY)
-                return ret;
+                goto out;
 
         spin_lock(&kvm->mmu_lock);
         if (ret)
@@ -1865,6 +1880,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
         else
                 stage2_flush_memslot(kvm, memslot);
         spin_unlock(&kvm->mmu_lock);
+out:
+        up_read(&current->mm->mmap_sem);
         return ret;
 }
 
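These last three hunks are the other half of taking mmap_sem in kvm_arch_prepare_memory_region(): once the semaphore is held across the function, every early return after the down_read() has to be converted into goto out, so there is exactly one exit path that drops it. A minimal sketch of that shape, with hypothetical helpers standing in for the real checks (names invented for illustration):

    /* Illustrative only: single-exit pattern for a function-scoped lock. */
    static int prepare_region(struct kvm *kvm, struct kvm_memory_slot *slot)
    {
            int ret = 0;

            down_read(&current->mm->mmap_sem);

            if (!slot_is_mappable(slot)) {          /* hypothetical check */
                    ret = -EINVAL;
                    goto out;       /* never return with mmap_sem held */
            }

            ret = map_slot(kvm, slot);              /* hypothetical helper */
    out:
            up_read(&current->mm->mmap_sem);
            return ret;
    }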