|
@@ -2639,13 +2639,6 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
|
|
|
if (vma->vm_start >= end)
|
|
|
return 0;
|
|
|
|
|
|
- if (uf) {
|
|
|
- int error = userfaultfd_unmap_prep(vma, start, end, uf);
|
|
|
-
|
|
|
- if (error)
|
|
|
- return error;
|
|
|
- }
|
|
|
-
|
|
|
/*
|
|
|
* If we need to split any vma, do it now to save pain later.
|
|
|
*
|
|
@@ -2679,6 +2672,21 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
|
|
|
}
|
|
|
vma = prev ? prev->vm_next : mm->mmap;
|
|
|
|
|
|
+ if (unlikely(uf)) {
|
|
|
+ /*
|
|
|
+ * If userfaultfd_unmap_prep returns an error the vmas
|
|
|
+ * will remain split, but userland will get a
|
|
|
+ * highly unexpected error anyway. This is no
|
|
|
+ * different from the case where the first of the two
|
|
|
+ * __split_vma fails, but we don't undo the first
|
|
|
+ * split, even though we could. This failure is unlikely
|
|
|
+ * enough that it's not worth optimizing for.
|
|
|
+ */
|
|
|
+ int error = userfaultfd_unmap_prep(vma, start, end, uf);
|
|
|
+ if (error)
|
|
|
+ return error;
|
|
|
+ }
|
|
|
+
|
|
|
/*
|
|
|
* unlock any mlock()ed ranges before detaching vmas
|
|
|
*/
|