@@ -276,6 +276,12 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
 				     need_rmap_locks);
 	if (moved_len < old_len) {
+		err = -ENOMEM;
+	} else if (vma->vm_file && vma->vm_file->f_op->mremap) {
+		err = vma->vm_file->f_op->mremap(vma->vm_file, new_vma);
+	}
+
+	if (unlikely(err)) {
 		/*
 		 * On error, move entries back from new area to old,
 		 * which will succeed since page tables still there,
@@ -286,16 +292,8 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 		vma = new_vma;
 		old_len = new_len;
 		old_addr = new_addr;
-		new_addr = -ENOMEM;
+		new_addr = err;
 	} else {
-		if (vma->vm_file && vma->vm_file->f_op->mremap) {
-			err = vma->vm_file->f_op->mremap(vma->vm_file, new_vma);
-			if (err < 0) {
-				move_page_tables(new_vma, new_addr, vma,
-						 old_addr, moved_len, true);
-				return err;
-			}
-		}
 		arch_remap(mm, old_addr, old_addr + old_len,
 			   new_addr, new_addr + new_len);
 	}
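
For readers skimming the hunks, the shape of the change is: the error is now computed up front from either failure source (a partial page-table move, or the file's ->mremap() hook) and then handled on a single rollback path, instead of the hook having its own separate error path inside the else branch. Below is a minimal standalone C sketch of that control-flow shape; it is not kernel code, and the names move_tables(), driver_mremap() and do_move() are hypothetical stand-ins.

#include <stdio.h>

/* Pretend the page-table move fully succeeded. */
static long move_tables(long want_len)
{
	return want_len;
}

/* Stand-in for the ->mremap() file-operations hook. */
static int driver_mremap(int should_fail)
{
	return should_fail ? -14 : 0;   /* -EFAULT-style error */
}

static long do_move(long old_len, int have_hook, int hook_fails)
{
	long moved_len, new_addr = 0x7000;
	int err = 0;

	/* Step 1: collect the error, whichever source it comes from. */
	moved_len = move_tables(old_len);
	if (moved_len < old_len)
		err = -12;                       /* -ENOMEM */
	else if (have_hook)
		err = driver_mremap(hook_fails); /* the ->mremap() callback */

	/* Step 2: one rollback/error path instead of two. */
	if (err) {
		puts("rolling back page tables");
		new_addr = err;                  /* caller receives the errno */
	}
	return new_addr;
}

int main(void)
{
	printf("ok:   %ld\n", do_move(4096, 1, 0));
	printf("fail: %ld\n", do_move(4096, 1, 1));
	return 0;
}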