@@ -1965,6 +1965,7 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
 	vmf.pgoff = page->index;
 	vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
 	vmf.page = page;
+	vmf.cow_page = NULL;
 
 	ret = vma->vm_ops->page_mkwrite(vma, &vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
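
The matching one-line addition to struct vm_fault in include/linux/mm.h is not
part of this diff. For orientation, here is a rough sketch of how the struct
reads with the new field; apart from the field names used in the hunks, the
layout and comments are assumptions:

	struct vm_fault {
		unsigned int flags;		/* FAULT_FLAG_xxx flags */
		pgoff_t pgoff;			/* logical offset, as set above */
		void __user *virtual_address;	/* faulting virtual address */

		struct page *cow_page;		/* handler may choose to COW */
		struct page *page;		/* set by the handler, or left
						 * NULL when it did the COW */
	};
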
@@ -2639,7 +2640,8 @@ oom:
  * See filemap_fault() and __lock_page_or_retry().
  */
 static int __do_fault(struct vm_area_struct *vma, unsigned long address,
-		pgoff_t pgoff, unsigned int flags, struct page **page)
+		pgoff_t pgoff, unsigned int flags,
+		struct page *cow_page, struct page **page)
 {
 	struct vm_fault vmf;
 	int ret;
@@ -2648,10 +2650,13 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address,
 	vmf.pgoff = pgoff;
 	vmf.flags = flags;
 	vmf.page = NULL;
+	vmf.cow_page = cow_page;
 
 	ret = vma->vm_ops->fault(vma, &vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
+	if (!vmf.page)
+		goto out;
 
 	if (unlikely(PageHWPoison(vmf.page))) {
 		if (ret & VM_FAULT_LOCKED)
@@ -2665,6 +2670,7 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address,
 	else
 		VM_BUG_ON_PAGE(!PageLocked(vmf.page), vmf.page);
 
+ out:
 	*page = vmf.page;
 	return ret;
 }
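
Taken together, the __do_fault() hunks above define the new contract for
->fault: the handler may complete the copy itself by filling vmf->cow_page
and returning with vmf->page left NULL, in which case __do_fault() bypasses
the poison and lock checks via the new out label. A hypothetical sketch of a
handler using that contract (example_fault and example_has_backing are
invented names; the i_mmap_lock_read() convention is the one documented in
the do_cow_fault() hunk below):

	static int example_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
	{
		struct address_space *mapping = vma->vm_file->f_mapping;

		if (vmf->cow_page && !example_has_backing(mapping, vmf->pgoff)) {
			/* No pagecache page to copy from: fill the COW
			 * destination directly. */
			clear_highpage(vmf->cow_page);
			/* Return holding i_mmap_lock for read to hold off
			 * truncate; do_cow_fault() drops it. */
			i_mmap_lock_read(mapping);
			vmf->page = NULL;
			return 0;
		}
		return filemap_fault(vma, vmf);
	}
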
@@ -2835,7 +2841,7 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		pte_unmap_unlock(pte, ptl);
 	}
 
-	ret = __do_fault(vma, address, pgoff, flags, &fault_page);
+	ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
 
@@ -2875,26 +2881,43 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		return VM_FAULT_OOM;
 	}
 
-	ret = __do_fault(vma, address, pgoff, flags, &fault_page);
+	ret = __do_fault(vma, address, pgoff, flags, new_page, &fault_page);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		goto uncharge_out;
 
-	copy_user_highpage(new_page, fault_page, address, vma);
+	if (fault_page)
+		copy_user_highpage(new_page, fault_page, address, vma);
 	__SetPageUptodate(new_page);
 
 	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 	if (unlikely(!pte_same(*pte, orig_pte))) {
 		pte_unmap_unlock(pte, ptl);
-		unlock_page(fault_page);
-		page_cache_release(fault_page);
+		if (fault_page) {
+			unlock_page(fault_page);
+			page_cache_release(fault_page);
+		} else {
+			/*
+			 * The fault handler has no page to lock, so it holds
+			 * i_mmap_lock for read to protect against truncate.
+			 */
+			i_mmap_unlock_read(vma->vm_file->f_mapping);
+		}
 		goto uncharge_out;
 	}
 	do_set_pte(vma, address, new_page, pte, true, true);
 	mem_cgroup_commit_charge(new_page, memcg, false);
 	lru_cache_add_active_or_unevictable(new_page, vma);
 	pte_unmap_unlock(pte, ptl);
-	unlock_page(fault_page);
-	page_cache_release(fault_page);
+	if (fault_page) {
+		unlock_page(fault_page);
+		page_cache_release(fault_page);
+	} else {
+		/*
+		 * The fault handler has no page to lock, so it holds
+		 * i_mmap_lock for read to protect against truncate.
+		 */
+		i_mmap_unlock_read(vma->vm_file->f_mapping);
+	}
 	return ret;
 uncharge_out:
 	mem_cgroup_cancel_charge(new_page, memcg);
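
The same unlock-or-release pairing now appears twice in do_cow_fault().
Purely as an illustration of the invariant (this helper is not part of the
patch), the two arms read as:

	static void example_cow_fault_release(struct vm_area_struct *vma,
			struct page *fault_page)
	{
		if (fault_page) {
			/* __do_fault() returned a locked pagecache page. */
			unlock_page(fault_page);
			page_cache_release(fault_page);
		} else {
			/* The handler performed the COW itself and returned
			 * holding i_mmap_lock for read to fend off truncate. */
			i_mmap_unlock_read(vma->vm_file->f_mapping);
		}
	}
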
@@ -2913,7 +2936,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	int dirtied = 0;
 	int ret, tmp;
 
-	ret = __do_fault(vma, address, pgoff, flags, &fault_page);
+	ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
 