@@ -2269,6 +2269,38 @@ oom:
 	return VM_FAULT_OOM;
 }
 
+/**
+ * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE
+ *			  writeable once the page is prepared
+ *
+ * @vmf: structure describing the fault
+ *
+ * This function handles all that is needed to finish a write page fault in a
+ * shared mapping due to the PTE being read-only once the mapped page is
+ * prepared. It handles locking of the PTE and modifying it. The function
+ * returns VM_FAULT_WRITE on success, or 0 when the PTE got changed before we
+ * acquired the PTE lock.
+ *
+ * The function expects the page to be locked or other protection against
+ * concurrent faults / writeback (such as DAX radix tree locks).
+ */
+int finish_mkwrite_fault(struct vm_fault *vmf)
+{
+	WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
+	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
+				       &vmf->ptl);
+	/*
+	 * We might have raced with another page fault while we released the
+	 * pte_offset_map_lock.
+	 */
+	if (!pte_same(*vmf->pte, vmf->orig_pte)) {
+		pte_unmap_unlock(vmf->pte, vmf->ptl);
+		return 0;
+	}
+	wp_page_reuse(vmf);
+	return VM_FAULT_WRITE;
+}
+
 /*
  * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
  * mapping
@@ -2285,16 +2317,7 @@ static int wp_pfn_shared(struct vm_fault *vmf)
 		ret = vma->vm_ops->pfn_mkwrite(vma, vmf);
 		if (ret & VM_FAULT_ERROR)
 			return ret;
-		vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
-				vmf->address, &vmf->ptl);
-		/*
-		 * We might have raced with another page fault while we
-		 * released the pte_offset_map_lock.
-		 */
-		if (!pte_same(*vmf->pte, vmf->orig_pte)) {
-			pte_unmap_unlock(vmf->pte, vmf->ptl);
-			return 0;
-		}
+		return finish_mkwrite_fault(vmf);
 	}
 	wp_page_reuse(vmf);
 	return VM_FAULT_WRITE;
@@ -2304,7 +2327,6 @@ static int wp_page_shared(struct vm_fault *vmf)
 	__releases(vmf->ptl)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	int page_mkwrite = 0;
 
 	get_page(vmf->page);
 
@@ -2318,26 +2340,17 @@ static int wp_page_shared(struct vm_fault *vmf)
 			put_page(vmf->page);
 			return tmp;
 		}
-		/*
-		 * Since we dropped the lock we need to revalidate
-		 * the PTE as someone else may have changed it. If
-		 * they did, we just return, as we can count on the
-		 * MMU to tell us if they didn't also make it writable.
-		 */
-		vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
-				vmf->address, &vmf->ptl);
-		if (!pte_same(*vmf->pte, vmf->orig_pte)) {
+		tmp = finish_mkwrite_fault(vmf);
+		if (unlikely(!tmp || (tmp &
+				      (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
 			unlock_page(vmf->page);
-			pte_unmap_unlock(vmf->pte, vmf->ptl);
 			put_page(vmf->page);
-			return 0;
+			return tmp;
 		}
-		page_mkwrite = 1;
-	}
-
-	wp_page_reuse(vmf);
-	if (!page_mkwrite)
+	} else {
+		wp_page_reuse(vmf);
 		lock_page(vmf->page);
+	}
 	fault_dirty_shared_page(vma, vmf->page);
 	put_page(vmf->page);
 
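For readers tracing how the new helper is meant to be consumed, the sketch below exercises the contract documented in its kernel-doc: finish_mkwrite_fault() returns VM_FAULT_WRITE once the PTE has been made writable (wp_page_reuse() drops the PTE lock again), or 0 if the PTE changed while the lock was dropped. The caller example_finish_write() and its surrounding comments are illustrative assumptions for this note, not part of the patch; only finish_mkwrite_fault() itself comes from the code above.

#include <linux/mm.h>

/*
 * Illustrative sketch (hypothetical caller, not part of this patch): a write
 * fault path that has already prepared and protected its page and now wants
 * the read-only PTE upgraded to a writable one.
 */
static int example_finish_write(struct vm_fault *vmf)
{
	int ret;

	/* ...page is prepared and protected against concurrent faults... */

	ret = finish_mkwrite_fault(vmf);
	if (!ret) {
		/*
		 * The PTE changed while the PTE lock was dropped; return and
		 * let the CPU re-fault if the access still needs a writable
		 * mapping.
		 */
		return 0;
	}

	/* VM_FAULT_WRITE: the PTE is now writable and the PTE lock dropped. */
	return ret;
}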