@@ -2062,6 +2062,41 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
 	return ret;
 }
 
+/*
+ * Handle dirtying of a page in shared file mapping on a write fault.
+ *
+ * The function expects the page to be locked and unlocks it.
+ */
+static void fault_dirty_shared_page(struct vm_area_struct *vma,
+				    struct page *page)
+{
+	struct address_space *mapping;
+	bool dirtied;
+	bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
+
+	dirtied = set_page_dirty(page);
+	VM_BUG_ON_PAGE(PageAnon(page), page);
+	/*
+	 * Take a local copy of the address_space - page.mapping may be zeroed
+	 * by truncate after unlock_page(). The address_space itself remains
+	 * pinned by vma->vm_file's reference. We rely on unlock_page()'s
+	 * release semantics to prevent the compiler from undoing this copying.
+	 */
+	mapping = page_rmapping(page);
+	unlock_page(page);
+
+	if ((dirtied || page_mkwrite) && mapping) {
+		/*
+		 * Some device drivers do not set page.mapping
+		 * but still dirty their pages
+		 */
+		balance_dirty_pages_ratelimited(mapping);
+	}
+
+	if (!page_mkwrite)
+		file_update_time(vma->vm_file);
+}
+
 /*
  * Handle write page faults for pages that can be reused in the current vma
  *
@@ -2092,28 +2127,11 @@ static inline int wp_page_reuse(struct vm_fault *vmf, struct page *page,
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
 
 	if (dirty_shared) {
-		struct address_space *mapping;
-		int dirtied;
-
 		if (!page_mkwrite)
 			lock_page(page);
 
-		dirtied = set_page_dirty(page);
-		VM_BUG_ON_PAGE(PageAnon(page), page);
-		mapping = page->mapping;
-		unlock_page(page);
+		fault_dirty_shared_page(vma, page);
 		put_page(page);
-
-		if ((dirtied || page_mkwrite) && mapping) {
-			/*
-			 * Some device drivers do not set page.mapping
-			 * but still dirty their pages
-			 */
-			balance_dirty_pages_ratelimited(mapping);
-		}
-
-		if (!page_mkwrite)
-			file_update_time(vma->vm_file);
 	}
 
 	return VM_FAULT_WRITE;
@@ -3294,8 +3312,6 @@ uncharge_out:
 static int do_shared_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	struct address_space *mapping;
-	int dirtied = 0;
 	int ret, tmp;
 
 	ret = __do_fault(vmf);
@@ -3324,27 +3340,7 @@ static int do_shared_fault(struct vm_fault *vmf)
 		return ret;
 	}
 
-	if (set_page_dirty(vmf->page))
-		dirtied = 1;
-	/*
-	 * Take a local copy of the address_space - page.mapping may be zeroed
-	 * by truncate after unlock_page(). The address_space itself remains
-	 * pinned by vma->vm_file's reference. We rely on unlock_page()'s
-	 * release semantics to prevent the compiler from undoing this copying.
-	 */
-	mapping = page_rmapping(vmf->page);
-	unlock_page(vmf->page);
-	if ((dirtied || vma->vm_ops->page_mkwrite) && mapping) {
-		/*
-		 * Some device drivers do not set page.mapping but still
-		 * dirty their pages
-		 */
-		balance_dirty_pages_ratelimited(mapping);
-	}
-
-	if (!vma->vm_ops->page_mkwrite)
-		file_update_time(vma->vm_file);
-
+	fault_dirty_shared_page(vma, vmf->page);
 	return ret;
 }
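
Note on the helper's locking contract, assembled from the two converted call
sites above (an illustrative C sketch, not part of the patch itself): the page
must be locked on entry, and fault_dirty_shared_page() unlocks it before
returning, so callers keep only their page reference afterwards.

	/* wp_page_reuse(): a ->page_mkwrite handler leaves the page locked;
	 * otherwise the caller takes the lock itself. */
	if (!page_mkwrite)
		lock_page(page);
	fault_dirty_shared_page(vma, page);	/* dirty, unlock, throttle */
	put_page(page);				/* drop the caller's reference */

	/* do_shared_fault(): the fault path already returns a locked page,
	 * so the helper can be called directly. */
	fault_dirty_shared_page(vma, vmf->page);

The helper skips file_update_time() when a ->page_mkwrite handler exists,
presumably because the filesystem's handler is then responsible for updating
the file times itself.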