|
|
@@ -1982,6 +1982,65 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
|
|
|
return ret;
|
|
|
}
|
|
|
|
|
|
+/*
|
|
|
+ * Handle write page faults for pages that can be reused in the current vma
|
|
|
+ *
|
|
|
+ * This can happen either due to the mapping being mapped with the VM_SHARED flag,
|
|
|
+ * or due to us being the last reference standing to the page. In either
|
|
|
+ * case, all we need to do here is to mark the page as writable and update
|
|
|
+ * any related book-keeping.
|
|
|
+ */
|
|
|
+static inline int wp_page_reuse(struct mm_struct *mm,
|
|
|
+ struct vm_area_struct *vma, unsigned long address,
|
|
|
+ pte_t *page_table, spinlock_t *ptl, pte_t orig_pte,
|
|
|
+ struct page *page, int page_mkwrite,
|
|
|
+ int dirty_shared)
|
|
|
+ __releases(ptl)
|
|
|
+{
|
|
|
+ pte_t entry;
|
|
|
+ /*
|
|
|
+ * Clear the page's cpupid information as the existing
|
|
|
+ * information potentially belongs to a now completely
|
|
|
+ * unrelated process.
|
|
|
+ */
|
|
|
+ if (page)
|
|
|
+ page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1);
|
|
|
+
|
|
|
+ flush_cache_page(vma, address, pte_pfn(orig_pte));
|
|
|
+ entry = pte_mkyoung(orig_pte);
|
|
|
+ entry = maybe_mkwrite(pte_mkdirty(entry), vma);
|
|
|
+ if (ptep_set_access_flags(vma, address, page_table, entry, 1))
|
|
|
+ update_mmu_cache(vma, address, page_table);
|
|
|
+ pte_unmap_unlock(page_table, ptl);
|
|
|
+
|
|
|
+ if (dirty_shared) {
|
|
|
+ struct address_space *mapping;
|
|
|
+ int dirtied;
|
|
|
+
|
|
|
+ if (!page_mkwrite)
|
|
|
+ lock_page(page);
|
|
|
+
|
|
|
+ dirtied = set_page_dirty(page);
|
|
|
+ VM_BUG_ON_PAGE(PageAnon(page), page);
|
|
|
+ mapping = page->mapping;
|
|
|
+ unlock_page(page);
|
|
|
+ page_cache_release(page);
|
|
|
+
|
|
|
+ if ((dirtied || page_mkwrite) && mapping) {
|
|
|
+ /*
|
|
|
+ * Some device drivers do not set page.mapping
|
|
|
+ * but still dirty their pages
|
|
|
+ */
|
|
|
+ balance_dirty_pages_ratelimited(mapping);
|
|
|
+ }
|
|
|
+
|
|
|
+ if (!page_mkwrite)
|
|
|
+ file_update_time(vma->vm_file);
|
|
|
+ }
|
|
|
+
|
|
|
+ return VM_FAULT_WRITE;
|
|
|
+}
|
|
|
+
|
|
|
/*
|
|
|
* This routine handles present pages, when users try to write
|
|
|
* to a shared page. It is done by copying the page to a new address
|
|
|
@@ -2008,8 +2067,6 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
|
struct page *old_page, *new_page = NULL;
|
|
|
pte_t entry;
|
|
|
int ret = 0;
|
|
|
- int page_mkwrite = 0;
|
|
|
- bool dirty_shared = false;
|
|
|
unsigned long mmun_start = 0; /* For mmu_notifiers */
|
|
|
unsigned long mmun_end = 0; /* For mmu_notifiers */
|
|
|
struct mem_cgroup *memcg;
|
|
|
@@ -2026,7 +2083,8 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
|
*/
|
|
|
if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
|
|
|
(VM_WRITE|VM_SHARED))
|
|
|
- goto reuse;
|
|
|
+ return wp_page_reuse(mm, vma, address, page_table, ptl,
|
|
|
+ orig_pte, old_page, 0, 0);
|
|
|
goto gotten;
|
|
|
}
|
|
|
|
|
|
@@ -2055,12 +2113,16 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
|
*/
|
|
|
page_move_anon_rmap(old_page, vma, address);
|
|
|
unlock_page(old_page);
|
|
|
- goto reuse;
|
|
|
+ return wp_page_reuse(mm, vma, address, page_table, ptl,
|
|
|
+ orig_pte, old_page, 0, 0);
|
|
|
}
|
|
|
unlock_page(old_page);
|
|
|
} else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
|
|
|
(VM_WRITE|VM_SHARED))) {
|
|
|
+ int page_mkwrite = 0;
|
|
|
+
|
|
|
page_cache_get(old_page);
|
|
|
+
|
|
|
/*
|
|
|
* Only catch write-faults on shared writable pages,
|
|
|
* read-only shared pages can get COWed by
|
|
|
@@ -2091,51 +2153,8 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
|
page_mkwrite = 1;
|
|
|
}
|
|
|
|
|
|
- dirty_shared = true;
|
|
|
-
|
|
|
-reuse:
|
|
|
- /*
|
|
|
- * Clear the pages cpupid information as the existing
|
|
|
- * information potentially belongs to a now completely
|
|
|
- * unrelated process.
|
|
|
- */
|
|
|
- if (old_page)
|
|
|
- page_cpupid_xchg_last(old_page, (1 << LAST_CPUPID_SHIFT) - 1);
|
|
|
-
|
|
|
- flush_cache_page(vma, address, pte_pfn(orig_pte));
|
|
|
- entry = pte_mkyoung(orig_pte);
|
|
|
- entry = maybe_mkwrite(pte_mkdirty(entry), vma);
|
|
|
- if (ptep_set_access_flags(vma, address, page_table, entry,1))
|
|
|
- update_mmu_cache(vma, address, page_table);
|
|
|
- pte_unmap_unlock(page_table, ptl);
|
|
|
- ret |= VM_FAULT_WRITE;
|
|
|
-
|
|
|
- if (dirty_shared) {
|
|
|
- struct address_space *mapping;
|
|
|
- int dirtied;
|
|
|
-
|
|
|
- if (!page_mkwrite)
|
|
|
- lock_page(old_page);
|
|
|
-
|
|
|
- dirtied = set_page_dirty(old_page);
|
|
|
- VM_BUG_ON_PAGE(PageAnon(old_page), old_page);
|
|
|
- mapping = old_page->mapping;
|
|
|
- unlock_page(old_page);
|
|
|
- page_cache_release(old_page);
|
|
|
-
|
|
|
- if ((dirtied || page_mkwrite) && mapping) {
|
|
|
- /*
|
|
|
- * Some device drivers do not set page.mapping
|
|
|
- * but still dirty their pages
|
|
|
- */
|
|
|
- balance_dirty_pages_ratelimited(mapping);
|
|
|
- }
|
|
|
-
|
|
|
- if (!page_mkwrite)
|
|
|
- file_update_time(vma->vm_file);
|
|
|
- }
|
|
|
-
|
|
|
- return ret;
|
|
|
+ return wp_page_reuse(mm, vma, address, page_table, ptl,
|
|
|
+ orig_pte, old_page, page_mkwrite, 1);
|
|
|
}
|
|
|
|
|
|
/*
|