@@ -1899,12 +1899,11 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
 EXPORT_SYMBOL_GPL(apply_to_page_range);
 
 /*
- * handle_pte_fault chooses page fault handler according to an entry
- * which was read non-atomically. Before making any commitment, on
- * those architectures or configurations (e.g. i386 with PAE) which
- * might give a mix of unmatched parts, do_swap_page and do_nonlinear_fault
- * must check under lock before unmapping the pte and proceeding
- * (but do_wp_page is only called after already making such a check;
+ * handle_pte_fault chooses page fault handler according to an entry which was
+ * read non-atomically. Before making any commitment, on those architectures
+ * or configurations (e.g. i386 with PAE) which might give a mix of unmatched
+ * parts, do_swap_page must check under lock before unmapping the pte and
+ * proceeding (but do_wp_page is only called after already making such a check;
  * and do_anonymous_page can safely check later on).
  */
 static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
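For orientation, the locked re-check this comment describes is the one pte_unmap_same() itself performs. A sketch of the function as it looked in mm/memory.c of this era (reproduced from memory for context; the patch does not touch the function body):

static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
				pte_t *page_table, pte_t orig_pte)
{
	int same = 1;
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
	/*
	 * When pte_t is wider than one word (e.g. i386 with PAE), the
	 * earlier non-atomic read may have seen torn halves; re-read
	 * and compare under the pte lock before trusting the entry.
	 */
	if (sizeof(pte_t) > sizeof(unsigned long)) {
		spinlock_t *ptl = pte_lockptr(mm, pmd);
		spin_lock(ptl);
		same = pte_same(*page_table, orig_pte);
		spin_unlock(ptl);
	}
#endif
	pte_unmap(page_table);
	return same;
}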
@@ -2710,8 +2709,6 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
 	entry = mk_pte(page, vma->vm_page_prot);
 	if (write)
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-	else if (pte_file(*pte) && pte_file_soft_dirty(*pte))
-		entry = pte_mksoft_dirty(entry);
 	if (anon) {
 		inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
 		page_add_new_anon_rmap(page, vma, address);
@@ -2846,8 +2843,7 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * if page by the offset is not ready to be mapped (cold cache or
 	 * something).
 	 */
-	if (vma->vm_ops->map_pages && !(flags & FAULT_FLAG_NONLINEAR) &&
-	    fault_around_bytes >> PAGE_SHIFT > 1) {
+	if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
 		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 		do_fault_around(vma, address, pte, pgoff, flags);
 		if (!pte_same(*pte, orig_pte))
@@ -2992,7 +2988,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  * The mmap_sem may have been released depending on flags and our
  * return value. See filemap_fault() and __lock_page_or_retry().
  */
-static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+static int do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long address, pte_t *page_table, pmd_t *pmd,
 		unsigned int flags, pte_t orig_pte)
 {
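With the nonlinear case gone, the renamed do_fault() becomes the single entry point for file-backed faults. Its body (unchanged by this patch apart from the name, as the next hunk's context shows) derives the page offset linearly from the faulting address and dispatches on the fault type; roughly:

	/* Offset follows linearly from the faulting address. */
	pgoff_t pgoff = (((address & PAGE_MASK)
			- vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	pte_unmap(page_table);
	if (!(flags & FAULT_FLAG_WRITE))
		return do_read_fault(mm, vma, address, pmd, pgoff, flags,
				orig_pte);
	if (!(vma->vm_flags & VM_SHARED))
		return do_cow_fault(mm, vma, address, pmd, pgoff, flags,
				orig_pte);
	return do_shared_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);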
@@ -3009,46 +3005,6 @@ static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	return do_shared_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
 }
 
-/*
- * Fault of a previously existing named mapping. Repopulate the pte
- * from the encoded file_pte if possible. This enables swappable
- * nonlinear vmas.
- *
- * We enter with non-exclusive mmap_sem (to exclude vma changes,
- * but allow concurrent faults), and pte mapped but not yet locked.
- * We return with pte unmapped and unlocked.
- * The mmap_sem may have been released depending on flags and our
- * return value. See filemap_fault() and __lock_page_or_retry().
- */
-static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-		unsigned long address, pte_t *page_table, pmd_t *pmd,
-		unsigned int flags, pte_t orig_pte)
-{
-	pgoff_t pgoff;
-
-	flags |= FAULT_FLAG_NONLINEAR;
-
-	if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
-		return 0;
-
-	if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) {
-		/*
-		 * Page table corrupted: show pte and kill process.
-		 */
-		print_bad_pte(vma, address, orig_pte, NULL);
-		return VM_FAULT_SIGBUS;
-	}
-
-	pgoff = pte_to_pgoff(orig_pte);
-	if (!(flags & FAULT_FLAG_WRITE))
-		return do_read_fault(mm, vma, address, pmd, pgoff, flags,
-				orig_pte);
-	if (!(vma->vm_flags & VM_SHARED))
-		return do_cow_fault(mm, vma, address, pmd, pgoff, flags,
-				orig_pte);
-	return do_shared_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
-}
-
 static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
 			     unsigned long addr, int page_nid,
 			     int *flags)
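The removed path differed from the surviving one only in where pgoff came from: remap_file_pages() stashed an arbitrary page offset in the pte itself, to be decoded on fault, whereas the linear path computes it from the address. Side by side (illustration only, using the names from the hunks above):

	/* Linear (do_fault): offset derived from the faulting address. */
	pgoff = (((address & PAGE_MASK) - vma->vm_start) >> PAGE_SHIFT)
			+ vma->vm_pgoff;

	/*
	 * Nonlinear (removed): offset previously encoded into the pte by
	 * remap_file_pages() and recovered with pte_to_pgoff().
	 */
	pgoff = pte_to_pgoff(orig_pte);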
@@ -3176,15 +3132,12 @@ static int handle_pte_fault(struct mm_struct *mm,
 	if (pte_none(entry)) {
 		if (vma->vm_ops) {
 			if (likely(vma->vm_ops->fault))
-				return do_linear_fault(mm, vma, address,
-						pte, pmd, flags, entry);
+				return do_fault(mm, vma, address, pte,
+						pmd, flags, entry);
 		}
 		return do_anonymous_page(mm, vma, address,
 					 pte, pmd, flags);
 	}
-	if (pte_file(entry))
-		return do_nonlinear_fault(mm, vma, address,
-				pte, pmd, flags, entry);
 	return do_swap_page(mm, vma, address,
 					pte, pmd, flags, entry);
 }