@@ -2877,7 +2877,7 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address,
  * vm_ops->map_pages.
  */
 void do_set_pte(struct vm_area_struct *vma, unsigned long address,
-		struct page *page, pte_t *pte, bool write, bool anon, bool old)
+		struct page *page, pte_t *pte, bool write, bool anon)
 {
 	pte_t entry;
 
@@ -2885,8 +2885,6 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
 	entry = mk_pte(page, vma->vm_page_prot);
 	if (write)
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-	if (old)
-		entry = pte_mkold(entry);
 	if (anon) {
 		inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
 		page_add_new_anon_rmap(page, vma, address, false);
@@ -3032,20 +3030,9 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 */
 	if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
 		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
-		if (!pte_same(*pte, orig_pte))
-			goto unlock_out;
 		do_fault_around(vma, address, pte, pgoff, flags);
-		/* Check if the fault is handled by faultaround */
-		if (!pte_same(*pte, orig_pte)) {
-			/*
-			 * Faultaround produce old pte, but the pte we've
-			 * handler fault for should be young.
-			 */
-			pte_t entry = pte_mkyoung(*pte);
-			if (ptep_set_access_flags(vma, address, pte, entry, 0))
-				update_mmu_cache(vma, address, pte);
+		if (!pte_same(*pte, orig_pte))
 			goto unlock_out;
-		}
 		pte_unmap_unlock(pte, ptl);
 	}
 
@@ -3060,7 +3047,7 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		put_page(fault_page);
 		return ret;
 	}
-	do_set_pte(vma, address, fault_page, pte, false, false, false);
+	do_set_pte(vma, address, fault_page, pte, false, false);
 	unlock_page(fault_page);
 unlock_out:
 	pte_unmap_unlock(pte, ptl);
@@ -3111,7 +3098,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		}
 		goto uncharge_out;
 	}
-	do_set_pte(vma, address, new_page, pte, true, true, false);
+	do_set_pte(vma, address, new_page, pte, true, true);
 	mem_cgroup_commit_charge(new_page, memcg, false, false);
 	lru_cache_add_active_or_unevictable(new_page, vma);
 	pte_unmap_unlock(pte, ptl);
@@ -3164,7 +3151,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		put_page(fault_page);
 		return ret;
 	}
-	do_set_pte(vma, address, fault_page, pte, true, false, false);
+	do_set_pte(vma, address, fault_page, pte, true, false);
 	pte_unmap_unlock(pte, ptl);
 
 	if (set_page_dirty(fault_page))
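For context, a minimal userspace sketch of the accessed-bit bookkeeping the removed code implemented: fault-around installed the neighbouring ptes as old via pte_mkold(), and only the pte actually faulted on was made young again (pte_mkyoung() plus ptep_set_access_flags() in the kernel). The struct and helpers below are simplified stand-ins for the kernel helpers of the same names, not the real implementations.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for a hardware pte with an "accessed" bit. */
struct pte {
	bool present;
	bool young;	/* the hardware accessed bit */
};

static struct pte mk_pte(void)
{
	/* A freshly made entry is young (accessed) by default. */
	return (struct pte){ .present = true, .young = true };
}

static struct pte pte_mkold(struct pte e)
{
	e.young = false;
	return e;
}

static struct pte pte_mkyoung(struct pte e)
{
	e.young = true;
	return e;
}

int main(void)
{
	struct pte table[8];
	int faulted = 3;	/* index of the page that actually faulted */
	int i;

	/* Fault-around: speculatively map the neighbours as old... */
	for (i = 0; i < 8; i++)
		table[i] = pte_mkold(mk_pte());

	/* ...then mark only the explicitly faulted pte young. */
	table[faulted] = pte_mkyoung(table[faulted]);

	for (i = 0; i < 8; i++)
		printf("pte %d: %s\n", i, table[i].young ? "young" : "old");
	return 0;
}

With the revert applied, entries installed by fault-around are young from the start again, so the pte_mkyoung()/ptep_set_access_flags() fix-up on the faulting address becomes unnecessary and is dropped along with the extra pte_same() recheck.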