@@ -104,11 +104,13 @@ static pte_t move_soft_dirty_pte(pte_t pte)
 static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 		unsigned long old_addr, unsigned long old_end,
 		struct vm_area_struct *new_vma, pmd_t *new_pmd,
-		unsigned long new_addr, bool need_rmap_locks)
+		unsigned long new_addr, bool need_rmap_locks, bool *need_flush)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	pte_t *old_pte, *new_pte, pte;
 	spinlock_t *old_ptl, *new_ptl;
+	bool force_flush = false;
+	unsigned long len = old_end - old_addr;
 
 	/*
 	 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
@@ -146,6 +148,14 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 				   new_pte++, new_addr += PAGE_SIZE) {
 		if (pte_none(*old_pte))
 			continue;
+
+		/*
+		 * We are remapping a dirty PTE, make sure to
+		 * flush TLB before we drop the PTL for the
+		 * old PTE or we may race with page_mkclean().
+		 */
+		if (pte_present(*old_pte) && pte_dirty(*old_pte))
+			force_flush = true;
 		pte = ptep_get_and_clear(mm, old_addr, old_pte);
 		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
 		pte = move_soft_dirty_pte(pte);
@@ -156,6 +166,10 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 	if (new_ptl != old_ptl)
 		spin_unlock(new_ptl);
 	pte_unmap(new_pte - 1);
+	if (force_flush)
+		flush_tlb_range(vma, old_end - len, old_end);
+	else
+		*need_flush = true;
 	pte_unmap_unlock(old_pte - 1, old_ptl);
 	if (need_rmap_locks)
 		drop_rmap_locks(vma);
@@ -201,13 +215,12 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 			if (need_rmap_locks)
 				take_rmap_locks(vma);
 			moved = move_huge_pmd(vma, old_addr, new_addr,
-					    old_end, old_pmd, new_pmd);
+					    old_end, old_pmd, new_pmd,
+					    &need_flush);
 			if (need_rmap_locks)
 				drop_rmap_locks(vma);
-			if (moved) {
-				need_flush = true;
+			if (moved)
 				continue;
-			}
 		}
 		split_huge_pmd(vma, old_pmd, old_addr);
 		if (pmd_trans_unstable(old_pmd))
@@ -220,11 +233,10 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 		extent = next - new_addr;
 		if (extent > LATENCY_LIMIT)
 			extent = LATENCY_LIMIT;
-		move_ptes(vma, old_pmd, old_addr, old_addr + extent,
-			  new_vma, new_pmd, new_addr, need_rmap_locks);
-		need_flush = true;
+		move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
+			  new_pmd, new_addr, need_rmap_locks, &need_flush);
 	}
-	if (likely(need_flush))
+	if (need_flush)
 		flush_tlb_range(vma, old_end-len, old_addr);
 
 	mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);