@@ -115,7 +115,7 @@ static pte_t move_soft_dirty_pte(pte_t pte)
 static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 		unsigned long old_addr, unsigned long old_end,
 		struct vm_area_struct *new_vma, pmd_t *new_pmd,
-		unsigned long new_addr, bool need_rmap_locks, bool *need_flush)
+		unsigned long new_addr, bool need_rmap_locks)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	pte_t *old_pte, *new_pte, pte;
@@ -163,15 +163,17 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 
 		pte = ptep_get_and_clear(mm, old_addr, old_pte);
 		/*
-		 * If we are remapping a dirty PTE, make sure
+		 * If we are remapping a valid PTE, make sure
 		 * to flush TLB before we drop the PTL for the
-		 * old PTE or we may race with page_mkclean().
+		 * PTE.
 		 *
-		 * This check has to be done after we removed the
-		 * old PTE from page tables or another thread may
-		 * dirty it after the check and before the removal.
+		 * NOTE! Both old and new PTL matter: the old one
+		 * for racing with page_mkclean(), the new one to
+		 * make sure the physical page stays valid until
+		 * the TLB entry for the old mapping has been
+		 * flushed.
 		 */
-		if (pte_present(pte) && pte_dirty(pte))
+		if (pte_present(pte))
 			force_flush = true;
 		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
 		pte = move_soft_dirty_pte(pte);
@@ -179,13 +181,11 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 	}
 
 	arch_leave_lazy_mmu_mode();
+	if (force_flush)
+		flush_tlb_range(vma, old_end - len, old_end);
 	if (new_ptl != old_ptl)
 		spin_unlock(new_ptl);
 	pte_unmap(new_pte - 1);
-	if (force_flush)
-		flush_tlb_range(vma, old_end - len, old_end);
-	else
-		*need_flush = true;
 	pte_unmap_unlock(old_pte - 1, old_ptl);
 	if (need_rmap_locks)
 		drop_rmap_locks(vma);
@@ -198,7 +198,6 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 {
 	unsigned long extent, next, old_end;
 	pmd_t *old_pmd, *new_pmd;
-	bool need_flush = false;
 	unsigned long mmun_start;	/* For mmu_notifiers */
 	unsigned long mmun_end;	/* For mmu_notifiers */
 
@@ -229,8 +228,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 			if (need_rmap_locks)
 				take_rmap_locks(vma);
 			moved = move_huge_pmd(vma, old_addr, new_addr,
-					    old_end, old_pmd, new_pmd,
-					    &need_flush);
+					      old_end, old_pmd, new_pmd);
 			if (need_rmap_locks)
 				drop_rmap_locks(vma);
 			if (moved)
@@ -246,10 +244,8 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 		if (extent > next - new_addr)
 			extent = next - new_addr;
 		move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
-			  new_pmd, new_addr, need_rmap_locks, &need_flush);
+			  new_pmd, new_addr, need_rmap_locks);
 	}
-	if (need_flush)
-		flush_tlb_range(vma, old_end-len, old_addr);
 
 	mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
 
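The change is twofold: any present PTE, not just a dirty one, now forces a TLB
flush, and the flush happens inside move_ptes() before either page-table lock
is dropped, which is what lets the need_flush plumbing through the callers go
away. Below is a condensed sketch of the resulting ordering in move_ptes(),
not the verbatim kernel code: the pte_none() skip, move_pte(),
move_soft_dirty_pte(), the lazy-MMU calls and the setup of new_pte are elided.

	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	/* ... new_ptl is also taken here when it differs from old_ptl ... */
	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		pte = ptep_get_and_clear(mm, old_addr, old_pte);
		if (pte_present(pte))	/* any valid PTE, not just a dirty one */
			force_flush = true;
		set_pte_at(mm, new_addr, new_pte, pte);
	}
	if (force_flush)	/* flush while both PTLs are still held */
		flush_tlb_range(vma, old_end - len, old_end);
	/* only now: unlock new_ptl, unmap, unlock old_ptl */

Holding the new PTL across the flush is what guarantees the physical page
cannot be freed and reused while a stale TLB entry for the old address still
points at it.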