@@ -70,6 +70,22 @@ static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
 	return pmd;
 }
 
+static void take_rmap_locks(struct vm_area_struct *vma)
+{
+	if (vma->vm_file)
+		i_mmap_lock_write(vma->vm_file->f_mapping);
+	if (vma->anon_vma)
+		anon_vma_lock_write(vma->anon_vma);
+}
+
+static void drop_rmap_locks(struct vm_area_struct *vma)
+{
+	if (vma->anon_vma)
+		anon_vma_unlock_write(vma->anon_vma);
+	if (vma->vm_file)
+		i_mmap_unlock_write(vma->vm_file->f_mapping);
+}
+
 static pte_t move_soft_dirty_pte(pte_t pte)
 {
 	/*
@@ -90,8 +106,6 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 		struct vm_area_struct *new_vma, pmd_t *new_pmd,
 		unsigned long new_addr, bool need_rmap_locks)
 {
-	struct address_space *mapping = NULL;
-	struct anon_vma *anon_vma = NULL;
 	struct mm_struct *mm = vma->vm_mm;
 	pte_t *old_pte, *new_pte, pte;
 	spinlock_t *old_ptl, *new_ptl;
@@ -114,16 +128,8 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 	 * serialize access to individual ptes, but only rmap traversal
 	 * order guarantees that we won't miss both the old and new ptes).
 	 */
-	if (need_rmap_locks) {
-		if (vma->vm_file) {
-			mapping = vma->vm_file->f_mapping;
-			i_mmap_lock_write(mapping);
-		}
-		if (vma->anon_vma) {
-			anon_vma = vma->anon_vma;
-			anon_vma_lock_write(anon_vma);
-		}
-	}
+	if (need_rmap_locks)
+		take_rmap_locks(vma);
 
 	/*
 	 * We don't have to worry about the ordering of src and dst
@@ -151,10 +157,8 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 	spin_unlock(new_ptl);
 	pte_unmap(new_pte - 1);
 	pte_unmap_unlock(old_pte - 1, old_ptl);
-	if (anon_vma)
-		anon_vma_unlock_write(anon_vma);
-	if (mapping)
-		i_mmap_unlock_write(mapping);
+	if (need_rmap_locks)
+		drop_rmap_locks(vma);
 }
 
 #define LATENCY_LIMIT	(64 * PAGE_SIZE)
@@ -193,15 +197,13 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 		if (pmd_trans_huge(*old_pmd)) {
 			if (extent == HPAGE_PMD_SIZE) {
 				bool moved;
-				VM_BUG_ON_VMA(vma->vm_file || !vma->anon_vma,
-					      vma);
 				/* See comment in move_ptes() */
 				if (need_rmap_locks)
-					anon_vma_lock_write(vma->anon_vma);
+					take_rmap_locks(vma);
 				moved = move_huge_pmd(vma, old_addr, new_addr,
 						    old_end, old_pmd, new_pmd);
 				if (need_rmap_locks)
-					anon_vma_unlock_write(vma->anon_vma);
+					drop_rmap_locks(vma);
 				if (moved) {
 					need_flush = true;
 					continue;
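
The new helpers encode a fixed pairing: take_rmap_locks() acquires the file
lock then the anon lock, and drop_rmap_locks() releases in the reverse order.
Below is a minimal userspace sketch of that convention (not kernel code, and
not part of this patch): struct vma_like and its pthread rwlocks are
hypothetical stand-ins for the i_mmap and anon_vma write locks.

/*
 * Userspace sketch of the take/drop pairing convention.  Either lock may
 * be absent (NULL), mirroring a VMA with no vm_file or no anon_vma.
 */
#include <pthread.h>
#include <stdio.h>

struct vma_like {
	pthread_rwlock_t *file_lock;	/* stand-in for i_mmap_rwsem */
	pthread_rwlock_t *anon_lock;	/* stand-in for anon_vma->rwsem */
};

static void take_locks(struct vma_like *v)
{
	if (v->file_lock)
		pthread_rwlock_wrlock(v->file_lock);
	if (v->anon_lock)
		pthread_rwlock_wrlock(v->anon_lock);
}

static void drop_locks(struct vma_like *v)
{
	/* release in reverse of take_locks() acquisition order */
	if (v->anon_lock)
		pthread_rwlock_unlock(v->anon_lock);
	if (v->file_lock)
		pthread_rwlock_unlock(v->file_lock);
}

int main(void)
{
	pthread_rwlock_t f = PTHREAD_RWLOCK_INITIALIZER;
	pthread_rwlock_t a = PTHREAD_RWLOCK_INITIALIZER;
	struct vma_like v = { .file_lock = &f, .anon_lock = &a };

	take_locks(&v);
	/* ... critical section: move the page table entries ... */
	drop_locks(&v);
	puts("lock pairing ok");
	return 0;
}

Unlock order does not matter for deadlock avoidance, but mirroring the
acquisition order in drop_rmap_locks() keeps the pairing auditable at a
glance, and both helpers are only ever called under the same
need_rmap_locks condition.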