
mm: numa: defer TLB flush for THP migration as long as possible

THP migration can fail for a variety of reasons.  Avoid flushing the TLB
to deal with THP migration races until the copy is ready to start.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Alex Thorlton <athorlton@sgi.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit b0943d61b8

 mm/huge_memory.c | 7 -------
 mm/migrate.c     | 3 +++
 2 files changed, 3 insertions(+), 7 deletions(-)

--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c

@@ -1376,13 +1376,6 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto clear_pmdnuma;
 	}
 
-	/*
-	 * The page_table_lock above provides a memory barrier
-	 * with change_protection_range.
-	 */
-	if (mm_tlb_flush_pending(mm))
-		flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
-
 	/*
 	 * Migrate the THP to the requested node, returns with page unlocked
 	 * and pmd_numa cleared.

--- a/mm/migrate.c
+++ b/mm/migrate.c

@@ -1759,6 +1759,9 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 		goto out_fail;
 	}
 
+	if (mm_tlb_flush_pending(mm))
+		flush_tlb_range(vma, mmun_start, mmun_end);
+
 	/* Prepare a page as a migration target */
 	__set_page_locked(new_page);
 	SetPageSwapBacked(new_page);
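
Taken together, the two hunks move the pending TLB flush out of the pmd-numa fault path (do_huge_pmd_numa_page) and into migrate_misplaced_transhuge_page(), immediately before the new page is prepared, so the flush is skipped whenever migration bails out early. A minimal sketch of the deferred check, assuming the kernel-internal helpers shown in the diff (the helper name below is illustrative, not upstream code):

#include <linux/mm.h>
#include <asm/tlbflush.h>

/*
 * Sketch only: flush the TLB range left pending by
 * change_protection_range() only once THP migration has passed its
 * failure checks and is about to start copying.
 */
static void thp_migration_flush_pending(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long start, unsigned long end)
{
	if (mm_tlb_flush_pending(mm))
		flush_tlb_range(vma, start, end);
}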