@@ -1410,7 +1410,6 @@ int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
 	int page_nid = -1, this_nid = numa_node_id();
 	int target_nid, last_cpupid = -1;
-	bool need_flush = false;
 	bool page_locked;
 	bool migrated = false;
 	bool was_writable;
@@ -1496,23 +1495,19 @@ int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
 		goto clear_pmdnuma;
 	}
 
-	/*
-	 * The page_table_lock above provides a memory barrier
-	 * with change_protection_range.
-	 */
-	if (mm_tlb_flush_pending(vma->vm_mm))
-		flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
-
 	/*
 	 * Since we took the NUMA fault, we must have observed the !accessible
 	 * bit. Make sure all other CPUs agree with that, to avoid them
 	 * modifying the page we're about to migrate.
 	 *
 	 * Must be done under PTL such that we'll observe the relevant
-	 * set_tlb_flush_pending().
+	 * inc_tlb_flush_pending().
+	 *
+	 * We are not sure a pending tlb flush here is for a huge page
+	 * mapping or not. Hence use the tlb range variant
 	 */
 	if (mm_tlb_flush_pending(vma->vm_mm))
-		need_flush = true;
+		flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
 
 	/*
 	 * Migrate the THP to the requested node, returns with page unlocked
@@ -1520,13 +1515,6 @@ int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
 	 */
 	spin_unlock(vmf->ptl);
 
-	/*
-	 * We are not sure a pending tlb flush here is for a huge page
-	 * mapping or not. Hence use the tlb range variant
-	 */
-	if (need_flush)
-		flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
-
 	migrated = migrate_misplaced_transhuge_page(vma->vm_mm, vma,
 		vmf->pmd, pmd, vmf->address, page, target_nid);
 	if (migrated) {
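
For context, here is a minimal user-space sketch of the pending-TLB-flush protocol that the comment in the patch relies on. It is not kernel code: the helper names mirror the kernel's inc_tlb_flush_pending()/dec_tlb_flush_pending()/mm_tlb_flush_pending(), but the C11-atomics model, the struct layout, and the locking described in the comments are illustrative assumptions, not the kernel implementation.

#include <stdatomic.h>
#include <stdbool.h>

struct mm_struct {
	/* Nonzero while some thread may still have a TLB flush in flight. */
	atomic_int tlb_flush_pending;
};

/* Writer side (e.g. change_protection_range): bump before touching PTEs. */
void inc_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_fetch_add(&mm->tlb_flush_pending, 1);
}

/* Writer side: drop only after the deferred TLB flush has completed. */
void dec_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_fetch_sub(&mm->tlb_flush_pending, 1);
}

/*
 * Reader side (the NUMA fault above): per the patch comment, this must
 * be checked under the PTL so that a racing protection change, which
 * incremented the counter before clearing PTEs, is observed here.
 */
bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	return atomic_load(&mm->tlb_flush_pending) > 0;
}

Because the fault handler cannot tell whether a pending flush covered the huge-page mapping, the patch makes it flush the whole HPAGE_PMD_SIZE range with flush_tlb_range() while still holding the PTL, rather than deferring the flush through the removed need_flush flag.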