@@ -1223,7 +1223,12 @@ static int do_huge_pmd_wp_page_fallback(struct vm_fault *vmf, pmd_t orig_pmd,
         page_remove_rmap(page, true);
         spin_unlock(vmf->ptl);
 
-        mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
+        /*
+         * There is no need to call mmu_notifier->invalidate_range() again, as
+         * the pmdp_huge_clear_flush_notify() above has already called it.
+         */
+        mmu_notifier_invalidate_range_only_end(vma->vm_mm, mmun_start,
+                                               mmun_end);
 
         ret |= VM_FAULT_WRITE;
         put_page(page);
@@ -1372,7 +1377,12 @@ alloc:
         }
         spin_unlock(vmf->ptl);
 out_mn:
-        mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
+        /*
+         * There is no need to call mmu_notifier->invalidate_range() again, as
+         * the pmdp_huge_clear_flush_notify() above has already called it.
+         */
+        mmu_notifier_invalidate_range_only_end(vma->vm_mm, mmun_start,
+                                               mmun_end);
 out:
         return ret;
 out_unlock:
@@ -2024,7 +2034,12 @@ void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
 
 out:
         spin_unlock(ptl);
-        mmu_notifier_invalidate_range_end(mm, haddr, haddr + HPAGE_PUD_SIZE);
+        /*
+         * There is no need to call mmu_notifier->invalidate_range() again, as
+         * the pudp_huge_clear_flush_notify() above has already called it.
+         */
+        mmu_notifier_invalidate_range_only_end(mm, haddr, haddr +
+                                               HPAGE_PUD_SIZE);
 }
 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
 
@@ -2099,6 +2114,15 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
                 add_mm_counter(mm, MM_FILEPAGES, -HPAGE_PMD_NR);
                 return;
         } else if (is_huge_zero_pmd(*pmd)) {
+                /*
+                 * FIXME: Should we invalidate the secondary mmu by calling
+                 * mmu_notifier_invalidate_range()? See the comments below in
+                 * __split_huge_pmd().
+                 *
+                 * We are going from a write-protected huge zero page to
+                 * write-protected small zero pages, so it does not seem
+                 * useful to invalidate the secondary mmu at this point.
+                 */
                 return __split_huge_zero_page_pmd(vma, haddr, pmd);
         }
 
@@ -2234,7 +2258,21 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
         __split_huge_pmd_locked(vma, pmd, haddr, freeze);
 out:
         spin_unlock(ptl);
-        mmu_notifier_invalidate_range_end(mm, haddr, haddr + HPAGE_PMD_SIZE);
+        /*
+         * There is no need to call mmu_notifier->invalidate_range() again.
+         * There are 3 cases to consider inside __split_huge_pmd_locked():
+         *  1) pmdp_huge_clear_flush_notify() obviously calls invalidate_range()
+         *  2) __split_huge_zero_page_pmd() installs the read-only zero page;
+         *     any write fault will trigger a flush_notify before pointing to
+         *     a new page (it is fine if the secondary mmu keeps pointing to
+         *     the old zero page in the meantime)
+         *  3) the huge pmd is split into ptes pointing to the same page, so
+         *     the secondary tlb entries all remain valid; any further change
+         *     to an individual pte will notify, so there is no need to call
+         *     mmu_notifier->invalidate_range() here
+         */
+        mmu_notifier_invalidate_range_only_end(mm, haddr, haddr +
+                                               HPAGE_PMD_SIZE);
 }
 
 void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
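For background on why the _only_end() variant is sufficient in the hunks above: mmu_notifier_invalidate_range_end() invokes a registered notifier's ->invalidate_range() callback and then its ->invalidate_range_end() callback, while mmu_notifier_invalidate_range_only_end() skips ->invalidate_range() because pmdp_huge_clear_flush_notify()/pudp_huge_clear_flush_notify() already invoked it under the page table lock. The sketch below is a minimal, hypothetical mmu_notifier user illustrating that callback contract; the toy_tlb names are made up for illustration and are not part of this patch.

/*
 * Minimal sketch of an mmu_notifier user with a secondary (device) TLB.
 * Hypothetical driver, for illustration only; callback signatures as in
 * the 4.14-era <linux/mmu_notifier.h>.
 */
#include <linux/mmu_notifier.h>
#include <linux/mm.h>

struct toy_tlb {
        struct mmu_notifier mn;
        /* ... secondary (device) TLB state ... */
};

/*
 * Called while the CPU page table is being updated, e.g. from
 * pmdp_huge_clear_flush_notify(); must drop secondary TLB entries
 * covering [start, end) before returning.
 */
static void toy_tlb_invalidate_range(struct mmu_notifier *mn,
                                     struct mm_struct *mm,
                                     unsigned long start, unsigned long end)
{
        /* flush the device TLB for [start, end) here */
}

/* Bracketing callbacks around a range invalidation. */
static void toy_tlb_invalidate_range_start(struct mmu_notifier *mn,
                                           struct mm_struct *mm,
                                           unsigned long start,
                                           unsigned long end)
{
        /* stop new device accesses to [start, end) if needed */
}

static void toy_tlb_invalidate_range_end(struct mmu_notifier *mn,
                                         struct mm_struct *mm,
                                         unsigned long start,
                                         unsigned long end)
{
        /* allow device accesses to [start, end) again */
}

static const struct mmu_notifier_ops toy_tlb_ops = {
        .invalidate_range_start = toy_tlb_invalidate_range_start,
        .invalidate_range_end   = toy_tlb_invalidate_range_end,
        .invalidate_range       = toy_tlb_invalidate_range,
};

With ops like these registered, mmu_notifier_invalidate_range_end() would call toy_tlb_invalidate_range() followed by toy_tlb_invalidate_range_end(), whereas mmu_notifier_invalidate_range_only_end() calls only the latter; that is safe at the call sites patched above because the *_huge_clear_flush_notify() helper already performed the ->invalidate_range() step for the huge page range.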