@@ -3938,6 +3938,14 @@ same_page:
 	return i ? i : -EFAULT;
 }
 
+#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
+/*
+ * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
+ * implement this.
+ */
+#define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
+#endif
+
 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 		unsigned long address, unsigned long end, pgprot_t newprot)
 {
@@ -3998,7 +4006,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 	 * once we release i_mmap_rwsem, another task can do the final put_page
 	 * and that page table be reused and filled with junk.
 	 */
-	flush_tlb_range(vma, start, end);
+	flush_hugetlb_tlb_range(vma, start, end);
 	mmu_notifier_invalidate_range(mm, start, end);
 	i_mmap_unlock_write(vma->vm_file->f_mapping);
 	mmu_notifier_invalidate_range_end(mm, start, end);
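
For context (not part of this patch): because the generic definition above is guarded by #ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE, an architecture with special hugetlb TLB eviction requirements opts out of the flush_tlb_range() fallback by defining the guard and its own flush_hugetlb_tlb_range() in an arch header. A minimal sketch follows; the header path and my_arch_flush_hugetlb_tlb_range() are hypothetical placeholders, not an existing arch implementation:

/* arch/<arch>/include/asm/hugetlb.h (illustrative sketch only) */
#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE

/*
 * Hypothetical arch helper that issues whatever invalidation the hardware
 * needs for huge-page-backed TLB entries in [start, end).
 */
void my_arch_flush_hugetlb_tlb_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end);

#define flush_hugetlb_tlb_range(vma, start, end)	\
	my_arch_flush_hugetlb_tlb_range(vma, start, end)

With such a definition in place, the flush_hugetlb_tlb_range() call added in hugetlb_change_protection() above picks up the arch-specific flush automatically, while all other architectures keep the existing flush_tlb_range() behaviour.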