@@ -2726,9 +2726,9 @@ void __unmap_hugepage_range_final(struct mmu_gather *tlb,
* on its way out. We're lucky that the flag has such an appropriate
* name, and can in fact be safely cleared here. We could clear it
* before the __unmap_hugepage_range above, but all that's necessary
- * is to clear it before releasing the i_mmap_mutex. This works
+ * is to clear it before releasing the i_mmap_rwsem. This works
* because in the context this is called, the VMA is about to be
- * destroyed and the i_mmap_mutex is held.
+ * destroyed and the i_mmap_rwsem is held.
*/
vma->vm_flags &= ~VM_MAYSHARE;
}
@@ -3370,9 +3370,9 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
spin_unlock(ptl);
}
/*
- * Must flush TLB before releasing i_mmap_mutex: x86's huge_pmd_unshare
+ * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
* may have cleared our pud entry and done put_page on the page table:
- * once we release i_mmap_mutex, another task can do the final put_page
+ * once we release i_mmap_rwsem, another task can do the final put_page
* and that page table be reused and filled with junk.
*/
flush_tlb_range(vma, start, end);
@@ -3525,7 +3525,7 @@ static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
* and returns the corresponding pte. While this is not necessary for the
* !shared pmd case because we can allocate the pmd later as well, it makes the
* code much cleaner. pmd allocation is essential for the shared case because
- * pud has to be populated inside the same i_mmap_mutex section - otherwise
+ * pud has to be populated inside the same i_mmap_rwsem section - otherwise
* racing tasks could either miss the sharing (see huge_pte_offset) or select a
* bad pmd for sharing.
*/
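
The hunks above only track the rename of i_mmap_mutex to i_mmap_rwsem; the ordering rules the comments describe are unchanged. As a rough, hedged sketch (not part of the patch), the rule from the hugetlb_change_protection() hunk could look like the fragment below, assuming the i_mmap_lock_write()/i_mmap_unlock_write() wrappers provided by the rwsem conversion; the helper name is made up for illustration.

#include <linux/mm.h>
#include <linux/fs.h>
#include <asm/tlbflush.h>

/* Hypothetical helper illustrating the comment's ordering requirement. */
static void sketch_flush_before_unlock(struct vm_area_struct *vma,
				       unsigned long start, unsigned long end)
{
	struct address_space *mapping = vma->vm_file->f_mapping;

	i_mmap_lock_write(mapping);	/* down_write(&mapping->i_mmap_rwsem) */
	/*
	 * ... walk the page tables here; on x86, huge_pmd_unshare() may
	 * clear our pud entry and put_page() the shared page-table page ...
	 */
	flush_tlb_range(vma, start, end);	/* must run before the unlock */
	i_mmap_unlock_write(mapping);	/* only now may another task do the
					 * final put_page() on that page table */
}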