@@ -3177,7 +3177,6 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 			    unsigned long start, unsigned long end,
 			    struct page *ref_page)
 {
-	int force_flush = 0;
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long address;
 	pte_t *ptep;
@@ -3196,19 +3195,22 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	tlb_start_vma(tlb, vma);
 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
 	address = start;
-again:
 	for (; address < end; address += sz) {
 		ptep = huge_pte_offset(mm, address);
 		if (!ptep)
 			continue;
 
 		ptl = huge_pte_lock(h, mm, ptep);
-		if (huge_pmd_unshare(mm, &address, ptep))
-			goto unlock;
+		if (huge_pmd_unshare(mm, &address, ptep)) {
+			spin_unlock(ptl);
+			continue;
+		}
 
 		pte = huge_ptep_get(ptep);
-		if (huge_pte_none(pte))
-			goto unlock;
+		if (huge_pte_none(pte)) {
+			spin_unlock(ptl);
+			continue;
+		}
 
 		/*
 		 * Migrating hugepage or HWPoisoned hugepage is already
@@ -3216,7 +3218,8 @@ again:
 		 */
 		if (unlikely(!pte_present(pte))) {
 			huge_pte_clear(mm, address, ptep);
-			goto unlock;
+			spin_unlock(ptl);
+			continue;
 		}
 
 		page = pte_page(pte);
@@ -3226,9 +3229,10 @@ again:
 		 * are about to unmap is the actual page of interest.
 		 */
 		if (ref_page) {
-			if (page != ref_page)
-				goto unlock;
-
+			if (page != ref_page) {
+				spin_unlock(ptl);
+				continue;
+			}
 			/*
 			 * Mark the VMA as having unmapped its page so that
 			 * future faults in this VMA will fail rather than
@@ -3244,30 +3248,14 @@ again:
 
 		hugetlb_count_sub(pages_per_huge_page(h), mm);
 		page_remove_rmap(page, true);
-		force_flush = !__tlb_remove_page(tlb, page);
-		if (force_flush) {
-			address += sz;
-			spin_unlock(ptl);
-			break;
-		}
-		/* Bail out after unmapping reference page if supplied */
-		if (ref_page) {
-			spin_unlock(ptl);
-			break;
-		}
-unlock:
+
 		spin_unlock(ptl);
-	}
-	/*
-	 * mmu_gather ran out of room to batch pages, we break out of
-	 * the PTE lock to avoid doing the potential expensive TLB invalidate
-	 * and page-free while holding it.
-	 */
-	if (force_flush) {
-		force_flush = 0;
-		tlb_flush_mmu(tlb);
-		if (address < end && !ref_page)
-			goto again;
+		tlb_remove_page(tlb, page);
+		/*
+		 * Bail out after unmapping reference page if supplied
+		 */
+		if (ref_page)
+			break;
 	}
 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 	tlb_end_vma(tlb, vma);
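
Why the force_flush state and the again: retry label can go away: tlb_remove_page() performs the TLB flush itself when the mmu_gather batch fills up, so the caller no longer has to detect a full batch, bail out of the loop, flush, and restart. With the retry structure gone, the unlock: label also disappears, and the error paths simply open-code spin_unlock(ptl) followed by continue. The sketch below is a minimal, self-contained illustration of that shift, not kernel code; every toy_* name is a hypothetical stand-in for the mmu_gather machinery.

/*
 * Toy illustration of "helper reports a full batch" vs. "helper
 * flushes internally". All names here are hypothetical; they only
 * mimic the shape of the mmu_gather API, not its real behaviour.
 */
#include <stdio.h>
#include <stddef.h>

#define TOY_BATCH 8

/* Toy stand-in for struct mmu_gather: a fixed-size batch of pages. */
struct toy_gather {
	void *pages[TOY_BATCH];
	size_t nr;
};

/* Stand-in for the expensive TLB invalidate + page free step. */
static void toy_flush(struct toy_gather *tlb)
{
	printf("flushing %zu pages\n", tlb->nr);
	tlb->nr = 0;
}

/*
 * Old shape: the helper only reports that the batch is full (returns 0),
 * so every caller has to carry force_flush state, break out of its loop,
 * flush, and restart.
 */
static int toy_remove_page_old(struct toy_gather *tlb, void *page)
{
	tlb->pages[tlb->nr++] = page;
	return tlb->nr < TOY_BATCH;	/* 0 means "caller must flush" */
}

/*
 * New shape: the helper flushes internally once the batch fills, so the
 * caller's loop stays a plain for-loop with no retry label.
 */
static void toy_remove_page(struct toy_gather *tlb, void *page)
{
	tlb->pages[tlb->nr++] = page;
	if (tlb->nr == TOY_BATCH)
		toy_flush(tlb);
}

int main(void)
{
	struct toy_gather tlb = { .nr = 0 };
	char pages[20];
	size_t i;
	int force_flush;

	/* Old-style caller: must notice a full batch and flush itself. */
	for (i = 0; i < sizeof(pages); i++) {
		force_flush = !toy_remove_page_old(&tlb, &pages[i]);
		if (force_flush)
			toy_flush(&tlb);
	}
	if (tlb.nr)
		toy_flush(&tlb);	/* final partial batch */

	/* New-style caller: mirrors the simplified loop in the patch above. */
	for (i = 0; i < sizeof(pages); i++)
		toy_remove_page(&tlb, &pages[i]);
	if (tlb.nr)
		toy_flush(&tlb);

	return 0;
}

Moving the flush into the remove helper is the design choice that lets the patched __unmap_hugepage_range() drop its force_flush bookkeeping and goto-based control flow while keeping the same per-page work.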