@@ -443,39 +443,26 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
 		page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP,
 				&page_mask);
 
-		if (page && !IS_ERR(page)) {
-			if (PageTransHuge(page)) {
-				lock_page(page);
-				/*
-				 * Any THP page found by follow_page_mask() may
-				 * have gotten split before reaching
-				 * munlock_vma_page(), so we need to recompute
-				 * the page_mask here.
-				 */
-				page_mask = munlock_vma_page(page);
-				unlock_page(page);
-				put_page(page); /* follow_page_mask() */
-			} else {
-				/*
-				 * Non-huge pages are handled in batches via
-				 * pagevec. The pin from follow_page_mask()
-				 * prevents them from collapsing by THP.
-				 */
-				pagevec_add(&pvec, page);
-				zone = page_zone(page);
-				zoneid = page_zone_id(page);
+		if (page && !IS_ERR(page) && !PageTransCompound(page)) {
+			/*
+			 * Non-huge pages are handled in batches via
+			 * pagevec. The pin from follow_page_mask()
+			 * prevents them from collapsing by THP.
+			 */
+			pagevec_add(&pvec, page);
+			zone = page_zone(page);
+			zoneid = page_zone_id(page);
 
-			/*
-			 * Try to fill the rest of pagevec using fast
-			 * pte walk. This will also update start to
-			 * the next page to process. Then munlock the
-			 * pagevec.
-			 */
-			start = __munlock_pagevec_fill(&pvec, vma,
-					zoneid, start, end);
-			__munlock_pagevec(&pvec, zone);
-			goto next;
-		}
+			/*
+			 * Try to fill the rest of pagevec using fast
+			 * pte walk. This will also update start to
+			 * the next page to process. Then munlock the
+			 * pagevec.
+			 */
+			start = __munlock_pagevec_fill(&pvec, vma,
+					zoneid, start, end);
+			__munlock_pagevec(&pvec, zone);
+			goto next;
+		}
 		}
 		/* It's a bug to munlock in the middle of a THP page */
 		VM_BUG_ON((start >> PAGE_SHIFT) & page_mask);
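
Note on the remaining VM_BUG_ON() context line: page_mask is the value reported by follow_page_mask() — HPAGE_PMD_NR - 1 for a THP, 0 for a small page — so the assertion encodes the rule that munlock must never start in the middle of a huge page: at a THP boundary the low bits of the virtual page number (start >> PAGE_SHIFT) are clear. Below is a minimal userspace sketch of that convention only; PAGE_SHIFT == 12 and HPAGE_PMD_NR == 512 are assumed values for 4 KiB base pages and 2 MiB huge pages, not taken from the patch itself.

	/*
	 * Illustrative sketch of the page_mask alignment check; the
	 * constants are assumptions for x86-64-style paging, not part
	 * of the patch above.
	 */
	#include <assert.h>
	#include <stdio.h>

	#define PAGE_SHIFT	12	/* 4 KiB base pages */
	#define HPAGE_PMD_NR	512	/* base pages per 2 MiB THP */

	int main(void)
	{
		unsigned long page_mask = HPAGE_PMD_NR - 1;	/* 511 for a THP */

		/* 2 MiB-aligned address: low bits of the page number are clear. */
		unsigned long start = 0x200000UL;
		assert(((start >> PAGE_SHIFT) & page_mask) == 0);

		/* One base page into the THP: this would trip the VM_BUG_ON(). */
		start += 4096;
		assert(((start >> PAGE_SHIFT) & page_mask) != 0);

		/* For a small page, follow_page_mask() reports page_mask == 0,
		 * so every address passes the check. */
		page_mask = 0;
		assert(((start >> PAGE_SHIFT) & page_mask) == 0);

		printf("page_mask alignment convention holds\n");
		return 0;
	}

With the hunk above, any PageTransCompound() page is skipped by the batching path entirely, so only small pages (page_mask == 0) or properly aligned starts reach the assertion.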