@@ -442,7 +442,7 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
 
 	while (start < end) {
 		struct page *page;
-		unsigned int page_mask;
+		unsigned int page_mask = 0;
 		unsigned long page_increm;
 		struct pagevec pvec;
 		struct zone *zone;
@@ -456,8 +456,7 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
 		 * suits munlock very well (and if somehow an abnormal page
 		 * has sneaked into the range, we won't oops here: great).
 		 */
-		page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP,
-				&page_mask);
+		page = follow_page(vma, start, FOLL_GET | FOLL_DUMP);
 
 		if (page && !IS_ERR(page)) {
 			if (PageTransTail(page)) {
@@ -468,8 +467,8 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
 				/*
 				 * Any THP page found by follow_page_mask() may
 				 * have gotten split before reaching
-				 * munlock_vma_page(), so we need to recompute
-				 * the page_mask here.
+				 * munlock_vma_page(), so we need to compute
+				 * the page_mask here instead.
 				 */
 				page_mask = munlock_vma_page(page);
 				unlock_page(page);
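
Why the "= 0" initializer is needed: follow_page() does not report the mapping
size the way follow_page_mask() did, so for an ordinary order-0 page nothing
assigns page_mask before the stride computation at the bottom of the loop,
"page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);". A zero mask gives a
one-page stride, and munlock_vma_page() still supplies the mask when a THP is
found. Below is a standalone sketch of that stride arithmetic; only the
page_increm expression is lifted from munlock_vma_pages_range(), while
next_start(), the constants, and the main() harness are hypothetical
illustration (x86-64 values assumed):

#include <stdio.h>

#define PAGE_SHIFT   12			/* 4K base pages (x86-64) */
#define PAGE_SIZE    (1UL << PAGE_SHIFT)
#define HPAGE_PMD_NR 512		/* pages per 2M THP (x86-64) */

/* hypothetical helper; the page_increm line mirrors mm/mlock.c */
static unsigned long next_start(unsigned long start, unsigned int page_mask)
{
	unsigned long page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);

	return start + page_increm * PAGE_SIZE;
}

int main(void)
{
	unsigned long start = 0x200000;	/* a 2M-aligned user address */

	/* page_mask == 0 (the new initializer): advance one 4K page */
	printf("order-0 page: 0x%lx -> 0x%lx\n", start, next_start(start, 0));

	/* mask as returned by munlock_vma_page() on a THP: skip all 2M */
	printf("THP:          0x%lx -> 0x%lx\n", start,
	       next_start(start, HPAGE_PMD_NR - 1));

	return 0;
}

With page_mask left uninitialized, the non-THP path would feed garbage into
that AND, so the loop could skip an arbitrary number of pages and leave parts
of the range mlocked.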