@@ -1431,6 +1431,8 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
 		goto out;
 
+	if (flags & TTU_SPLIT_HUGE_PMD)
+		split_huge_pmd_address(vma, address);
 	pte = page_check_address(page, mm, address, &ptl, 0);
 	if (!pte)
 		goto out;
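
With TTU_SPLIT_HUGE_PMD set, try_to_unmap_one() first splits any huge pmd
covering @address back into normal ptes, so that the pte-level
page_check_address() lookup below can succeed. A sketch of what
split_huge_pmd_address() amounts to, assuming the 4.5-era four-level
page-table helpers (close to the helper this series adds in
mm/huge_memory.c, but not quoted verbatim from it):

	/* Sketch, not verbatim: walk to the pmd covering @address and
	 * split it if it is a transparent huge pmd. */
	void split_huge_pmd_address(struct vm_area_struct *vma,
				    unsigned long address)
	{
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;

		pgd = pgd_offset(vma->vm_mm, address);
		if (!pgd_present(*pgd))
			return;

		pud = pud_offset(pgd, address);
		if (!pud_present(*pud))
			return;

		pmd = pmd_offset(pud, address);
		if (!pmd_present(*pmd) || !pmd_trans_huge(*pmd))
			return;

		/* demote the huge pmd to a table of normal ptes */
		split_huge_pmd(vma, pmd, address);
	}
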
@@ -1576,10 +1578,10 @@ static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
 	return is_vma_temporary_stack(vma);
 }
 
-static int page_not_mapped(struct page *page)
+static int page_mapcount_is_zero(struct page *page)
 {
-	return !page_mapped(page);
-};
+	return !page_mapcount(page);
+}
 
 /**
  * try_to_unmap - try to remove all page table mappings to a page
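
The .done hook is rmap_walk()'s early-exit predicate: the walk stops as
soon as it returns true. Switching it from page_mapped() to
page_mapcount() narrows the question from "is any part of this (possibly
compound) page still mapped?" to "has this exact page's mapcount reached
zero?", which is the condition the THP split path cares about. A minimal
self-contained model of that early-exit contract (stub types, not the
kernel's actual walker):

	#include <stdio.h>

	/* Stub model of rmap_walk()'s .done contract; not kernel code. */
	struct page { int mapcount; };

	static int page_mapcount_is_zero(struct page *page)
	{
		return page->mapcount == 0;
	}

	/* Tear down one "mapping" per vma; stop when .done fires. */
	static int rmap_walk_model(struct page *page,
				   int (*done)(struct page *))
	{
		int vmas_visited = 0;

		while (page->mapcount > 0) {
			page->mapcount--;	/* rmap_one() unmapped one pte */
			vmas_visited++;
			if (done(page))
				break;		/* nothing left to unmap */
		}
		return vmas_visited;
	}

	int main(void)
	{
		struct page p = { .mapcount = 3 };

		printf("visited %d vmas\n",
		       rmap_walk_model(&p, page_mapcount_is_zero));
		return 0;
	}
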
@@ -1606,12 +1608,10 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
 	struct rmap_walk_control rwc = {
 		.rmap_one = try_to_unmap_one,
 		.arg = &rp,
-		.done = page_not_mapped,
+		.done = page_mapcount_is_zero,
 		.anon_lock = page_lock_anon_vma_read,
 	};
 
-	VM_BUG_ON_PAGE(!PageHuge(page) && PageTransHuge(page), page);
-
 	/*
 	 * During exec, a temporary VMA is setup and later moved.
 	 * The VMA is moved under the anon_vma lock but not the
@@ -1623,9 +1623,12 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
 	if ((flags & TTU_MIGRATION) && !PageKsm(page) && PageAnon(page))
 		rwc.invalid_vma = invalid_migration_vma;
 
-	ret = rmap_walk(page, &rwc);
+	if (flags & TTU_RMAP_LOCKED)
+		ret = rmap_walk_locked(page, &rwc);
+	else
+		ret = rmap_walk(page, &rwc);
 
-	if (ret != SWAP_MLOCK && !page_mapped(page)) {
+	if (ret != SWAP_MLOCK && !page_mapcount(page)) {
 		ret = SWAP_SUCCESS;
 		if (rp.lazyfreed && !PageDirty(page))
 			ret = SWAP_LZFREE;
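
TTU_RMAP_LOCKED is for callers that already hold the rmap lock (the THP
split path takes the anon_vma lock before freezing the page), so
try_to_unmap() must not take it again. A sketch of the locked variant's
dispatch, assuming rmap_walk_anon()/rmap_walk_file() grew a `locked`
argument in this series (not guaranteed verbatim):

	int rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc)
	{
		/* the locked variant has no KSM support */
		VM_BUG_ON_PAGE(PageKsm(page), page);

		if (PageAnon(page))
			return rmap_walk_anon(page, rwc, true);	/* anon_vma lock held */
		else
			return rmap_walk_file(page, rwc, true);	/* i_mmap lock held */
	}
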
@@ -1633,6 +1636,11 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
 	return ret;
 }
 
+static int page_not_mapped(struct page *page)
+{
+	return !page_mapped(page);
+};
+
 /**
  * try_to_munlock - try to munlock a page
  * @page: the page to be munlocked
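
page_not_mapped() comes back unchanged because try_to_munlock() below
still wants the whole-page page_mapped() check. Note also that
try_to_unmap() no longer rejects transparent huge pages (the
VM_BUG_ON_PAGE above is gone): with TTU_SPLIT_HUGE_PMD it can now take a
THP and split its pmds as it walks. A hypothetical caller on the split
path, holding the anon_vma lock, might combine the new flags roughly like
this (function name and exact flag set are illustrative, not quoted from
the series):

	/* Hypothetical sketch: unmap a THP before splitting it. */
	static void unmap_thp_for_split(struct page *head)
	{
		enum ttu_flags flags = TTU_MIGRATION | TTU_IGNORE_MLOCK |
				       TTU_IGNORE_ACCESS | TTU_RMAP_LOCKED |
				       TTU_SPLIT_HUGE_PMD;

		/* anon_vma lock assumed held, hence TTU_RMAP_LOCKED */
		try_to_unmap(head, flags);
	}
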