@@ -819,6 +819,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 			goto out_unlock;
 		wait_on_page_writeback(page);
 	}
+
 	/*
 	 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
 	 * we cannot notice that anon_vma is freed while we migrates a page.
@@ -826,34 +827,15 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 	 * of migration. File cache pages are no problem because of page_lock()
 	 * File Caches may use write_page() or lock_page() in migration, then,
 	 * just care Anon page here.
+	 *
+	 * Only page_get_anon_vma() understands the subtleties of
+	 * getting a hold on an anon_vma from outside one of its mms.
+	 * But if we cannot get anon_vma, then we won't need it anyway,
+	 * because that implies that the anon page is no longer mapped
+	 * (and cannot be remapped so long as we hold the page lock).
 	 */
-	if (PageAnon(page) && !PageKsm(page)) {
-		/*
-		 * Only page_lock_anon_vma_read() understands the subtleties of
-		 * getting a hold on an anon_vma from outside one of its mms.
-		 */
+	if (PageAnon(page) && !PageKsm(page))
 		anon_vma = page_get_anon_vma(page);
-		if (anon_vma) {
-			/*
-			 * Anon page
-			 */
-		} else if (PageSwapCache(page)) {
-			/*
-			 * We cannot be sure that the anon_vma of an unmapped
-			 * swapcache page is safe to use because we don't
-			 * know in advance if the VMA that this page belonged
-			 * to still exists. If the VMA and others sharing the
-			 * data have been freed, then the anon_vma could
-			 * already be invalid.
-			 *
-			 * To avoid this possibility, swapcache pages get
-			 * migrated but are not remapped when migration
-			 * completes
-			 */
-		} else {
-			goto out_unlock;
-		}
-	}
 
 	/*
 	 * Block others from accessing the new page when we get around to
@@ -898,6 +880,8 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 		}
 	} else if (page_mapped(page)) {
 		/* Establish migration ptes */
+		VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
+			       page);
 		try_to_unmap(page,
 			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
 		page_was_mapped = 1;
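
Aside for reviewers: the patch collapses the old anon/swapcache/else branching into a single rule. If page_get_anon_vma() returns NULL, the page is already unmapped, and while we hold the page lock it cannot become mapped again, so no anon_vma pin is needed; the VM_BUG_ON_PAGE() added above asserts exactly that before try_to_unmap(). A minimal userspace sketch of the invariant follows. It is illustrative only: mock_page, mock_get_anon_vma and their fields are invented for the example and are not kernel APIs.

/*
 * Userspace sketch (not kernel code) of the invariant the patch relies
 * on: taking a reference on the anon_vma succeeds only while the page
 * is still mapped, and an unmapped page cannot be remapped while the
 * page lock is held, so a NULL result means the anon_vma is never needed.
 */
#include <assert.h>
#include <stdio.h>

struct mock_anon_vma { int refcount; };

struct mock_page {
	int mapcount;			/* > 0 while any pte maps the page */
	int locked;			/* models PageLocked() */
	struct mock_anon_vma *anon_vma;
};

/* Mirrors page_get_anon_vma(): take a reference only if still mapped. */
static struct mock_anon_vma *mock_get_anon_vma(struct mock_page *page)
{
	if (page->mapcount == 0)
		return NULL;		/* unmapped: caller will not need it */
	page->anon_vma->refcount++;
	return page->anon_vma;
}

int main(void)
{
	struct mock_anon_vma av = { .refcount = 1 };
	struct mock_page page = { .mapcount = 1, .locked = 1, .anon_vma = &av };
	struct mock_anon_vma *anon_vma = mock_get_anon_vma(&page);

	assert(page.locked);		/* the whole argument assumes the page lock */
	if (page.mapcount > 0) {
		/* The assertion the patch adds before try_to_unmap(). */
		assert(anon_vma != NULL);
		printf("mapped: anon_vma pinned (refcount=%d)\n",
		       anon_vma->refcount);
	} else {
		printf("unmapped under page lock: anon_vma not needed\n");
	}
	return 0;
}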