@@ -746,7 +746,7 @@ static int fallback_migrate_page(struct address_space *mapping,
  *  MIGRATEPAGE_SUCCESS - success
  */
 static int move_to_new_page(struct page *newpage, struct page *page,
-				int remap_swapcache, enum migrate_mode mode)
+				int page_was_mapped, enum migrate_mode mode)
 {
 	struct address_space *mapping;
 	int rc;
@@ -784,7 +784,7 @@ static int move_to_new_page(struct page *newpage, struct page *page,
 		newpage->mapping = NULL;
 	} else {
 		mem_cgroup_migrate(page, newpage, false);
-		if (remap_swapcache)
+		if (page_was_mapped)
 			remove_migration_ptes(page, newpage);
 		page->mapping = NULL;
 	}
@@ -798,7 +798,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 				int force, enum migrate_mode mode)
 {
 	int rc = -EAGAIN;
-	int remap_swapcache = 1;
+	int page_was_mapped = 0;
 	struct anon_vma *anon_vma = NULL;
 
 	if (!trylock_page(page)) {
@@ -870,7 +870,6 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 			 * migrated but are not remapped when migration
 			 * completes
 			 */
-			remap_swapcache = 0;
 		} else {
 			goto out_unlock;
 		}
@@ -910,13 +909,17 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 	}
 
 	/* Establish migration ptes or remove ptes */
-	try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
+	if (page_mapped(page)) {
+		try_to_unmap(page,
+			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
+		page_was_mapped = 1;
+	}
 
 skip_unmap:
 	if (!page_mapped(page))
-		rc = move_to_new_page(newpage, page, remap_swapcache, mode);
+		rc = move_to_new_page(newpage, page, page_was_mapped, mode);
 
-	if (rc && remap_swapcache)
+	if (rc && page_was_mapped)
 		remove_migration_ptes(page, page);
 
 	/* Drop an anon_vma reference if we took one */
@@ -1017,6 +1020,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 {
 	int rc = 0;
 	int *result = NULL;
+	int page_was_mapped = 0;
 	struct page *new_hpage;
 	struct anon_vma *anon_vma = NULL;
 
@@ -1047,12 +1051,16 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 	if (PageAnon(hpage))
 		anon_vma = page_get_anon_vma(hpage);
 
-	try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
+	if (page_mapped(hpage)) {
+		try_to_unmap(hpage,
+			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
+		page_was_mapped = 1;
+	}
 
 	if (!page_mapped(hpage))
-		rc = move_to_new_page(new_hpage, hpage, 1, mode);
+		rc = move_to_new_page(new_hpage, hpage, page_was_mapped, mode);
 
-	if (rc != MIGRATEPAGE_SUCCESS)
+	if (rc != MIGRATEPAGE_SUCCESS && page_was_mapped)
 		remove_migration_ptes(hpage, hpage);
 
 	if (anon_vma)