@@ -634,15 +634,10 @@ static void copy_huge_page(struct page *dst, struct page *src)
 /*
  * Copy the page to its new location
  */
-void migrate_page_copy(struct page *newpage, struct page *page)
+void migrate_page_states(struct page *newpage, struct page *page)
 {
 	int cpupid;
 
-	if (PageHuge(page) || PageTransHuge(page))
-		copy_huge_page(newpage, page);
-	else
-		copy_highpage(newpage, page);
-
 	if (PageError(page))
 		SetPageError(newpage);
 	if (PageReferenced(page))
@@ -696,6 +691,17 @@ void migrate_page_copy(struct page *newpage, struct page *page)
 
 	mem_cgroup_migrate(page, newpage);
 }
+EXPORT_SYMBOL(migrate_page_states);
+
+void migrate_page_copy(struct page *newpage, struct page *page)
+{
+	if (PageHuge(page) || PageTransHuge(page))
+		copy_huge_page(newpage, page);
+	else
+		copy_highpage(newpage, page);
+
+	migrate_page_states(newpage, page);
+}
 EXPORT_SYMBOL(migrate_page_copy);
 
 /************************************************************
@@ -721,7 +727,10 @@ int migrate_page(struct address_space *mapping,
 	if (rc != MIGRATEPAGE_SUCCESS)
 		return rc;
 
-	migrate_page_copy(newpage, page);
+	if (mode != MIGRATE_SYNC_NO_COPY)
+		migrate_page_copy(newpage, page);
+	else
+		migrate_page_states(newpage, page);
 	return MIGRATEPAGE_SUCCESS;
 }
 EXPORT_SYMBOL(migrate_page);
@@ -771,12 +780,15 @@ int buffer_migrate_page(struct address_space *mapping,
 
 	SetPagePrivate(newpage);
 
-	migrate_page_copy(newpage, page);
+	if (mode != MIGRATE_SYNC_NO_COPY)
+		migrate_page_copy(newpage, page);
+	else
+		migrate_page_states(newpage, page);
 
 	bh = head;
 	do {
 		unlock_buffer(bh);
- 		put_bh(bh);
+		put_bh(bh);
 		bh = bh->b_this_page;
 
 	} while (bh != head);
@@ -835,8 +847,13 @@ static int fallback_migrate_page(struct address_space *mapping,
 {
 	if (PageDirty(page)) {
 		/* Only writeback pages in full synchronous migration */
-		if (mode != MIGRATE_SYNC)
+		switch (mode) {
+		case MIGRATE_SYNC:
+		case MIGRATE_SYNC_NO_COPY:
+			break;
+		default:
 			return -EBUSY;
+		}
 		return writeout(mapping, page);
 	}
 
@@ -973,7 +990,11 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 		 * the retry loop is too short and in the sync-light case,
 		 * the overhead of stalling is too much
 		 */
-		if (mode != MIGRATE_SYNC) {
+		switch (mode) {
+		case MIGRATE_SYNC:
+		case MIGRATE_SYNC_NO_COPY:
+			break;
+		default:
 			rc = -EBUSY;
 			goto out_unlock;
 		}
@@ -1243,8 +1264,15 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 		return -ENOMEM;
 
 	if (!trylock_page(hpage)) {
-		if (!force || mode != MIGRATE_SYNC)
+		if (!force)
 			goto out;
+		switch (mode) {
+		case MIGRATE_SYNC:
+		case MIGRATE_SYNC_NO_COPY:
+			break;
+		default:
+			goto out;
+		}
 		lock_page(hpage);
 	}
 
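
As illustration only (not part of the patch): the split above lets a migration caller opt out of the data copy and transfer just the page state through the newly exported migrate_page_states(), which is what MIGRATE_SYNC_NO_COPY is for when the data copy happens elsewhere (typically via device DMA). Below is a minimal, hypothetical sketch of an address_space migratepage callback honoring the new mode; my_migratepage() is an assumed name, and the six-argument migrate_page_move_mapping() call simply mirrors the form migrate_page() uses at this point in the tree.

#include <linux/fs.h>
#include <linux/migrate.h>
#include <linux/migrate_mode.h>
#include <linux/mm.h>

/*
 * Hypothetical migratepage callback mirroring the migrate_page() change
 * above: with MIGRATE_SYNC_NO_COPY the caller is expected to have copied
 * (or to copy) the page contents itself, so only the page state is
 * transferred here.
 */
static int my_migratepage(struct address_space *mapping,
			  struct page *newpage, struct page *page,
			  enum migrate_mode mode)
{
	int rc;

	/* Move the page's mapping entries first, as migrate_page() does. */
	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);	/* data + state */
	else
		migrate_page_states(newpage, page);	/* state only */

	return MIGRATEPAGE_SUCCESS;
}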