@@ -31,6 +31,7 @@
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
+#include <linux/compaction.h>
#include <linux/syscalls.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
@@ -73,6 +74,81 @@ int migrate_prep_local(void)
        return 0;
}

+bool isolate_movable_page(struct page *page, isolate_mode_t mode)
+{
+        struct address_space *mapping;
+
+        /*
+         * Avoid burning cycles on pages that are still under __free_pages(),
+         * or that just got freed under us.
+         *
+         * In case we 'win' a race for a movable page being freed under us and
+         * raise its refcount, preventing __free_pages() from doing its job,
+         * the put_page() at the end of this block will take care of
+         * releasing this page, thus avoiding a nasty leakage.
+         */
+        if (unlikely(!get_page_unless_zero(page)))
+                goto out;
+
+        /*
+         * Check __PageMovable() before taking the page lock: the page's owner
+         * assumes nobody touches the PG_lock of a newly allocated page, so
+         * unconditionally grabbing the lock would break that assumption.
+         */
+        if (unlikely(!__PageMovable(page)))
+                goto out_putpage;
+        /*
+         * As movable pages are not isolated from LRU lists, concurrent
+         * compaction threads can race against page migration functions
+         * as well as against the release of a page.
+         *
+         * To avoid having an already isolated movable page (wrongly)
+         * re-isolated while it is under migration, and to avoid
+         * attempting to isolate pages that are being released, let's
+         * make sure we hold the page lock before proceeding with the
+         * movable page isolation steps.
+         */
+        if (unlikely(!trylock_page(page)))
+                goto out_putpage;
+
+        if (!PageMovable(page) || PageIsolated(page))
+                goto out_no_isolated;
+
+        mapping = page_mapping(page);
+        VM_BUG_ON_PAGE(!mapping, page);
+
+        if (!mapping->a_ops->isolate_page(page, mode))
+                goto out_no_isolated;
+
+        /* The driver shouldn't use the PG_isolated bit of page->flags */
+        WARN_ON_ONCE(PageIsolated(page));
+        __SetPageIsolated(page);
+        unlock_page(page);
+
+        return true;
+
+out_no_isolated:
+        unlock_page(page);
+out_putpage:
+        put_page(page);
+out:
+        return false;
+}
+
+/* It should be called on a page which is PG_movable */
+void putback_movable_page(struct page *page)
+{
+        struct address_space *mapping;
+
+        VM_BUG_ON_PAGE(!PageLocked(page), page);
+        VM_BUG_ON_PAGE(!PageMovable(page), page);
+        VM_BUG_ON_PAGE(!PageIsolated(page), page);
+
+        mapping = page_mapping(page);
+        mapping->a_ops->putback_page(page);
+        __ClearPageIsolated(page);
+}
+
/*
 * Put previously isolated pages back onto the appropriate lists
 * from where they were once taken off for compaction/migration.
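
The two helpers above are only half of the protocol; the other half lives in the driver that owns the pages. The sketch below is not part of this patch: the demo_* names are invented, the locking is deliberately simplistic, and it assumes the __SetPageMovable() helper plus the isolate_page/putback_page/migratepage address_space_operations hooks introduced elsewhere in this series. Treat it as an illustration of the callback wiring, not a reference implementation.

#include <linux/fs.h>
#include <linux/migrate.h>
#include <linux/mm.h>
#include <linux/spinlock.h>

/* Hypothetical driver-side sketch; not part of the patch. */
static LIST_HEAD(demo_pages);           /* pages currently owned by the driver */
static DEFINE_SPINLOCK(demo_lock);      /* protects demo_pages */

static bool demo_isolate_page(struct page *page, isolate_mode_t mode)
{
        /*
         * Called from isolate_movable_page() with the page locked and an
         * extra reference held: detach the page from the driver's lists so
         * nothing else touches it while it is under migration.
         */
        spin_lock(&demo_lock);
        list_del_init(&page->lru);
        spin_unlock(&demo_lock);
        return true;
}

static void demo_putback_page(struct page *page)
{
        /* Called from putback_movable_page() when migration gives up. */
        spin_lock(&demo_lock);
        list_add(&page->lru, &demo_pages);
        spin_unlock(&demo_lock);
}

static int demo_migratepage(struct address_space *mapping,
                            struct page *newpage, struct page *page,
                            enum migrate_mode mode)
{
        /*
         * Copy contents and flags, mark the new page movable, and start
         * tracking it in place of the old one.
         */
        migrate_page_copy(newpage, page);
        __SetPageMovable(newpage, mapping);     /* assumed helper from this series */
        spin_lock(&demo_lock);
        list_add(&newpage->lru, &demo_pages);
        spin_unlock(&demo_lock);
        return MIGRATEPAGE_SUCCESS;
}

static const struct address_space_operations demo_aops = {
        .isolate_page   = demo_isolate_page,
        .putback_page   = demo_putback_page,
        .migratepage    = demo_migratepage,
};

On the allocation side such a driver would mark each page with __SetPageMovable(page, mapping) while holding the page lock; that is what makes the __PageMovable()/PageMovable() checks in isolate_movable_page() succeed.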
@@ -94,10 +170,25 @@ void putback_movable_pages(struct list_head *l)
                list_del(&page->lru);
                dec_zone_page_state(page, NR_ISOLATED_ANON +
                                page_is_file_cache(page));
-                if (unlikely(isolated_balloon_page(page)))
+                if (unlikely(isolated_balloon_page(page))) {
                        balloon_page_putback(page);
+                /*
+                 * We isolated a non-LRU movable page, so here we can use
+                 * __PageMovable() because an LRU page's mapping cannot have
+                 * PAGE_MAPPING_MOVABLE set.
+                 */
+                } else if (unlikely(__PageMovable(page))) {
+                        VM_BUG_ON_PAGE(!PageIsolated(page), page);
+                        lock_page(page);
+                        if (PageMovable(page))
+                                putback_movable_page(page);
+                        else
+                                __ClearPageIsolated(page);
+                        unlock_page(page);
+                        put_page(page);
+                } else {
                        putback_lru_page(page);
+                }
        }
}

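For completeness, here is a rough caller-side sketch, again not from the patch, of how isolate_movable_page() and putback_movable_pages() are meant to be driven, in the style of memory hotplug or compaction. PFN validity checks, zone locking and the LRU-page path are omitted, and demo_migrate_pfn_range(), the ISOLATE_UNEVICTABLE mode and the MR_MEMORY_HOTPLUG reason are illustrative choices rather than requirements.

#include <linux/migrate.h>
#include <linux/mm.h>

static int demo_migrate_pfn_range(unsigned long start, unsigned long end,
                                  new_page_t get_new_page)
{
        LIST_HEAD(isolated);
        unsigned long pfn;
        int err;

        for (pfn = start; pfn < end; pfn++) {
                struct page *page = pfn_to_page(pfn);

                /*
                 * Non-LRU movable pages advertise themselves through the
                 * PAGE_MAPPING_MOVABLE bit in page->mapping.
                 */
                if (!__PageMovable(page))
                        continue;

                /*
                 * On success we hold a reference and PG_isolated is set,
                 * so the page can sit safely on our private list.
                 */
                if (isolate_movable_page(page, ISOLATE_UNEVICTABLE))
                        list_add(&page->lru, &isolated);
        }

        err = migrate_pages(&isolated, get_new_page, NULL, 0,
                        MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
        if (err)
                /* Whatever could not be migrated goes back to its owner. */
                putback_movable_pages(&isolated);
        return err;
}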
@@ -594,7 +685,7 @@ EXPORT_SYMBOL(migrate_page_copy);
 ***********************************************************/

/*
- * Common logic to directly migrate a single page suitable for
+ * Common logic to directly migrate a single LRU page suitable for
 * pages that do not use PagePrivate/PagePrivate2.
 *
 * Pages are locked upon entry and exit.
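
Since the reworded comment stresses that this common logic is for LRU pages whose address space keeps no PagePrivate state, it may help to show what "suitable" looks like from a filesystem's point of view. A hypothetical address space of that kind can simply point its callback at migrate_page(); the demo_fs_aops name below is made up for illustration.

static const struct address_space_operations demo_fs_aops = {
        /* other callbacks omitted for brevity */
        .migratepage    = migrate_page, /* pages carry no PagePrivate data */
};

Address spaces with buffers or other private state provide buffer_migrate_page() or their own callback instead, and mappings that provide no callback at all end up in fallback_migrate_page().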
@@ -757,33 +848,72 @@ static int move_to_new_page(struct page *newpage, struct page *page,
                                enum migrate_mode mode)
{
        struct address_space *mapping;
-        int rc;
+        int rc = -EAGAIN;
+        bool is_lru = !__PageMovable(page);

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

        mapping = page_mapping(page);
-        if (!mapping)
-                rc = migrate_page(mapping, newpage, page, mode);
-        else if (mapping->a_ops->migratepage)
+
+        if (likely(is_lru)) {
+                if (!mapping)
+                        rc = migrate_page(mapping, newpage, page, mode);
+                else if (mapping->a_ops->migratepage)
+                        /*
+                         * Most pages have a mapping and most filesystems
+                         * provide a migratepage callback. Anonymous pages
+                         * are part of swap space which also has its own
+                         * migratepage callback. This is the most common path
+                         * for page migration.
+                         */
+                        rc = mapping->a_ops->migratepage(mapping, newpage,
+                                                        page, mode);
+                else
+                        rc = fallback_migrate_page(mapping, newpage,
+                                                        page, mode);
+        } else {
                /*
-                 * Most pages have a mapping and most filesystems provide a
-                 * migratepage callback. Anonymous pages are part of swap
-                 * space which also has its own migratepage callback. This
-                 * is the most common path for page migration.
+                 * A non-LRU page could have been released after the
+                 * isolation step. In that case, we shouldn't try migration.
                 */
-                rc = mapping->a_ops->migratepage(mapping, newpage, page, mode);
-        else
-                rc = fallback_migrate_page(mapping, newpage, page, mode);
+                VM_BUG_ON_PAGE(!PageIsolated(page), page);
+                if (!PageMovable(page)) {
+                        rc = MIGRATEPAGE_SUCCESS;
+                        __ClearPageIsolated(page);
+                        goto out;
+                }
+
+                rc = mapping->a_ops->migratepage(mapping, newpage,
+                                                page, mode);
+                WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
+                                !PageIsolated(page));
+        }

        /*
         * When successful, old pagecache page->mapping must be cleared before
         * page is freed; but stats require that PageAnon be left as PageAnon.
         */
        if (rc == MIGRATEPAGE_SUCCESS) {
-                if (!PageAnon(page))
+                if (__PageMovable(page)) {
+                        VM_BUG_ON_PAGE(!PageIsolated(page), page);
+
+                        /*
+                         * We clear PG_movable under the page lock so that a
+                         * compactor cannot try to migrate this page.
+                         */
+                        __ClearPageIsolated(page);
+                }
+
+                /*
+                 * The page->mapping of anonymous and movable pages will be
+                 * cleared by free_pages_prepare, so don't reset it here;
+                 * keeping it lets type checks such as PageAnon() still work.
+                 */
+                if (!PageMappingFlags(page))
                        page->mapping = NULL;
        }
+out:
        return rc;
}

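The checks above (__PageMovable(), PageMappingFlags(), and the decision not to clear page->mapping for anonymous and movable pages) all lean on the way this series encodes the page type in the low bits of page->mapping. The sketch below assumes that encoding rather than quoting it; the demo_page_movable() name is made up, and the exact definitions live in include/linux/page-flags.h of this series.

#include <linux/mm.h>

/*
 * Assumed encoding:
 *   PAGE_MAPPING_ANON    (0x1)  anonymous page, mapping points at an anon_vma
 *   PAGE_MAPPING_MOVABLE (0x2)  non-LRU movable page, mapping points at the
 *                               driver's struct address_space
 *   PAGE_MAPPING_FLAGS   (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
 *
 * which is why a lockless __PageMovable()-style test only needs the low bits:
 */
static inline bool demo_page_movable(struct page *page)
{
        return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
                        PAGE_MAPPING_MOVABLE;
}

That is also why move_to_new_page() only resets page->mapping when PageMappingFlags() is clear: for anonymous and movable pages those bits carry type information that checks such as PageAnon() rely on, and free_pages_prepare() clears the field when the page is finally freed.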
@@ -793,6 +923,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
        int rc = -EAGAIN;
        int page_was_mapped = 0;
        struct anon_vma *anon_vma = NULL;
+        bool is_lru = !__PageMovable(page);

        if (!trylock_page(page)) {
                if (!force || mode == MIGRATE_ASYNC)
@@ -873,6 +1004,11 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
                goto out_unlock_both;
        }

+        if (unlikely(!is_lru)) {
+                rc = move_to_new_page(newpage, page, mode);
+                goto out_unlock_both;
+        }
+
        /*
         * Corner case handling:
         * 1. When a new swap-cache page is read into, it is added to the LRU
@@ -922,7 +1058,8 @@ out:
         * list in here.
         */
        if (rc == MIGRATEPAGE_SUCCESS) {
-                if (unlikely(__is_movable_balloon_page(newpage)))
+                if (unlikely(__is_movable_balloon_page(newpage) ||
+                                __PageMovable(newpage)))
                        put_page(newpage);
                else
                        putback_lru_page(newpage);
@@ -963,6 +1100,12 @@ static ICE_noinline int unmap_and_move(new_page_t get_new_page,
                /* page was freed from under us. So we are done. */
                ClearPageActive(page);
                ClearPageUnevictable(page);
+                if (unlikely(__PageMovable(page))) {
+                        lock_page(page);
+                        if (!PageMovable(page))
+                                __ClearPageIsolated(page);
+                        unlock_page(page);
+                }
                if (put_new_page)
                        put_new_page(newpage, private);
                else
@@ -1012,8 +1155,21 @@ out:
                        num_poisoned_pages_inc();
                }
        } else {
-                if (rc != -EAGAIN)
-                        putback_lru_page(page);
+                if (rc != -EAGAIN) {
+                        if (likely(!__PageMovable(page))) {
+                                putback_lru_page(page);
+                                goto put_new;
+                        }
+
+                        lock_page(page);
+                        if (PageMovable(page))
+                                putback_movable_page(page);
+                        else
+                                __ClearPageIsolated(page);
+                        unlock_page(page);
+                        put_page(page);
+                }
+put_new:
                if (put_new_page)
                        put_new_page(newpage, private);
                else