@@ -71,29 +71,13 @@ int migrate_prep_local(void)
 	return 0;
 }
 
-/*
- * Add isolated pages on the list back to the LRU under page lock
- * to avoid leaking evictable pages back onto unevictable list.
- */
-void putback_lru_pages(struct list_head *l)
-{
-	struct page *page;
-	struct page *page2;
-
-	list_for_each_entry_safe(page, page2, l, lru) {
-		list_del(&page->lru);
-		dec_zone_page_state(page, NR_ISOLATED_ANON +
-				page_is_file_cache(page));
-		putback_lru_page(page);
-	}
-}
-
 /*
  * Put previously isolated pages back onto the appropriate lists
  * from where they were once taken off for compaction/migration.
  *
- * This function shall be used instead of putback_lru_pages(),
- * whenever the isolated pageset has been built by isolate_migratepages_range()
+ * This function shall be used whenever the isolated pageset has been
+ * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
+ * and isolate_huge_page().
  */
 void putback_movable_pages(struct list_head *l)
 {
@@ -1725,7 +1709,12 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
 	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
 				     node, MIGRATE_ASYNC, MR_NUMA_MISPLACED);
 	if (nr_remaining) {
-		putback_lru_pages(&migratepages);
+		if (!list_empty(&migratepages)) {
+			list_del(&page->lru);
+			dec_zone_page_state(page, NR_ISOLATED_ANON +
+					page_is_file_cache(page));
+			putback_lru_page(page);
+		}
 		isolated = 0;
 	} else
 		count_vm_numa_event(NUMA_PAGE_MIGRATE);
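
For context, here is a minimal caller sketch of the pattern the updated
comment describes: isolate an LRU page, hand it to migrate_pages(), and
rely on putback_movable_pages() to undo the isolation on failure. This
is not part of the patch; migrate_one() and new_page() are hypothetical
names, and it assumes the five-argument migrate_pages() signature of
this kernel era.

	#include <linux/gfp.h>
	#include <linux/list.h>
	#include <linux/migrate.h>
	#include <linux/mm_inline.h>
	#include <linux/swap.h>
	#include <linux/vmstat.h>

	/* hypothetical allocation callback for migrate_pages() */
	static struct page *new_page(struct page *p, unsigned long private,
					int **result)
	{
		return alloc_page(GFP_HIGHUSER_MOVABLE);
	}

	static int migrate_one(struct page *page)
	{
		LIST_HEAD(pagelist);
		int err;

		/* take the page off its LRU list; fails if already isolated */
		if (isolate_lru_page(page))
			return -EBUSY;

		/* account the isolation, mirroring what the putback undoes */
		inc_zone_page_state(page, NR_ISOLATED_ANON +
					page_is_file_cache(page));
		list_add_tail(&page->lru, &pagelist);

		err = migrate_pages(&pagelist, new_page, 0,
					MIGRATE_SYNC, MR_SYSCALL);
		if (err)
			/* puts back LRU, balloon and hugetlbfs pages alike */
			putback_movable_pages(&pagelist);
		return err;
	}

Note how migrate_misplaced_page() above instead open-codes the putback
for its single already-known page rather than keeping putback_lru_pages()
around for that one remaining caller.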