@@ -403,7 +403,7 @@ static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
 	},
 };
 
-static void migrate_page_add(struct page *page, struct list_head *pagelist,
+static int migrate_page_add(struct page *page, struct list_head *pagelist,
 				unsigned long flags);
 
 struct queue_pages {
@@ -463,12 +463,11 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
 	flags = qp->flags;
 	/* go to thp migration */
 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
-		if (!vma_migratable(walk->vma)) {
+		if (!vma_migratable(walk->vma) ||
+		    migrate_page_add(page, qp->pagelist, flags)) {
 			ret = 1;
 			goto unlock;
 		}
-
-		migrate_page_add(page, qp->pagelist, flags);
 	} else
 		ret = -EIO;
 unlock:
@@ -532,7 +531,14 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 				has_unmovable = true;
 				break;
 			}
-			migrate_page_add(page, qp->pagelist, flags);
+
+			/*
+			 * Do not abort immediately since there may be
+			 * temporarily off-LRU pages in the range.  Still
+			 * need to migrate other LRU pages.
+			 */
+			if (migrate_page_add(page, qp->pagelist, flags))
+				has_unmovable = true;
 		} else
 			break;
 	}
@@ -947,7 +953,7 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
 /*
  * page migration, thp tail pages can be passed.
  */
-static void migrate_page_add(struct page *page, struct list_head *pagelist,
+static int migrate_page_add(struct page *page, struct list_head *pagelist,
 				unsigned long flags)
 {
 	struct page *head = compound_head(page);
@@ -960,8 +966,19 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
 			mod_node_page_state(page_pgdat(head),
 				NR_ISOLATED_ANON + page_is_file_cache(head),
 				hpage_nr_pages(head));
+		} else if (flags & MPOL_MF_STRICT) {
+			/*
+			 * A non-movable page may reach here.  And there may
+			 * be temporarily off-LRU pages or non-LRU movable
+			 * pages.  Treat them as unmovable pages since they
+			 * can't be isolated, so they can't be moved at the
+			 * moment.  It should return -EIO for this case too.
+			 */
+			return -EIO;
 		}
 	}
+
+	return 0;
 }
 
 /* page allocation callback for NUMA node migration */
@@ -1164,9 +1181,10 @@ static struct page *new_page(struct page *page, unsigned long start)
 }
 #else
 
-static void migrate_page_add(struct page *page, struct list_head *pagelist,
+static int migrate_page_add(struct page *page, struct list_head *pagelist,
 				unsigned long flags)
 {
+	return -EIO;
 }
 
 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
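
Illustrative note (not part of the patch): the sketch below is a minimal
user-space model of the convention this change introduces --
migrate_page_add() now reports failure instead of returning void, and the
pte-range walker records the failure but keeps scanning so the remaining
movable pages are still queued.  "struct fake_page", queue_range() and the
simplified -EIO plumbing are hypothetical stand-ins; in the kernel the
walker returns 1 and mbind() later maps that onto -EIO.

#include <errno.h>
#include <stdio.h>

#define MPOL_MF_STRICT	(1 << 0)	/* stand-in flag values */
#define MPOL_MF_MOVE	(1 << 1)

struct fake_page {
	int isolatable;		/* would isolate_lru_page() succeed? */
};

/*
 * Models the patched migrate_page_add(): 0 on success, -EIO when the
 * page can't be isolated and MPOL_MF_STRICT was requested.
 */
static int migrate_page_add(struct fake_page *page, unsigned long flags)
{
	if (page->isolatable)
		return 0;		/* queued for migration */
	if (flags & MPOL_MF_STRICT)
		return -EIO;		/* unmovable page under STRICT */
	return 0;			/* silently skipped otherwise */
}

/*
 * Models the patched queue_pages_pte_range() loop: don't abort on the
 * first unmovable page; remember it and keep queueing the rest.
 */
static int queue_range(struct fake_page *pages, int n, unsigned long flags)
{
	int has_unmovable = 0;
	int i;

	for (i = 0; i < n; i++)
		if (migrate_page_add(&pages[i], flags))
			has_unmovable = 1;

	return has_unmovable ? -EIO : 0;
}

int main(void)
{
	/* the middle page is temporarily off the LRU, so unisolatable */
	struct fake_page range[] = { {1}, {0}, {1} };

	printf("MOVE only:   %d\n", queue_range(range, 3, MPOL_MF_MOVE));
	printf("MOVE|STRICT: %d\n",
	       queue_range(range, 3, MPOL_MF_MOVE | MPOL_MF_STRICT));
	return 0;
}

With MPOL_MF_MOVE alone the unmovable page is skipped and the walk
succeeds; adding MPOL_MF_STRICT makes the same walk queue the first and
third pages before reporting -EIO, which is the "queue what you can, then
fail" behavior the hunks above implement.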