@@ -411,6 +411,21 @@ struct queue_pages {
 	struct vm_area_struct *prev;
 };
 
+/*
+ * Check if the page's nid is in qp->nmask.
+ *
+ * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
+ * in the invert of qp->nmask.
+ */
+static inline bool queue_pages_required(struct page *page,
+					struct queue_pages *qp)
+{
+	int nid = page_to_nid(page);
+	unsigned long flags = qp->flags;
+
+	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
+}
+
 /*
  * Scan through pages checking if pages follow certain conditions,
  * and move them to the pagelist if they do.
@@ -464,8 +479,7 @@ retry:
 		 */
 		if (PageReserved(page))
 			continue;
-		nid = page_to_nid(page);
-		if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
+		if (!queue_pages_required(page, qp))
 			continue;
 		if (PageTransCompound(page)) {
 			get_page(page);
@@ -497,7 +511,6 @@ static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
 #ifdef CONFIG_HUGETLB_PAGE
 	struct queue_pages *qp = walk->private;
 	unsigned long flags = qp->flags;
-	int nid;
 	struct page *page;
 	spinlock_t *ptl;
 	pte_t entry;
@@ -507,8 +520,7 @@ static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
 	if (!pte_present(entry))
 		goto unlock;
 	page = pte_page(entry);
-	nid = page_to_nid(page);
-	if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
+	if (!queue_pages_required(page, qp))
 		goto unlock;
 	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
 	if (flags & (MPOL_MF_MOVE_ALL) ||
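
The refactor preserves behavior: the old call sites skipped a page when node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT), while the new ones skip it when !queue_pages_required(page, qp), and the two predicates are complements of each other for every combination of node membership and MPOL_MF_INVERT. The following is a minimal, standalone userspace sketch of that equivalence, not kernel code; the value used for MPOL_MF_INVERT, the toy nodemask, and the *_stub helpers are stand-ins invented for illustration.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define MPOL_MF_INVERT	(1 << 1)	/* stand-in value for the kernel flag */

/* Toy nodemask: one bit per node id, standing in for nodemask_t/node_isset(). */
static bool node_isset_stub(int nid, unsigned long nmask)
{
	return (nmask >> nid) & 1;
}

/* Mirrors the new helper: true when the page's node should be queued. */
static bool queue_pages_required_stub(int nid, unsigned long nmask,
				      unsigned long flags)
{
	return node_isset_stub(nid, nmask) == !(flags & MPOL_MF_INVERT);
}

int main(void)
{
	unsigned long nmask = 0x5;	/* toy mask: nodes 0 and 2 allowed */
	int nid, inv;

	for (nid = 0; nid < 4; nid++) {
		for (inv = 0; inv < 2; inv++) {
			unsigned long flags = inv ? MPOL_MF_INVERT : 0;

			/* Old open-coded skip condition at the call sites. */
			bool old_skip = node_isset_stub(nid, nmask) ==
					!!(flags & MPOL_MF_INVERT);
			/* New skip condition using the helper. */
			bool new_skip = !queue_pages_required_stub(nid, nmask, flags);

			assert(old_skip == new_skip);
			printf("nid=%d invert=%d -> queued=%d\n",
			       nid, inv, !new_skip);
		}
	}
	return 0;
}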