|
|
@@ -428,6 +428,13 @@ static inline bool queue_pages_required(struct page *page,
|
|
|
return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
|
|
|
}
|
|
|
|
|
|
+/*
|
|
|
+ * queue_pages_pmd() has three possible return values:
|
|
|
+ * 1 - pages are placed on the right node or queued successfully.
|
|
|
+ * 0 - THP was split.
|
|
|
+ * -EIO - is a migration entry or MPOL_MF_STRICT was specified and an existing
|
|
|
+ * page was already on a node that does not follow the policy.
|
|
|
+ */
|
|
|
static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
|
|
|
unsigned long end, struct mm_walk *walk)
|
|
|
{
|
|
|
@@ -437,7 +444,7 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
|
|
|
unsigned long flags;
|
|
|
|
|
|
if (unlikely(is_pmd_migration_entry(*pmd))) {
|
|
|
- ret = 1;
|
|
|
+ ret = -EIO;
|
|
|
goto unlock;
|
|
|
}
|
|
|
page = pmd_page(*pmd);
|
|
|
@@ -454,8 +461,15 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
|
|
|
ret = 1;
|
|
|
flags = qp->flags;
|
|
|
/* go to thp migration */
|
|
|
- if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
|
|
|
+ if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
|
|
|
+ if (!vma_migratable(walk->vma)) {
|
|
|
+ ret = -EIO;
|
|
|
+ goto unlock;
|
|
|
+ }
|
|
|
+
|
|
|
migrate_page_add(page, qp->pagelist, flags);
|
|
|
+ } else
|
|
|
+ ret = -EIO;
|
|
|
unlock:
|
|
|
spin_unlock(ptl);
|
|
|
out:
|
|
|
@@ -480,8 +494,10 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
|
|
|
ptl = pmd_trans_huge_lock(pmd, vma);
|
|
|
if (ptl) {
|
|
|
ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
|
|
|
- if (ret)
|
|
|
+ if (ret > 0)
|
|
|
return 0;
|
|
|
+ else if (ret < 0)
|
|
|
+ return ret;
|
|
|
}
|
|
|
|
|
|
if (pmd_trans_unstable(pmd))
|
|
|
@@ -502,11 +518,16 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
|
|
|
continue;
|
|
|
if (!queue_pages_required(page, qp))
|
|
|
continue;
|
|
|
- migrate_page_add(page, qp->pagelist, flags);
|
|
|
+ if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
|
|
|
+ if (!vma_migratable(vma))
|
|
|
+ break;
|
|
|
+ migrate_page_add(page, qp->pagelist, flags);
|
|
|
+ } else
|
|
|
+ break;
|
|
|
}
|
|
|
pte_unmap_unlock(pte - 1, ptl);
|
|
|
cond_resched();
|
|
|
- return 0;
|
|
|
+ return addr != end ? -EIO : 0;
|
|
|
}
|
|
|
|
|
|
static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
|
|
|
@@ -576,7 +597,12 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
|
|
|
unsigned long endvma = vma->vm_end;
|
|
|
unsigned long flags = qp->flags;
|
|
|
|
|
|
- if (!vma_migratable(vma))
|
|
|
+ /*
|
|
|
+ * Need to check MPOL_MF_STRICT to return -EIO if possible
|
|
|
+ * regardless of vma_migratable
|
|
|
+ */
|
|
|
+ if (!vma_migratable(vma) &&
|
|
|
+ !(flags & MPOL_MF_STRICT))
|
|
|
return 1;
|
|
|
|
|
|
if (endvma > end)
|
|
|
@@ -603,7 +629,7 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
|
|
|
}
|
|
|
|
|
|
/* queue pages from current vma */
|
|
|
- if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
|
|
|
+ if (flags & MPOL_MF_VALID)
|
|
|
return 0;
|
|
|
return 1;
|
|
|
}
|