@@ -1211,7 +1211,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 		return ERR_PTR(-EFAULT);
 
 	/* Full NUMA hinting faults to serialise migration in fault paths */
-	if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
+	if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
 		goto out;
 
 	page = pmd_page(*pmd);
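
The FOLL_NUMA check above now keys off pmd_protnone(): a NUMA-hinting PMD is encoded as a mapping with no access rights, so GUP callers that want to serialise against hinting faults must skip it. For illustration only, here is a minimal userspace model of the x86-style test; the bit positions and the model_* names are assumptions made for the sketch, not the kernel's definitions.

/*
 * Minimal userspace model of an x86-style pmd_protnone() test.
 * Bit positions below are assumptions for illustration, not the
 * kernel's authoritative definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define MODEL_PAGE_PRESENT	(1ULL << 0)	/* assumed: _PAGE_PRESENT  */
#define MODEL_PAGE_PROTNONE	(1ULL << 8)	/* assumed: _PAGE_PROTNONE */

/*
 * A NUMA-hinting entry is "protnone but not present": the hardware
 * present bit is clear so every access faults, while the software
 * PROTNONE bit remembers that the page really is resident.
 */
static int model_pmd_protnone(uint64_t pmd_flags)
{
	return (pmd_flags & (MODEL_PAGE_PROTNONE | MODEL_PAGE_PRESENT)) ==
		MODEL_PAGE_PROTNONE;
}

int main(void)
{
	printf("present mapping: %d\n", model_pmd_protnone(MODEL_PAGE_PRESENT));
	printf("hinting mapping: %d\n", model_pmd_protnone(MODEL_PAGE_PROTNONE));
	return 0;
}
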
@@ -1342,7 +1342,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	/*
 	 * Migrate the THP to the requested node, returns with page unlocked
-	 * and pmd_numa cleared.
+	 * and access rights restored.
 	 */
 	spin_unlock(ptl);
 	migrated = migrate_misplaced_transhuge_page(mm, vma,
@@ -1357,7 +1357,7 @@ clear_pmdnuma:
 	BUG_ON(!PageLocked(page));
 	pmd = pmd_mknonnuma(pmd);
 	set_pmd_at(mm, haddr, pmdp, pmd);
-	VM_BUG_ON(pmd_numa(*pmdp));
+	VM_BUG_ON(pmd_protnone(*pmdp));
 	update_mmu_cache_pmd(vma, addr, pmdp);
 	unlock_page(page);
 out_unlock:
@@ -1483,7 +1483,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 		ret = 1;
 		if (!prot_numa) {
 			entry = pmdp_get_and_clear_notify(mm, addr, pmd);
-			if (pmd_numa(entry))
+			if (pmd_protnone(entry))
 				entry = pmd_mknonnuma(entry);
 			entry = pmd_modify(entry, newprot);
 			ret = HPAGE_PMD_NR;
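
In the !prot_numa branch the entry is cleared, any NUMA-hinting encoding is stripped, and only then is the requested protection applied, so transient hinting state never leaks into the permissions the caller asked for. The self-contained sketch below models that strip-then-modify ordering; it assumes (loosely, in the spirit of x86's _HPAGE_CHG_MASK) that a pmd_modify()-style operation preserves the hinting bit along with the frame number, which is why it has to be dropped first. All model_* names and bit positions are illustrative assumptions.

/*
 * Model of the strip-then-modify ordering in the !prot_numa branch.
 * All model_* names, masks and bit positions are assumptions.
 */
#include <stdint.h>
#include <stdio.h>

#define MODEL_PRESENT	(1ULL << 0)	/* assumed present bit          */
#define MODEL_RW	(1ULL << 1)	/* assumed writable bit         */
#define MODEL_PROTNONE	(1ULL << 8)	/* assumed NUMA-hinting bit     */
#define MODEL_PFN_MASK	(~0xfffULL)	/* assumed frame-number field   */
/* Bits a pmd_modify()-style helper is assumed to carry over unchanged. */
#define MODEL_CHG_MASK	(MODEL_PFN_MASK | MODEL_PROTNONE)

/* Like pmd_mknonnuma(): drop the hinting encoding, mark present again. */
static uint64_t model_mknonnuma(uint64_t pmd)
{
	return (pmd & ~MODEL_PROTNONE) | MODEL_PRESENT;
}

/* Like pmd_modify(): keep the preserved bits, swap in new protection. */
static uint64_t model_modify(uint64_t pmd, uint64_t newprot)
{
	return (pmd & MODEL_CHG_MASK) | (newprot & ~MODEL_CHG_MASK);
}

int main(void)
{
	uint64_t pmd = (0x1234ULL << 12) | MODEL_PROTNONE; /* hinting entry */
	uint64_t newprot = MODEL_PRESENT | MODEL_RW;

	/* Without stripping, the hinting bit survives the modify step. */
	printf("modify only      : %#llx\n",
	       (unsigned long long)model_modify(pmd, newprot));

	/* As done above: strip the hinting encoding, then modify. */
	printf("mknonnuma + modify: %#llx\n",
	       (unsigned long long)model_modify(model_mknonnuma(pmd), newprot));
	return 0;
}
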
@@ -1499,7 +1499,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 			 * local vs remote hits on the zero page.
 			 */
 			if (!is_huge_zero_page(page) &&
-			    !pmd_numa(*pmd)) {
+			    !pmd_protnone(*pmd)) {
 				pmdp_set_numa(mm, addr, pmd);
 				ret = HPAGE_PMD_NR;
 			}
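
The prot_numa branch is the arming side: pmdp_set_numa() turns the entry into a PROT_NONE-style mapping so the next touch takes a hinting fault, and the shared huge zero page is skipped because faults on it say nothing useful about task locality. The mechanism is analogous to what userspace gets from mprotect(PROT_NONE); the sketch below is an analogy only, not kernel code, and simply arms a page, takes the fault, and restores access from the SIGSEGV handler.

/*
 * Userspace analogy for a NUMA hinting fault: protect a page with
 * PROT_NONE, let the next access fault, then restore access from the
 * handler so the faulting access can be replayed.  Calling mprotect()
 * from a SIGSEGV handler is not strictly async-signal-safe; this is
 * purely an illustration of the fault-and-restore cycle.
 */
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static long page_size;

static void hinting_fault(int sig, siginfo_t *si, void *ctx)
{
	/* A real consumer would record who touched si->si_addr here. */
	char *page = (char *)((uintptr_t)si->si_addr &
			      ~((uintptr_t)page_size - 1));

	(void)sig; (void)ctx;
	mprotect(page, page_size, PROT_READ | PROT_WRITE);
}

int main(void)
{
	struct sigaction sa = { 0 };
	char *region;

	page_size = sysconf(_SC_PAGESIZE);
	sa.sa_flags = SA_SIGINFO;
	sa.sa_sigaction = hinting_fault;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);

	region = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
		      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (region == MAP_FAILED)
		return 1;

	region[0] = 1;				/* populate the page        */
	mprotect(region, page_size, PROT_NONE);	/* "arm" the hinting fault  */
	region[0] = 2;				/* faults; handler restores */

	printf("value after fault: %d\n", region[0]);
	return 0;
}
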
@@ -1767,9 +1767,9 @@ static int __split_huge_page_map(struct page *page,
 		pte_t *pte, entry;
 		BUG_ON(PageCompound(page+i));
 		/*
-		 * Note that pmd_numa is not transferred deliberately
-		 * to avoid any possibility that pte_numa leaks to
-		 * a PROT_NONE VMA by accident.
+		 * Note that NUMA hinting access restrictions are not
+		 * transferred to avoid any possibility of altering
+		 * permissions across VMAs.
 		 */
 		entry = mk_pte(page + i, vma->vm_page_prot);
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);