@@ -69,11 +69,17 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 	pte_t *pte, oldpte;
 	spinlock_t *ptl;
 	unsigned long pages = 0;
+	int target_node = NUMA_NO_NODE;
 
 	pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl);
 	if (!pte)
 		return 0;
 
+	/* Get target node for single threaded private VMAs */
+	if (prot_numa && !(vma->vm_flags & VM_SHARED) &&
+	    atomic_read(&vma->vm_mm->mm_users) == 1)
+		target_node = numa_node_id();
+
 	arch_enter_lazy_mmu_mode();
 	do {
 		oldpte = *pte;
@@ -95,6 +101,13 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 			/* Avoid TLB flush if possible */
 			if (pte_protnone(oldpte))
 				continue;
+
+			/*
+			 * Don't mess with PTEs if page is already on the node
+			 * a single-threaded process is running on.
+			 */
+			if (target_node == page_to_nid(page))
+				continue;
 		}
 
 		ptent = ptep_modify_prot_start(mm, addr, pte);
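
For illustration only, here is a minimal user-space sketch of the skip pattern the hunks add: resolve a target node once per range when the mapping is private and the mm has a single user, then leave alone any entry whose backing page already sits on that node. Everything here (struct fake_page, struct fake_pte, pages_updated()) is a hypothetical stand-in, not a kernel API; it just mirrors the control flow of the diff.

/*
 * Sketch (not kernel code): skip PROT_NONE-marking for pages that are
 * already on the node a single-threaded task is running on.
 */
#include <stdio.h>

#define NUMA_NO_NODE (-1)

struct fake_page { int nid; };                    /* hypothetical page -> node id */
struct fake_pte  { struct fake_page *page; int prot_none; };

static int pages_updated(struct fake_pte *pte, int n,
			 int prot_numa, int single_threaded, int this_node)
{
	int target_node = NUMA_NO_NODE;
	int i, pages = 0;

	/* Mirror of: only pick a target node for single threaded private VMAs */
	if (prot_numa && single_threaded)
		target_node = this_node;

	for (i = 0; i < n; i++) {
		/* Avoid the rewrite if the entry is already PROT_NONE ... */
		if (pte[i].prot_none)
			continue;
		/* ... or if its page already lives on the node we run on. */
		if (target_node == pte[i].page->nid)
			continue;
		pte[i].prot_none = 1;             /* stand-in for the PTE rewrite */
		pages++;
	}
	return pages;
}

int main(void)
{
	struct fake_page p0 = { .nid = 0 }, p1 = { .nid = 1 };
	struct fake_pte ptes[] = {
		{ &p0, 0 },	/* on node 0: skipped when running on node 0 */
		{ &p1, 0 },	/* on node 1: still marked PROT_NONE         */
	};

	printf("updated %d of 2 entries\n",
	       pages_updated(ptes, 2, /*prot_numa=*/1,
			     /*single_threaded=*/1, /*this_node=*/0));
	return 0;
}

Run on node 0, the sketch updates one of the two entries, which is the point of the patch: a single-threaded private mapping never needs a NUMA hinting fault (and the later TLB flush) for pages that are already local.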