@@ -613,7 +613,7 @@ static inline int queue_pages_pgd_range(struct vm_area_struct *vma,
 	return 0;
 }
 
-#ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
+#ifdef CONFIG_NUMA_BALANCING
 /*
  * This is used to mark a range of virtual addresses to be inaccessible.
  * These are later cleared by a NUMA hinting fault. Depending on these
@@ -627,7 +627,6 @@ unsigned long change_prot_numa(struct vm_area_struct *vma,
 			unsigned long addr, unsigned long end)
 {
 	int nr_updated;
-	BUILD_BUG_ON(_PAGE_NUMA != _PAGE_PROTNONE);
 
 	nr_updated = change_protection(vma, addr, end, vma->vm_page_prot, 0, 1);
 	if (nr_updated)
@@ -641,7 +640,7 @@ static unsigned long change_prot_numa(struct vm_area_struct *vma,
 {
 	return 0;
 }
-#endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */
+#endif /* CONFIG_NUMA_BALANCING */
 
 /*
  * Walk through page tables and collect pages to be migrated.