@@ -1738,6 +1738,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
 	struct task_struct *p = current;
 	bool migrated = flags & TNF_MIGRATED;
 	int cpu_node = task_node(current);
+	int local = !!(flags & TNF_FAULT_LOCAL);
 	int priv;
 
 	if (!numabalancing_enabled)
@@ -1786,6 +1787,17 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
 			task_numa_group(p, last_cpupid, flags, &priv);
 	}
 
+	/*
+	 * If a workload spans multiple NUMA nodes, a shared fault that
+	 * occurs wholly within the set of nodes that the workload is
+	 * actively using should be counted as local. This allows the
+	 * scan rate to slow down when a workload has settled down.
+	 */
+	if (!priv && !local && p->numa_group &&
+			node_isset(cpu_node, p->numa_group->active_nodes) &&
+			node_isset(mem_node, p->numa_group->active_nodes))
+		local = 1;
+
 	task_numa_placement(p);
 
 	/*
@@ -1800,7 +1812,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
 
 	p->numa_faults_buffer_memory[task_faults_idx(mem_node, priv)] += pages;
 	p->numa_faults_buffer_cpu[task_faults_idx(cpu_node, priv)] += pages;
-	p->numa_faults_locality[!!(flags & TNF_FAULT_LOCAL)] += pages;
+	p->numa_faults_locality[local] += pages;
 }
 
 static void reset_ptenuma_scan(struct task_struct *p)
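
As a rough, standalone illustration of the classification the hunks above introduce, the sketch below mirrors the rule in plain C: a fault starts out local only when the TNF_FAULT_LOCAL flag was set, but a shared (non-private) fault whose CPU node and memory node both fall inside the group's active node set is also counted as local before it is folded into numa_faults_locality. The names struct numa_group_sketch, node_in_set() and classify_local() are simplified stand-ins invented for this example, not the kernel's actual types (the kernel uses nodemask_t and node_isset()).

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, simplified stand-in for the kernel's numa_group nodemask. */
struct numa_group_sketch {
	unsigned long active_nodes;	/* bit i set => node i is actively used */
};

static bool node_in_set(int node, unsigned long mask)
{
	return (mask >> node) & 1UL;
}

/*
 * Mirror of the locality classification added by the patch: a fault is
 * local if the TNF_FAULT_LOCAL-style flag was set, or if it is a shared
 * (non-private) fault whose CPU node and memory node both belong to the
 * group's active node set.
 */
static int classify_local(bool flag_local, bool priv, int cpu_node,
			  int mem_node, const struct numa_group_sketch *grp)
{
	int local = flag_local ? 1 : 0;

	if (!priv && !local && grp &&
	    node_in_set(cpu_node, grp->active_nodes) &&
	    node_in_set(mem_node, grp->active_nodes))
		local = 1;

	return local;
}

int main(void)
{
	struct numa_group_sketch grp = { .active_nodes = (1UL << 0) | (1UL << 1) };

	/* Shared fault: CPU on node 0, memory on node 1, both active => local (1). */
	printf("shared, both nodes active: %d\n",
	       classify_local(false, false, 0, 1, &grp));

	/* Shared fault touching node 2, which is not active => remote (0). */
	printf("shared, node 2 not active: %d\n",
	       classify_local(false, false, 0, 2, &grp));

	return 0;
}

The effect, as the comment in the patch states, is that a workload spread over several actively used nodes no longer sees all of its shared faults counted as remote, so the NUMA scan rate can back off once the workload has settled.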