
sched/numa: Build per numa_group active node mask from numa_faults_cpu statistics

The numa_faults_cpu statistics are used to maintain an active_nodes nodemask
per numa_group. This allows us to be smarter about when to do numa migrations.

Signed-off-by: Rik van Riel <riel@redhat.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Chegu Vinod <chegu_vinod@hp.com>
Link: http://lkml.kernel.org/r/1390860228-21539-5-git-send-email-riel@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Rik van Riel, 12 years ago
commit 20e07dea28

1 changed file, 42 insertions(+), 0 deletions(-)

kernel/sched/fair.c (+42, -0)
@@ -885,6 +885,7 @@ struct numa_group {
 	struct list_head task_list;
 
 	struct rcu_head rcu;
+	nodemask_t active_nodes;
 	unsigned long total_faults;
 	unsigned long *faults_cpu;
 	unsigned long faults[0];
@@ -918,6 +919,12 @@ static inline unsigned long group_faults(struct task_struct *p, int nid)
 		p->numa_group->faults[task_faults_idx(nid, 1)];
 }
 
+static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
+{
+	return group->faults_cpu[task_faults_idx(nid, 0)] +
+		group->faults_cpu[task_faults_idx(nid, 1)];
+}
+
 /*
  * These return the fraction of accesses done by a particular task, or
  * task group, on a particular numa node.  The group weight is given a
@@ -1270,6 +1277,38 @@ static void numa_migrate_preferred(struct task_struct *p)
 	task_numa_migrate(p);
 }
 
+/*
+ * Find the nodes on which the workload is actively running. We do this by
+ * tracking the nodes from which NUMA hinting faults are triggered. This can
+ * be different from the set of nodes where the workload's memory is currently
+ * located.
+ *
+ * The bitmask is used to make smarter decisions on when to do NUMA page
+ * migrations. To prevent flip-flopping and excessive page migrations, nodes
+ * are added when they cause over 6/16 of the maximum number of faults, but
+ * only removed when they drop below 3/16.
+ */
+static void update_numa_active_node_mask(struct numa_group *numa_group)
+{
+	unsigned long faults, max_faults = 0;
+	int nid;
+
+	for_each_online_node(nid) {
+		faults = group_faults_cpu(numa_group, nid);
+		if (faults > max_faults)
+			max_faults = faults;
+	}
+
+	for_each_online_node(nid) {
+		faults = group_faults_cpu(numa_group, nid);
+		if (!node_isset(nid, numa_group->active_nodes)) {
+			if (faults > max_faults * 6 / 16)
+				node_set(nid, numa_group->active_nodes);
+		} else if (faults < max_faults * 3 / 16)
+			node_clear(nid, numa_group->active_nodes);
+	}
+}
+
 /*
  * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
  * increments. The more local the fault statistics are, the higher the scan
@@ -1412,6 +1451,7 @@ static void task_numa_placement(struct task_struct *p)
 	update_task_scan_period(p, fault_types[0], fault_types[1]);
 
 	if (p->numa_group) {
+		update_numa_active_node_mask(p->numa_group);
 		/*
 		 * If the preferred task and group nids are different,
 		 * iterate over the nodes again to find the best place.
@@ -1474,6 +1514,8 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
 		/* Second half of the array tracks nids where faults happen */
 		grp->faults_cpu = grp->faults + 2 * nr_node_ids;
 
+		node_set(task_node(current), grp->active_nodes);
+
 		for (i = 0; i < 4*nr_node_ids; i++)
 			grp->faults[i] = p->numa_faults_memory[i];
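
For readers outside the kernel tree, the 6/16-add / 3/16-remove hysteresis in update_numa_active_node_mask() can be tried in isolation. Below is a minimal userspace sketch of the same rule: the unsigned-int bitmask, the NR_NODES constant, the faults_cpu table, and the main() harness are hypothetical stand-ins for the kernel's nodemask_t, node iteration, and numa_group::faults_cpu, not the actual kernel API.

/*
 * Standalone illustration of the 6/16-add, 3/16-remove hysteresis used by
 * update_numa_active_node_mask() in the patch above. Userspace sketch only.
 */
#include <stdio.h>

#define NR_NODES 4

static unsigned long faults_cpu[NR_NODES];	/* stand-in per-node fault counts */
static unsigned int active_nodes;		/* bit n set => node n is active */

static void update_active_node_mask(void)
{
	unsigned long faults, max_faults = 0;
	int nid;

	for (nid = 0; nid < NR_NODES; nid++)
		if (faults_cpu[nid] > max_faults)
			max_faults = faults_cpu[nid];

	for (nid = 0; nid < NR_NODES; nid++) {
		faults = faults_cpu[nid];
		if (!(active_nodes & (1u << nid))) {
			/* Inactive node: only added above 6/16 of the max. */
			if (faults > max_faults * 6 / 16)
				active_nodes |= 1u << nid;
		} else if (faults < max_faults * 3 / 16) {
			/* Active node: only removed below 3/16 of the max. */
			active_nodes &= ~(1u << nid);
		}
	}
}

int main(void)
{
	/* Node 1 sees 500 of a 1000-fault maximum: above 6/16 (375), added. */
	faults_cpu[0] = 1000;
	faults_cpu[1] = 500;
	update_active_node_mask();
	printf("mask after rise: 0x%x\n", active_nodes);	/* 0x3 */

	/* Node 1 dips to 250: still above 3/16 (187), so it stays active. */
	faults_cpu[1] = 250;
	update_active_node_mask();
	printf("mask after dip:  0x%x\n", active_nodes);	/* 0x3 */

	/* Node 1 drops to 100: below 3/16, so it is finally removed. */
	faults_cpu[1] = 100;
	update_active_node_mask();
	printf("mask after drop: 0x%x\n", active_nodes);	/* 0x1 */

	return 0;
}

Because the add threshold (6/16 of max_faults) sits above the remove threshold (3/16), a node whose fault share oscillates between the two bands keeps whatever state it already has, which is exactly the flip-flopping the patch's comment says it wants to prevent.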