@@ -5663,10 +5663,15 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
 }
 
 #ifdef CONFIG_NUMA_BALANCING
-/* Returns true if the destination node has incurred more faults */
+/*
+ * Returns true if the destination node is the preferred node.
+ * Needs to match fbq_classify_rq(): if there is a runnable task
+ * that is not on its preferred node, we should identify it.
+ */
 static bool migrate_improves_locality(struct task_struct *p, struct lb_env *env)
 {
 	struct numa_group *numa_group = rcu_dereference(p->numa_group);
+	unsigned long src_faults, dst_faults;
 	int src_nid, dst_nid;
 
 	if (!sched_feat(NUMA_FAVOUR_HIGHER) || !p->numa_faults ||
@@ -5680,29 +5685,30 @@ static bool migrate_improves_locality(struct task_struct *p, struct lb_env *env)
 	if (src_nid == dst_nid)
 		return false;
 
-	if (numa_group) {
-		/* Task is already in the group's interleave set. */
-		if (node_isset(src_nid, numa_group->active_nodes))
-			return false;
-
-		/* Task is moving into the group's interleave set. */
-		if (node_isset(dst_nid, numa_group->active_nodes))
-			return true;
-
-		return group_faults(p, dst_nid) > group_faults(p, src_nid);
-	}
-
 	/* Encourage migration to the preferred node. */
 	if (dst_nid == p->numa_preferred_nid)
 		return true;
 
-	return task_faults(p, dst_nid) > task_faults(p, src_nid);
+	/* Migrating away from the preferred node is bad. */
+	if (src_nid == p->numa_preferred_nid)
+		return false;
+
+	if (numa_group) {
+		src_faults = group_faults(p, src_nid);
+		dst_faults = group_faults(p, dst_nid);
+	} else {
+		src_faults = task_faults(p, src_nid);
+		dst_faults = task_faults(p, dst_nid);
+	}
+
+	return dst_faults > src_faults;
 }
 
 
 static bool migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
 {
 	struct numa_group *numa_group = rcu_dereference(p->numa_group);
+	unsigned long src_faults, dst_faults;
 	int src_nid, dst_nid;
 
 	if (!sched_feat(NUMA) || !sched_feat(NUMA_RESIST_LOWER))
@@ -5717,23 +5723,23 @@ static bool migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
 	if (src_nid == dst_nid)
 		return false;
 
-	if (numa_group) {
-		/* Task is moving within/into the group's interleave set. */
-		if (node_isset(dst_nid, numa_group->active_nodes))
-			return false;
+	/* Migrating away from the preferred node is bad. */
+	if (src_nid == p->numa_preferred_nid)
+		return true;
 
-		/* Task is moving out of the group's interleave set. */
-		if (node_isset(src_nid, numa_group->active_nodes))
-			return true;
+	/* Encourage migration to the preferred node. */
+	if (dst_nid == p->numa_preferred_nid)
+		return false;
 
-		return group_faults(p, dst_nid) < group_faults(p, src_nid);
+	if (numa_group) {
+		src_faults = group_faults(p, src_nid);
+		dst_faults = group_faults(p, dst_nid);
+	} else {
+		src_faults = task_faults(p, src_nid);
+		dst_faults = task_faults(p, dst_nid);
 	}
 
-	/* Migrating away from the preferred node is always bad. */
-	if (src_nid == p->numa_preferred_nid)
-		return true;
-
-	return task_faults(p, dst_nid) < task_faults(p, src_nid);
+	return dst_faults < src_faults;
 }
 
 #else
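
After this patch the two helpers are exact mirror images: each checks the preferred node first (in opposite directions) and only then falls back to comparing per-node fault counts, drawn from group statistics when the task belongs to a numa_group and from per-task statistics otherwise. Below is a minimal user-space sketch (not kernel code) of that shared decision structure: the fault table, NR_NODES, preferred_nid and the main() harness are invented for illustration, and the sched_feat() gates and task_faults()/group_faults() lookups from the patch are stubbed out.

/*
 * Standalone sketch of the post-patch decision order. The per-node
 * fault array below stands in for task_faults()/group_faults().
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_NODES 4

static unsigned long faults[NR_NODES] = { 100, 40, 10, 0 };
static int preferred_nid;	/* node 0 in this example */

/* Mirrors migrate_improves_locality(): preferred node first, faults last. */
static bool improves_locality(int src_nid, int dst_nid)
{
	if (src_nid == dst_nid)
		return false;
	if (dst_nid == preferred_nid)	/* moving onto the preferred node */
		return true;
	if (src_nid == preferred_nid)	/* moving off the preferred node */
		return false;
	return faults[dst_nid] > faults[src_nid];
}

/* Mirrors migrate_degrades_locality(): the exact inverse ordering. */
static bool degrades_locality(int src_nid, int dst_nid)
{
	if (src_nid == dst_nid)
		return false;
	if (src_nid == preferred_nid)
		return true;
	if (dst_nid == preferred_nid)
		return false;
	return faults[dst_nid] < faults[src_nid];
}

int main(void)
{
	/* 1 -> 2 loses faults (40 -> 10): degrades, does not improve. */
	printf("1->2: improves=%d degrades=%d\n",
	       improves_locality(1, 2), degrades_locality(1, 2));
	/* 3 -> 0 lands on the preferred node: improves unconditionally. */
	printf("3->0: improves=%d degrades=%d\n",
	       improves_locality(3, 0), degrades_locality(3, 0));
	return 0;
}

Because both helpers now test the preferred node before consulting fault counts, a move to or from the preferred node is decided without looking at the fault statistics at all; the group-versus-task distinction only affects which set of counters feeds the final comparison.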