@@ -97,6 +97,16 @@ unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
 
 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
 
+#ifdef CONFIG_SMP
+/*
+ * For asym packing, by default the lower numbered cpu has higher priority.
+ */
+int __weak arch_asym_cpu_priority(int cpu)
+{
+	return -cpu;
+}
+#endif
+
 #ifdef CONFIG_CFS_BANDWIDTH
 /*
  * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
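
The rewritten comparisons in the hunks below go through sched_asym_prefer(),
which is not shown here; the same series adds it to kernel/sched/sched.h,
essentially as this sketch (true when CPU a should be packed in preference
to CPU b):

	static inline bool sched_asym_prefer(int a, int b)
	{
		return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
	}

An architecture can override the __weak arch_asym_cpu_priority() added above
(x86's ITMT support is the intended user) to express a real priority ordering
instead of the lower-CPU-number default.
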
@@ -7388,16 +7398,18 @@ asym_packing:
 	if (env->idle == CPU_NOT_IDLE)
 		return true;
 	/*
-	 * ASYM_PACKING needs to move all the work to the lowest
-	 * numbered CPUs in the group, therefore mark all groups
-	 * higher than ourself as busy.
+	 * ASYM_PACKING needs to move all the work to the highest
+	 * priority CPUs in the group, therefore mark all groups
+	 * of lower priority than ourself as busy.
 	 */
-	if (sgs->sum_nr_running && env->dst_cpu < group_first_cpu(sg)) {
+	if (sgs->sum_nr_running &&
+	    sched_asym_prefer(env->dst_cpu, sg->asym_prefer_cpu)) {
 		if (!sds->busiest)
 			return true;
 
-		/* Prefer to move from highest possible cpu's work */
-		if (group_first_cpu(sds->busiest) < group_first_cpu(sg))
+		/* Prefer to move from lowest priority cpu's work */
+		if (sched_asym_prefer(sds->busiest->asym_prefer_cpu,
+				      sg->asym_prefer_cpu))
 			return true;
 	}
 
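
Under the default -cpu priority, each rewritten test reduces to the old
numeric comparison, which is why behaviour is unchanged on architectures
that never override the hook. A minimal userspace sketch (hypothetical,
not kernel code) checking that equivalence:

	#include <assert.h>
	#include <stdbool.h>

	/* The __weak default from the first hunk. */
	static int arch_asym_cpu_priority(int cpu)
	{
		return -cpu;
	}

	static bool sched_asym_prefer(int a, int b)
	{
		return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
	}

	int main(void)
	{
		/* sched_asym_prefer(dst, src) == (dst < src) for every pair,
		 * i.e. the old "lower CPU number wins" rule. */
		for (int dst = 0; dst < 8; dst++)
			for (int src = 0; src < 8; src++)
				assert(sched_asym_prefer(dst, src) == (dst < src));
		return 0;
	}
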
@@ -7549,8 +7561,8 @@ static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
 	if (!sds->busiest)
 		return 0;
 
-	busiest_cpu = group_first_cpu(sds->busiest);
-	if (env->dst_cpu > busiest_cpu)
+	busiest_cpu = sds->busiest->asym_prefer_cpu;
+	if (sched_asym_prefer(busiest_cpu, env->dst_cpu))
 		return 0;
 
 	env->imbalance = DIV_ROUND_CLOSEST(
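
This hunk and the previous one read sg->asym_prefer_cpu instead of
recomputing a preference on every balance pass. That field is filled in
once at domain build time; elsewhere in the series,
init_sched_groups_capacity() in kernel/sched/core.c records the
highest-priority CPU of each group, along these lines (a sketch, not the
verbatim hunk):

	int cpu, max_cpu = -1;

	if (sd->flags & SD_ASYM_PACKING) {
		for_each_cpu(cpu, sched_group_cpus(sg)) {
			/* Keep whichever CPU the architecture prefers most. */
			if (max_cpu < 0 || sched_asym_prefer(cpu, max_cpu))
				max_cpu = cpu;
		}
		sg->asym_prefer_cpu = max_cpu;
	}
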
@@ -7888,10 +7900,11 @@ static int need_active_balance(struct lb_env *env)
 
 		/*
 		 * ASYM_PACKING needs to force migrate tasks from busy but
-		 * higher numbered CPUs in order to pack all tasks in the
-		 * lowest numbered CPUs.
+		 * lower priority CPUs in order to pack all tasks in the
+		 * highest priority CPUs.
 		 */
-		if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
+		if ((sd->flags & SD_ASYM_PACKING) &&
+		    sched_asym_prefer(env->dst_cpu, env->src_cpu))
 			return 1;
 	}
 
@@ -8740,7 +8753,7 @@ static inline bool nohz_kick_needed(struct rq *rq)
 	unsigned long now = jiffies;
 	struct sched_domain_shared *sds;
 	struct sched_domain *sd;
-	int nr_busy, cpu = rq->cpu;
+	int nr_busy, i, cpu = rq->cpu;
 	bool kick = false;
 
 	if (unlikely(rq->idle_balance))
@@ -8791,12 +8804,18 @@ static inline bool nohz_kick_needed(struct rq *rq)
 	}
 
 	sd = rcu_dereference(per_cpu(sd_asym, cpu));
-	if (sd && (cpumask_first_and(nohz.idle_cpus_mask,
-				  sched_domain_span(sd)) < cpu)) {
-		kick = true;
-		goto unlock;
-	}
+	if (sd) {
+		for_each_cpu(i, sched_domain_span(sd)) {
+			if (i == cpu ||
+			    !cpumask_test_cpu(i, nohz.idle_cpus_mask))
+				continue;
 
+			if (sched_asym_prefer(i, cpu)) {
+				kick = true;
+				goto unlock;
+			}
+		}
+	}
 unlock:
 	rcu_read_unlock();
 	return kick;
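
With arbitrary priorities the lowest-numbered idle CPU is no longer
necessarily the most preferred one, so the single cpumask_first_and()
comparison has to become a scan of every idle CPU in the span. A small
userspace sketch of the decision, with a made-up priority table standing
in for an architecture override:

	#include <stdbool.h>
	#include <stdio.h>

	#define NR_DEMO_CPUS 4

	/* Hypothetical arch priorities: CPU 3 is the fastest core. */
	static const int prio[NR_DEMO_CPUS] = { 0, 1, 2, 3 };

	static bool sched_asym_prefer(int a, int b)
	{
		return prio[a] > prio[b];
	}

	int main(void)
	{
		const bool idle[NR_DEMO_CPUS] = { false, false, false, true };
		int cpu = 1;	/* busy CPU evaluating the kick */
		bool kick = false;

		/* The old test (first idle CPU number < cpu) sees idle CPU 3,
		 * and 3 < 1 fails, so it would never kick toward the fast
		 * core.  The per-CPU scan does: */
		for (int i = 0; i < NR_DEMO_CPUS; i++) {
			if (i == cpu || !idle[i])
				continue;
			if (sched_asym_prefer(i, cpu)) {	/* prio 3 > 1 */
				kick = true;
				break;
			}
		}
		printf("kick = %s\n", kick ? "true" : "false");	/* true */
		return 0;
	}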