@@ -6672,17 +6672,44 @@ out:
 	return ld_moved;
 }
 
+static inline unsigned long
+get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)
+{
+	unsigned long interval = sd->balance_interval;
+
+	if (cpu_busy)
+		interval *= sd->busy_factor;
+
+	/* scale ms to jiffies */
+	interval = msecs_to_jiffies(interval);
+	interval = clamp(interval, 1UL, max_load_balance_interval);
+
+	return interval;
+}
+
+static inline void
+update_next_balance(struct sched_domain *sd, int cpu_busy, unsigned long *next_balance)
+{
+	unsigned long interval, next;
+
+	interval = get_sd_balance_interval(sd, cpu_busy);
+	next = sd->last_balance + interval;
+
+	if (time_after(*next_balance, next))
+		*next_balance = next;
+}
+
 /*
  * idle_balance is called by schedule() if this_cpu is about to become
  * idle. Attempts to pull tasks from other CPUs.
  */
 static int idle_balance(struct rq *this_rq)
 {
+	unsigned long next_balance = jiffies + HZ;
+	int this_cpu = this_rq->cpu;
 	struct sched_domain *sd;
 	int pulled_task = 0;
-	unsigned long next_balance = jiffies + HZ;
 	u64 curr_cost = 0;
-	int this_cpu = this_rq->cpu;
 
 	idle_enter_fair(this_rq);
 
@@ -6692,8 +6719,15 @@ static int idle_balance(struct rq *this_rq)
 	 */
 	this_rq->idle_stamp = rq_clock(this_rq);
 
-	if (this_rq->avg_idle < sysctl_sched_migration_cost)
+	if (this_rq->avg_idle < sysctl_sched_migration_cost) {
+		rcu_read_lock();
+		sd = rcu_dereference_check_sched_domain(this_rq->sd);
+		if (sd)
+			update_next_balance(sd, 0, &next_balance);
+		rcu_read_unlock();
+
 		goto out;
+	}
 
 	/*
 	 * Drop the rq->lock, but keep IRQ/preempt disabled.
@@ -6703,15 +6737,16 @@ static int idle_balance(struct rq *this_rq)
 	update_blocked_averages(this_cpu);
 	rcu_read_lock();
 	for_each_domain(this_cpu, sd) {
-		unsigned long interval;
 		int continue_balancing = 1;
 		u64 t0, domain_cost;
 
 		if (!(sd->flags & SD_LOAD_BALANCE))
 			continue;
 
-		if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost)
+		if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
+			update_next_balance(sd, 0, &next_balance);
 			break;
+		}
 
 		if (sd->flags & SD_BALANCE_NEWIDLE) {
 			t0 = sched_clock_cpu(this_cpu);
@@ -6727,9 +6762,7 @@ static int idle_balance(struct rq *this_rq)
 			curr_cost += domain_cost;
 		}
 
-		interval = msecs_to_jiffies(sd->balance_interval);
-		if (time_after(next_balance, sd->last_balance + interval))
-			next_balance = sd->last_balance + interval;
+		update_next_balance(sd, 0, &next_balance);
 
 		/*
 		 * Stop searching for tasks to pull if there are
@@ -6753,15 +6786,11 @@ static int idle_balance(struct rq *this_rq)
 	if (this_rq->cfs.h_nr_running && !pulled_task)
 		pulled_task = 1;
 
-	if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
-		/*
-		 * We are going idle. next_balance may be set based on
-		 * a busy processor. So reset next_balance.
-		 */
+out:
+	/* Move the next balance forward */
+	if (time_after(this_rq->next_balance, next_balance))
 		this_rq->next_balance = next_balance;
-	}
 
-out:
 	/* Is there a task of a high priority class? */
 	if (this_rq->nr_running != this_rq->cfs.h_nr_running)
 		pulled_task = -1;
@@ -7044,16 +7073,9 @@ static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
 			break;
 		}
 
-		interval = sd->balance_interval;
-		if (idle != CPU_IDLE)
-			interval *= sd->busy_factor;
-
-		/* scale ms to jiffies */
-		interval = msecs_to_jiffies(interval);
-		interval = clamp(interval, 1UL, max_load_balance_interval);
+		interval = get_sd_balance_interval(sd, idle != CPU_IDLE);
 
 		need_serialize = sd->flags & SD_SERIALIZE;
-
 		if (need_serialize) {
 			if (!spin_trylock(&balancing))
 				goto out;
@@ -7069,6 +7091,7 @@ static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
 			idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
 		}
 		sd->last_balance = jiffies;
+		interval = get_sd_balance_interval(sd, idle != CPU_IDLE);
 	}
 	if (need_serialize)
 		spin_unlock(&balancing);
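
For readers who want to poke at the arithmetic, here is a minimal userspace sketch of the two new helpers. It is an illustration, not kernel code: HZ, msecs_to_jiffies(), time_after(), clamp(), max_load_balance_interval, and the stripped-down sched_domain below are stand-ins with made-up values (the kernel derives max_load_balance_interval from the number of online CPUs).

#include <stdio.h>

/* Stand-ins for kernel facilities -- values are illustrative only. */
#define HZ 1000UL
#define msecs_to_jiffies(ms)	((ms) * HZ / 1000UL)
#define time_after(a, b)	((long)((b) - (a)) < 0)
#define clamp(val, lo, hi)	((val) < (lo) ? (lo) : ((val) > (hi) ? (hi) : (val)))

static const unsigned long max_load_balance_interval = HZ;	/* placeholder cap */

struct sched_domain {			/* only the fields the helpers read */
	unsigned long balance_interval;	/* in ms */
	unsigned int busy_factor;
	unsigned long last_balance;	/* in jiffies */
};

static unsigned long get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)
{
	unsigned long interval = sd->balance_interval;

	if (cpu_busy)
		interval *= sd->busy_factor;	/* balance less often when busy */

	/* scale ms to jiffies, then bound the result */
	interval = msecs_to_jiffies(interval);
	return clamp(interval, 1UL, max_load_balance_interval);
}

static void update_next_balance(struct sched_domain *sd, int cpu_busy,
				unsigned long *next_balance)
{
	unsigned long next = sd->last_balance + get_sd_balance_interval(sd, cpu_busy);

	/* Only ever pull the deadline earlier, never push it out. */
	if (time_after(*next_balance, next))
		*next_balance = next;
}

int main(void)
{
	unsigned long jiffies = 5000;			/* pretend "now" */
	unsigned long next_balance = jiffies + HZ;	/* default: one second out */
	struct sched_domain sd = {
		.balance_interval = 64,			/* 64 ms */
		.busy_factor = 32,
		.last_balance = 4990,
	};

	update_next_balance(&sd, 0, &next_balance);	/* idle: 64-jiffy interval wins */
	printf("idle: next balance due in %lu jiffies\n", next_balance - jiffies);

	next_balance = jiffies + HZ;
	update_next_balance(&sd, 1, &next_balance);	/* busy: 64 ms * 32, clamped to the cap */
	printf("busy: next balance due in %lu jiffies\n", next_balance - jiffies);
	return 0;
}

The invariant to notice: update_next_balance() only ever moves *next_balance earlier, so after the domain walk it holds the soonest deadline of any domain visited, and the reworked out: path in idle_balance() applies it even when the walk is skipped or aborted early.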