@@ -1378,7 +1378,7 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
 	       group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4;
 }
 
-static unsigned long weighted_cpuload(const int cpu);
+static unsigned long weighted_cpuload(struct rq *rq);
 static unsigned long source_load(int cpu, int type);
 static unsigned long target_load(int cpu, int type);
 static unsigned long capacity_of(int cpu);
@@ -1409,7 +1409,7 @@ static void update_numa_stats(struct numa_stats *ns, int nid)
 		struct rq *rq = cpu_rq(cpu);
 
 		ns->nr_running += rq->nr_running;
-		ns->load += weighted_cpuload(cpu);
+		ns->load += weighted_cpuload(rq);
 		ns->compute_capacity += capacity_of(cpu);
 
 		cpus++;
@@ -5125,9 +5125,9 @@ static void cpu_load_update(struct rq *this_rq, unsigned long this_load,
 }
 
 /* Used instead of source_load when we know the type == 0 */
-static unsigned long weighted_cpuload(const int cpu)
+static unsigned long weighted_cpuload(struct rq *rq)
 {
-	return cfs_rq_runnable_load_avg(&cpu_rq(cpu)->cfs);
+	return cfs_rq_runnable_load_avg(&rq->cfs);
 }
 
 #ifdef CONFIG_NO_HZ_COMMON
@@ -5172,7 +5172,7 @@ static void cpu_load_update_idle(struct rq *this_rq)
 	/*
 	 * bail if there's load or we're actually up-to-date.
 	 */
-	if (weighted_cpuload(cpu_of(this_rq)))
+	if (weighted_cpuload(this_rq))
 		return;
 
 	cpu_load_update_nohz(this_rq, READ_ONCE(jiffies), 0);
@@ -5193,7 +5193,7 @@ void cpu_load_update_nohz_start(void)
 	 * concurrently we'll exit nohz. And cpu_load write can race with
 	 * cpu_load_update_idle() but both updater would be writing the same.
 	 */
-	this_rq->cpu_load[0] = weighted_cpuload(cpu_of(this_rq));
+	this_rq->cpu_load[0] = weighted_cpuload(this_rq);
 }
 
 /*
@@ -5209,7 +5209,7 @@ void cpu_load_update_nohz_stop(void)
 	if (curr_jiffies == this_rq->last_load_update_tick)
 		return;
 
-	load = weighted_cpuload(cpu_of(this_rq));
+	load = weighted_cpuload(this_rq);
 	rq_lock(this_rq, &rf);
 	update_rq_clock(this_rq);
 	cpu_load_update_nohz(this_rq, curr_jiffies, load);
@@ -5235,7 +5235,7 @@ static void cpu_load_update_periodic(struct rq *this_rq, unsigned long load)
  */
 void cpu_load_update_active(struct rq *this_rq)
 {
-	unsigned long load = weighted_cpuload(cpu_of(this_rq));
+	unsigned long load = weighted_cpuload(this_rq);
 
 	if (tick_nohz_tick_stopped())
 		cpu_load_update_nohz(this_rq, READ_ONCE(jiffies), load);
@@ -5253,7 +5253,7 @@ void cpu_load_update_active(struct rq *this_rq)
 static unsigned long source_load(int cpu, int type)
 {
 	struct rq *rq = cpu_rq(cpu);
-	unsigned long total = weighted_cpuload(cpu);
+	unsigned long total = weighted_cpuload(rq);
 
 	if (type == 0 || !sched_feat(LB_BIAS))
 		return total;
@@ -5268,7 +5268,7 @@ static unsigned long source_load(int cpu, int type)
 static unsigned long target_load(int cpu, int type)
 {
 	struct rq *rq = cpu_rq(cpu);
-	unsigned long total = weighted_cpuload(cpu);
+	unsigned long total = weighted_cpuload(rq);
 
 	if (type == 0 || !sched_feat(LB_BIAS))
 		return total;
@@ -5290,7 +5290,7 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running);
-	unsigned long load_avg = weighted_cpuload(cpu);
+	unsigned long load_avg = weighted_cpuload(rq);
 
 	if (nr_running)
 		return load_avg / nr_running;
@@ -5550,7 +5550,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 				shallowest_idle_cpu = i;
 			}
 		} else if (shallowest_idle_cpu == -1) {
-			load = weighted_cpuload(i);
+			load = weighted_cpuload(cpu_rq(i));
 			if (load < min_load || (load == min_load && i == this_cpu)) {
 				min_load = load;
 				least_loaded_cpu = i;
@@ -7363,7 +7363,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 		sgs->nr_numa_running += rq->nr_numa_running;
 		sgs->nr_preferred_running += rq->nr_preferred_running;
 #endif
-		sgs->sum_weighted_load += weighted_cpuload(i);
+		sgs->sum_weighted_load += weighted_cpuload(rq);
 		/*
 		 * No need to call idle_cpu() if nr_running is not 0
 		 */
@@ -7892,7 +7892,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
 
 		capacity = capacity_of(i);
 
-		wl = weighted_cpuload(i);
+		wl = weighted_cpuload(rq);
 
 		/*
 		 * When comparing with imbalance, use weighted_cpuload()
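
For readers outside the kernel tree, here is a minimal standalone sketch of the pattern the patch applies: rather than taking a CPU id and re-deriving the runqueue with cpu_rq(cpu) inside the helper, the helper takes the struct rq pointer that most callers already hold; the one call site without an rq in hand (find_idlest_cpu) simply wraps the id with cpu_rq(i) at the call. All toy_* names and types below are hypothetical stand-ins for illustration, not the kernel's real definitions, and the load value is a plain field rather than the real PELT average.

#include <stdio.h>

#define NR_CPUS 4

struct toy_rq {
	unsigned long runnable_load_avg;	/* stand-in for the rq's cfs load tracking */
};

static struct toy_rq runqueues[NR_CPUS];

/* Stand-in for the kernel's cpu_rq(cpu) per-CPU runqueue lookup. */
static struct toy_rq *toy_cpu_rq(int cpu)
{
	return &runqueues[cpu];
}

/* After the patch: the helper takes the runqueue pointer directly. */
static unsigned long toy_weighted_cpuload(struct toy_rq *rq)
{
	return rq->runnable_load_avg;
}

int main(void)
{
	int cpu;
	struct toy_rq *rq;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		runqueues[cpu].runnable_load_avg = 100UL * (cpu + 1);

	/* A caller that only has a CPU id does the lookup itself, once. */
	printf("cpu1 load: %lu\n", toy_weighted_cpuload(toy_cpu_rq(1)));

	/* A caller that already holds the rq pointer skips the lookup entirely. */
	rq = toy_cpu_rq(3);
	printf("cpu3 load: %lu\n", toy_weighted_cpuload(rq));
	return 0;
}

The design point is visible in the diff itself: callers such as update_numa_stats(), update_sg_lb_stats() and find_busiest_queue() already have rq in scope, so passing it avoids a redundant per-CPU lookup and makes the dependency on the runqueue explicit in the signature.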