@@ -800,7 +800,7 @@ static void update_curr(struct cfs_rq *cfs_rq)
 		      max(delta_exec, curr->statistics.exec_max));
 
 	curr->sum_exec_runtime += delta_exec;
-	schedstat_add(cfs_rq, exec_clock, delta_exec);
+	schedstat_add(cfs_rq->exec_clock, delta_exec);
 
 	curr->vruntime += calc_delta_fair(delta_exec, curr);
 	update_min_vruntime(cfs_rq);
@@ -3275,7 +3275,7 @@ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		d = -d;
 
 	if (d > 3*sysctl_sched_latency)
-		schedstat_inc(cfs_rq, nr_spread_over);
+		schedstat_inc(cfs_rq->nr_spread_over);
 #endif
 }
 
@@ -5164,13 +5164,13 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
 
 	balanced = this_eff_load <= prev_eff_load;
 
-	schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
+	schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts);
 
 	if (!balanced)
 		return 0;
 
-	schedstat_inc(sd, ttwu_move_affine);
-	schedstat_inc(p, se.statistics.nr_wakeups_affine);
+	schedstat_inc(sd->ttwu_move_affine);
+	schedstat_inc(p->se.statistics.nr_wakeups_affine);
 
 	return 1;
 }
@@ -6183,7 +6183,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 	if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
 		int cpu;
 
-		schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
+		schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
 
 		env->flags |= LBF_SOME_PINNED;
 
@@ -6214,7 +6214,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 	env->flags &= ~LBF_ALL_PINNED;
 
 	if (task_running(env->src_rq, p)) {
-		schedstat_inc(p, se.statistics.nr_failed_migrations_running);
+		schedstat_inc(p->se.statistics.nr_failed_migrations_running);
 		return 0;
 	}
 
@@ -6231,13 +6231,13 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 	if (tsk_cache_hot <= 0 ||
 	    env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
 		if (tsk_cache_hot == 1) {
-			schedstat_inc(env->sd, lb_hot_gained[env->idle]);
-			schedstat_inc(p, se.statistics.nr_forced_migrations);
+			schedstat_inc(env->sd->lb_hot_gained[env->idle]);
+			schedstat_inc(p->se.statistics.nr_forced_migrations);
 		}
 		return 1;
 	}
 
-	schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
+	schedstat_inc(p->se.statistics.nr_failed_migrations_hot);
 	return 0;
 }
 
@@ -6277,7 +6277,7 @@ static struct task_struct *detach_one_task(struct lb_env *env)
 		 * so we can safely collect stats here rather than
 		 * inside detach_tasks().
 		 */
-		schedstat_inc(env->sd, lb_gained[env->idle]);
+		schedstat_inc(env->sd->lb_gained[env->idle]);
 		return p;
 	}
 	return NULL;
@@ -6369,7 +6369,7 @@ next:
 	 * so we can safely collect detach_one_task() stats here rather
 	 * than inside detach_one_task().
	 */
-	schedstat_add(env->sd, lb_gained[env->idle], detached);
+	schedstat_add(env->sd->lb_gained[env->idle], detached);
 
 	return detached;
 }
@@ -7510,7 +7510,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 
 	cpumask_copy(cpus, cpu_active_mask);
 
-	schedstat_inc(sd, lb_count[idle]);
+	schedstat_inc(sd->lb_count[idle]);
 
 redo:
 	if (!should_we_balance(&env)) {
@@ -7520,19 +7520,19 @@ redo:
 
 	group = find_busiest_group(&env);
 	if (!group) {
-		schedstat_inc(sd, lb_nobusyg[idle]);
+		schedstat_inc(sd->lb_nobusyg[idle]);
 		goto out_balanced;
 	}
 
 	busiest = find_busiest_queue(&env, group);
 	if (!busiest) {
-		schedstat_inc(sd, lb_nobusyq[idle]);
+		schedstat_inc(sd->lb_nobusyq[idle]);
 		goto out_balanced;
 	}
 
 	BUG_ON(busiest == env.dst_rq);
 
-	schedstat_add(sd, lb_imbalance[idle], env.imbalance);
+	schedstat_add(sd->lb_imbalance[idle], env.imbalance);
 
 	env.src_cpu = busiest->cpu;
 	env.src_rq = busiest;
@@ -7639,7 +7639,7 @@ more_balance:
 	}
 
 	if (!ld_moved) {
-		schedstat_inc(sd, lb_failed[idle]);
+		schedstat_inc(sd->lb_failed[idle]);
 		/*
 		 * Increment the failure counter only on periodic balance.
 		 * We do not want newidle balance, which can be very
@@ -7722,7 +7722,7 @@ out_all_pinned:
 	 * we can't migrate them. Let the imbalance flag set so parent level
 	 * can try to migrate them.
	 */
-	schedstat_inc(sd, lb_balanced[idle]);
+	schedstat_inc(sd->lb_balanced[idle]);
 
 	sd->nr_balance_failed = 0;
 
@@ -7915,15 +7915,15 @@ static int active_load_balance_cpu_stop(void *data)
 			.idle		= CPU_IDLE,
 		};
 
-		schedstat_inc(sd, alb_count);
+		schedstat_inc(sd->alb_count);
 
 		p = detach_one_task(&env);
 		if (p) {
-			schedstat_inc(sd, alb_pushed);
+			schedstat_inc(sd->alb_pushed);
 			/* Active balancing done, reset the failure counter. */
 			sd->nr_balance_failed = 0;
 		} else {
-			schedstat_inc(sd, alb_failed);
+			schedstat_inc(sd->alb_failed);
 		}
 	}
 	rcu_read_unlock();
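
A note on the shape of the change: every hunk above is the same mechanical conversion. schedstat_inc()/schedstat_add() previously took a base pointer plus a field name and glued them together inside the macro; after this patch the caller passes the fully dereferenced lvalue, so the call sites read as ordinary C expressions. The header-style sketch below shows roughly what the single-argument macros would look like; the macro definitions themselves are not part of these hunks, and the CONFIG_SCHEDSTATS/schedstat_enabled() guard is an assumption carried over from the existing schedstats machinery.

/* Sketch (assumed, not taken from these hunks): new single-argument form. */
#ifdef CONFIG_SCHEDSTATS
#define schedstat_inc(var)		do { if (schedstat_enabled()) { var++; } } while (0)
#define schedstat_add(var, amt)		do { if (schedstat_enabled()) { var += (amt); } } while (0)
#else
#define schedstat_inc(var)		do { } while (0)
#define schedstat_add(var, amt)		do { } while (0)
#endif

With this form, schedstat_inc(p->se.statistics.nr_wakeups_affine) and schedstat_add(env->sd->lb_gained[env->idle], detached) hand a complete lvalue to the macro, which is why each call site in the diff only changes how its first argument is spelled.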