@@ -821,26 +821,34 @@ static void update_curr_fair(struct rq *rq)
 	update_curr(cfs_rq_of(&rq->curr->se));
 }
 
-#ifdef CONFIG_SCHEDSTATS
 static inline void
 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	u64 wait_start = rq_clock(rq_of(cfs_rq));
+	u64 wait_start, prev_wait_start;
+
+	if (!schedstat_enabled())
+		return;
+
+	wait_start = rq_clock(rq_of(cfs_rq));
+	prev_wait_start = schedstat_val(se->statistics.wait_start);
 
 	if (entity_is_task(se) && task_on_rq_migrating(task_of(se)) &&
-	    likely(wait_start > se->statistics.wait_start))
-		wait_start -= se->statistics.wait_start;
+	    likely(wait_start > prev_wait_start))
+		wait_start -= prev_wait_start;
 
-	se->statistics.wait_start = wait_start;
+	schedstat_set(se->statistics.wait_start, wait_start);
 }
 
-static void
+static inline void
 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	struct task_struct *p;
 	u64 delta;
 
-	delta = rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start;
+	if (!schedstat_enabled())
+		return;
+
+	delta = rq_clock(rq_of(cfs_rq)) - schedstat_val(se->statistics.wait_start);
 
 	if (entity_is_task(se)) {
 		p = task_of(se);
@@ -850,59 +858,67 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
 			 * time stamp can be adjusted to accumulate wait time
 			 * prior to migration.
 			 */
-			se->statistics.wait_start = delta;
+			schedstat_set(se->statistics.wait_start, delta);
 			return;
 		}
 		trace_sched_stat_wait(p, delta);
 	}
 
-	se->statistics.wait_max = max(se->statistics.wait_max, delta);
-	se->statistics.wait_count++;
-	se->statistics.wait_sum += delta;
-	se->statistics.wait_start = 0;
+	schedstat_set(se->statistics.wait_max,
+		      max(schedstat_val(se->statistics.wait_max), delta));
+	schedstat_inc(se->statistics.wait_count);
+	schedstat_add(se->statistics.wait_sum, delta);
+	schedstat_set(se->statistics.wait_start, 0);
 }
 
-static void
+static inline void
 update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	struct task_struct *tsk = NULL;
+	u64 sleep_start, block_start;
+
+	if (!schedstat_enabled())
+		return;
+
+	sleep_start = schedstat_val(se->statistics.sleep_start);
+	block_start = schedstat_val(se->statistics.block_start);
 
 	if (entity_is_task(se))
 		tsk = task_of(se);
 
-	if (se->statistics.sleep_start) {
-		u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.sleep_start;
+	if (sleep_start) {
+		u64 delta = rq_clock(rq_of(cfs_rq)) - sleep_start;
 
 		if ((s64)delta < 0)
 			delta = 0;
 
-		if (unlikely(delta > se->statistics.sleep_max))
-			se->statistics.sleep_max = delta;
+		if (unlikely(delta > schedstat_val(se->statistics.sleep_max)))
+			schedstat_set(se->statistics.sleep_max, delta);
 
-		se->statistics.sleep_start = 0;
-		se->statistics.sum_sleep_runtime += delta;
+		schedstat_set(se->statistics.sleep_start, 0);
+		schedstat_add(se->statistics.sum_sleep_runtime, delta);
 
 		if (tsk) {
 			account_scheduler_latency(tsk, delta >> 10, 1);
 			trace_sched_stat_sleep(tsk, delta);
 		}
 	}
-	if (se->statistics.block_start) {
-		u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.block_start;
+	if (block_start) {
+		u64 delta = rq_clock(rq_of(cfs_rq)) - block_start;
 
 		if ((s64)delta < 0)
 			delta = 0;
 
-		if (unlikely(delta > se->statistics.block_max))
-			se->statistics.block_max = delta;
+		if (unlikely(delta > schedstat_val(se->statistics.block_max)))
+			schedstat_set(se->statistics.block_max, delta);
 
-		se->statistics.block_start = 0;
-		se->statistics.sum_sleep_runtime += delta;
+		schedstat_set(se->statistics.block_start, 0);
+		schedstat_add(se->statistics.sum_sleep_runtime, delta);
 
 		if (tsk) {
 			if (tsk->in_iowait) {
-				se->statistics.iowait_sum += delta;
-				se->statistics.iowait_count++;
+				schedstat_add(se->statistics.iowait_sum, delta);
+				schedstat_inc(se->statistics.iowait_count);
 				trace_sched_stat_iowait(tsk, delta);
 			}
 
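
The hunks above convert every direct access to se->statistics into the schedstat_*() accessors. For reference, their companion definitions in kernel/sched/stats.h look roughly like the sketch below (paraphrased from memory, not quoted verbatim; the sched_schedstats static key name is likewise recalled rather than taken from this patch): with CONFIG_SCHEDSTATS=y each update happens only behind the static branch, and with CONFIG_SCHEDSTATS=n the accessors compile to nothing, which is what allows the call-site guards and the #else stubs to go away in the later hunks.

	#ifdef CONFIG_SCHEDSTATS
	#define schedstat_enabled()		static_branch_unlikely(&sched_schedstats)
	#define schedstat_inc(var)		do { if (schedstat_enabled()) { var++; } } while (0)
	#define schedstat_add(var, amt)		do { if (schedstat_enabled()) { var += (amt); } } while (0)
	#define schedstat_set(var, val)		do { if (schedstat_enabled()) { var = (val); } } while (0)
	#define schedstat_val(var)		(var)	/* raw read; callers check schedstat_enabled() first */
	#else /* !CONFIG_SCHEDSTATS */
	#define schedstat_enabled()		0
	#define schedstat_inc(var)		do { } while (0)
	#define schedstat_add(var, amt)		do { } while (0)
	#define schedstat_set(var, val)		do { } while (0)
	#define schedstat_val(var)		0
	#endif

The static branch keeps the runtime-disabled case down to a patched-out jump, so dropping the per-call-site checks should not reintroduce overhead when schedstats is off.
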
@@ -929,6 +945,9 @@ update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 static inline void
 update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
+	if (!schedstat_enabled())
+		return;
+
 	/*
 	 * Are we enqueueing a waiting task? (for current tasks
 	 * a dequeue/enqueue event is a NOP)
@@ -943,6 +962,10 @@ update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 static inline void
 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
+
+	if (!schedstat_enabled())
+		return;
+
 	/*
 	 * Mark the end of the wait period if dequeueing a
 	 * waiting task:
@@ -950,45 +973,18 @@ update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	if (se != cfs_rq->curr)
 		update_stats_wait_end(cfs_rq, se);
 
-	if (flags & DEQUEUE_SLEEP) {
-		if (entity_is_task(se)) {
-			struct task_struct *tsk = task_of(se);
+	if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) {
+		struct task_struct *tsk = task_of(se);
 
-			if (tsk->state & TASK_INTERRUPTIBLE)
-				se->statistics.sleep_start = rq_clock(rq_of(cfs_rq));
-			if (tsk->state & TASK_UNINTERRUPTIBLE)
-				se->statistics.block_start = rq_clock(rq_of(cfs_rq));
-		}
+		if (tsk->state & TASK_INTERRUPTIBLE)
+			schedstat_set(se->statistics.sleep_start,
+				      rq_clock(rq_of(cfs_rq)));
+		if (tsk->state & TASK_UNINTERRUPTIBLE)
+			schedstat_set(se->statistics.block_start,
+				      rq_clock(rq_of(cfs_rq)));
 	}
-
-}
-#else
-static inline void
-update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-}
-
-static inline void
-update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-}
-
-static inline void
-update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
 }
 
-static inline void
-update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
-{
-}
-
-static inline void
-update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
-{
-}
-#endif
-
 /*
  * We are picking a new current task - update its stats:
  */
@@ -3396,10 +3392,8 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 		place_entity(cfs_rq, se, 0);
 
 	check_schedstat_required();
-	if (schedstat_enabled()) {
-		update_stats_enqueue(cfs_rq, se, flags);
-		check_spread(cfs_rq, se);
-	}
+	update_stats_enqueue(cfs_rq, se, flags);
+	check_spread(cfs_rq, se);
 	if (!curr)
 		__enqueue_entity(cfs_rq, se);
 	se->on_rq = 1;
@@ -3466,8 +3460,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	update_curr(cfs_rq);
 	dequeue_entity_load_avg(cfs_rq, se);
 
-	if (schedstat_enabled())
-		update_stats_dequeue(cfs_rq, se, flags);
+	update_stats_dequeue(cfs_rq, se, flags);
 
 	clear_buddies(cfs_rq, se);
 
@@ -3541,25 +3534,25 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		 * a CPU. So account for the time it spent waiting on the
 		 * runqueue.
 		 */
-		if (schedstat_enabled())
-			update_stats_wait_end(cfs_rq, se);
+		update_stats_wait_end(cfs_rq, se);
 		__dequeue_entity(cfs_rq, se);
 		update_load_avg(se, 1);
 	}
 
 	update_stats_curr_start(cfs_rq, se);
 	cfs_rq->curr = se;
-#ifdef CONFIG_SCHEDSTATS
+
 	/*
 	 * Track our maximum slice length, if the CPU's load is at
 	 * least twice that of our own weight (i.e. dont track it
 	 * when there are only lesser-weight tasks around):
 	 */
 	if (schedstat_enabled() && rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
-		se->statistics.slice_max = max(se->statistics.slice_max,
-			se->sum_exec_runtime - se->prev_sum_exec_runtime);
+		schedstat_set(se->statistics.slice_max,
+			max((u64)schedstat_val(se->statistics.slice_max),
+			    se->sum_exec_runtime - se->prev_sum_exec_runtime));
 	}
-#endif
+
 	se->prev_sum_exec_runtime = se->sum_exec_runtime;
 }
 
@@ -3638,13 +3631,10 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
 	/* throttle cfs_rqs exceeding runtime */
 	check_cfs_rq_runtime(cfs_rq);
 
-	if (schedstat_enabled()) {
-		check_spread(cfs_rq, prev);
-		if (prev->on_rq)
-			update_stats_wait_start(cfs_rq, prev);
-	}
+	check_spread(cfs_rq, prev);
 
 	if (prev->on_rq) {
+		update_stats_wait_start(cfs_rq, prev);
 		/* Put 'current' back into the tree. */
 		__enqueue_entity(cfs_rq, prev);
 		/* in !on_rq case, update occurred at dequeue */
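
Taken together, the call-site hunks all apply one pattern; a minimal sketch, with the helper body abbreviated rather than copied from the hunks above:

	/* Call sites lose their schedstat_enabled() guard ... */
	update_stats_dequeue(cfs_rq, se, flags);

	/* ... because the helper now carries it and bails out early. */
	static inline void
	update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
	{
		if (!schedstat_enabled())
			return;
		/* wait/sleep/block bookkeeping via the schedstat_*() accessors */
	}

With schedstats disabled at runtime this costs one patched-out branch per call, and with CONFIG_SCHEDSTATS=n the accessors and the schedstat_enabled() check compile away, leaving empty inline helpers — which is why the old #else stub definitions could be deleted outright.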