@@ -862,11 +862,72 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	se->statistics.wait_start = 0;
 }
 
+static void
+update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	struct task_struct *tsk = NULL;
+
+	if (entity_is_task(se))
+		tsk = task_of(se);
+
+	if (se->statistics.sleep_start) {
+		u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.sleep_start;
+
+		if ((s64)delta < 0)
+			delta = 0;
+
+		if (unlikely(delta > se->statistics.sleep_max))
+			se->statistics.sleep_max = delta;
+
+		se->statistics.sleep_start = 0;
+		se->statistics.sum_sleep_runtime += delta;
+
+		if (tsk) {
+			account_scheduler_latency(tsk, delta >> 10, 1);
+			trace_sched_stat_sleep(tsk, delta);
+		}
+	}
+	if (se->statistics.block_start) {
+		u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.block_start;
+
+		if ((s64)delta < 0)
+			delta = 0;
+
+		if (unlikely(delta > se->statistics.block_max))
+			se->statistics.block_max = delta;
+
+		se->statistics.block_start = 0;
+		se->statistics.sum_sleep_runtime += delta;
+
+		if (tsk) {
+			if (tsk->in_iowait) {
+				se->statistics.iowait_sum += delta;
+				se->statistics.iowait_count++;
+				trace_sched_stat_iowait(tsk, delta);
+			}
+
+			trace_sched_stat_blocked(tsk, delta);
+
+			/*
+			 * Blocking time is in units of nanosecs, so shift by
+			 * 20 to get a milliseconds-range estimation of the
+			 * amount of time that the task spent sleeping:
+			 */
+			if (unlikely(prof_on == SLEEP_PROFILING)) {
+				profile_hits(SLEEP_PROFILING,
+						(void *)get_wchan(tsk),
+						delta >> 20);
+			}
+			account_scheduler_latency(tsk, delta >> 10, 0);
+		}
+	}
+}
+
 /*
  * Task is being enqueued - update stats:
  */
 static inline void
-update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
+update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
 	/*
 	 * Are we enqueueing a waiting task? (for current tasks
@@ -874,6 +935,9 @@ update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	 */
 	if (se != cfs_rq->curr)
 		update_stats_wait_start(cfs_rq, se);
+
+	if (flags & ENQUEUE_WAKEUP)
+		update_stats_enqueue_sleeper(cfs_rq, se);
 }
 
 static inline void
@@ -910,7 +974,12 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
 }
 
 static inline void
-update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
+update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+}
+
+static inline void
+update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
 }
 
@@ -3197,68 +3266,6 @@ static inline int idle_balance(struct rq *rq)
 
 #endif /* CONFIG_SMP */
 
-static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-#ifdef CONFIG_SCHEDSTATS
-	struct task_struct *tsk = NULL;
-
-	if (entity_is_task(se))
-		tsk = task_of(se);
-
-	if (se->statistics.sleep_start) {
-		u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.sleep_start;
-
-		if ((s64)delta < 0)
-			delta = 0;
-
-		if (unlikely(delta > se->statistics.sleep_max))
-			se->statistics.sleep_max = delta;
-
-		se->statistics.sleep_start = 0;
-		se->statistics.sum_sleep_runtime += delta;
-
-		if (tsk) {
-			account_scheduler_latency(tsk, delta >> 10, 1);
-			trace_sched_stat_sleep(tsk, delta);
-		}
-	}
-	if (se->statistics.block_start) {
-		u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.block_start;
-
-		if ((s64)delta < 0)
-			delta = 0;
-
-		if (unlikely(delta > se->statistics.block_max))
-			se->statistics.block_max = delta;
-
-		se->statistics.block_start = 0;
-		se->statistics.sum_sleep_runtime += delta;
-
-		if (tsk) {
-			if (tsk->in_iowait) {
-				se->statistics.iowait_sum += delta;
-				se->statistics.iowait_count++;
-				trace_sched_stat_iowait(tsk, delta);
-			}
-
-			trace_sched_stat_blocked(tsk, delta);
-
-			/*
-			 * Blocking time is in units of nanosecs, so shift by
-			 * 20 to get a milliseconds-range estimation of the
-			 * amount of time that the task spent sleeping:
-			 */
-			if (unlikely(prof_on == SLEEP_PROFILING)) {
-				profile_hits(SLEEP_PROFILING,
-						(void *)get_wchan(tsk),
-						delta >> 20);
-			}
-			account_scheduler_latency(tsk, delta >> 10, 0);
-		}
-	}
-#endif
-}
-
 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 #ifdef CONFIG_SCHED_DEBUG
@@ -3385,15 +3392,12 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	account_entity_enqueue(cfs_rq, se);
 	update_cfs_shares(cfs_rq);
 
-	if (flags & ENQUEUE_WAKEUP) {
+	if (flags & ENQUEUE_WAKEUP)
 		place_entity(cfs_rq, se, 0);
-		if (schedstat_enabled())
-			enqueue_sleeper(cfs_rq, se);
-	}
 
 	check_schedstat_required();
 	if (schedstat_enabled()) {
-		update_stats_enqueue(cfs_rq, se);
+		update_stats_enqueue(cfs_rq, se, flags);
 		check_spread(cfs_rq, se);
 	}
 	if (!curr)
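A note on the unit conversions carried over into update_stats_enqueue_sleeper() above: scheduler timestamps are in nanoseconds, so delta >> 10 (divide by 1024) yields the roughly-microseconds value passed to account_scheduler_latency(), and delta >> 20 (divide by 1048576) yields the roughly-milliseconds value fed to the SLEEP_PROFILING histogram. Below is a minimal userspace sketch of that arithmetic; it is not part of the patch and assumes only the C standard library:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical example: an entity blocked for 3.5 ms, expressed in nanoseconds. */
	uint64_t delta = 3500000ULL;

	/* delta >> 10 divides by 1024: a microseconds-range estimate (prints 3417). */
	printf("delta >> 10 = %llu (~us)\n", (unsigned long long)(delta >> 10));

	/* delta >> 20 divides by 1048576: a milliseconds-range estimate (prints 3). */
	printf("delta >> 20 = %llu (~ms)\n", (unsigned long long)(delta >> 20));

	return 0;
}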