@@ -738,12 +738,56 @@ static void update_curr_fair(struct rq *rq)
 	update_curr(cfs_rq_of(&rq->curr->se));
 }
 
+#ifdef CONFIG_SCHEDSTATS
 static inline void
 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	schedstat_set(se->statistics.wait_start, rq_clock(rq_of(cfs_rq)));
+	u64 wait_start = rq_clock(rq_of(cfs_rq));
+
+	if (entity_is_task(se) && task_on_rq_migrating(task_of(se)) &&
+	    likely(wait_start > se->statistics.wait_start))
+		wait_start -= se->statistics.wait_start;
+
+	se->statistics.wait_start = wait_start;
 }
 
+static void
+update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	struct task_struct *p;
+	u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start;
+
+	if (entity_is_task(se)) {
+		p = task_of(se);
+		if (task_on_rq_migrating(p)) {
+			/*
+			 * Preserve migrating task's wait time so wait_start
+			 * time stamp can be adjusted to accumulate wait time
+			 * prior to migration.
+			 */
+			se->statistics.wait_start = delta;
+			return;
+		}
+		trace_sched_stat_wait(p, delta);
+	}
+
+	se->statistics.wait_max = max(se->statistics.wait_max, delta);
+	se->statistics.wait_count++;
+	se->statistics.wait_sum += delta;
+	se->statistics.wait_start = 0;
+}
+#else
+static inline void
+update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+}
+
+static inline void
+update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+}
+#endif
+
 /*
  * Task is being enqueued - update stats:
  */
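
With this hunk, wait_start does double duty: it normally holds an rq_clock() timestamp, but for a task caught mid-migration update_stats_wait_end() stores the wait accumulated so far, and update_stats_wait_start() on the destination CPU subtracts that saved amount from the current clock to backdate the stamp. The likely() test guards the case where the destination clock has not advanced past the saved value. A minimal standalone sketch of the encoding (my illustration with hypothetical names, not kernel code):

	#include <stdint.h>

	struct wait_stats { uint64_t wait_start; };

	/* Dequeue for migration: timestamp -> wait accumulated so far. */
	static void save_wait(struct wait_stats *s, uint64_t now)
	{
		s->wait_start = now - s->wait_start;
	}

	/* Enqueue after migration: saved wait -> backdated timestamp. */
	static void resume_wait(struct wait_stats *s, uint64_t now)
	{
		uint64_t start = now;

		if (now > s->wait_start)	/* clocks differ across CPUs */
			start -= s->wait_start;	/* backdate by saved wait    */
		s->wait_start = start;
	}

When the task finally runs, rq_clock() - wait_start then covers the wait accrued on both run queues (time spent in flight between dequeue and enqueue is not counted).
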
@@ -757,23 +801,6 @@ static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	update_stats_wait_start(cfs_rq, se);
 }
 
-static void
-update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-	schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
-			rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start));
-	schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
-	schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
-			rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
-#ifdef CONFIG_SCHEDSTATS
-	if (entity_is_task(se)) {
-		trace_sched_stat_wait(task_of(se),
-			rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
-	}
-#endif
-	schedstat_set(se->statistics.wait_start, 0);
-}
-
 static inline void
 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
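
This deletion is a move, not a removal: update_stats_wait_end() now lives above, inside the single #ifdef CONFIG_SCHEDSTATS block. The old body also recomputed rq_clock() - wait_start separately for wait_max, wait_sum and the tracepoint; the new one samples the clock once into delta.
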
@@ -5745,8 +5772,8 @@ static void detach_task(struct task_struct *p, struct lb_env *env)
 {
 	lockdep_assert_held(&env->src_rq->lock);
 
-	deactivate_task(env->src_rq, p, 0);
 	p->on_rq = TASK_ON_RQ_MIGRATING;
+	deactivate_task(env->src_rq, p, 0);
 	set_task_cpu(p, env->dst_cpu);
 }
 
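
The reorder here is what arms the stats path: p->on_rq is set to TASK_ON_RQ_MIGRATING before deactivate_task() runs, so the dequeue path reaches update_stats_wait_end() with task_on_rq_migrating() already true and the accrued wait is carried over instead of being closed out at migration time. With the old order the dequeue looked like an ordinary wait end, which under-reported wait time and counted the migration itself as a completed wait.
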
@@ -5879,8 +5906,8 @@ static void attach_task(struct rq *rq, struct task_struct *p)
 	lockdep_assert_held(&rq->lock);
 
 	BUG_ON(task_rq(p) != rq);
-	p->on_rq = TASK_ON_RQ_QUEUED;
 	activate_task(rq, p, 0);
+	p->on_rq = TASK_ON_RQ_QUEUED;
 	check_preempt_curr(rq, p, 0);
 }
 
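attach_task() is mirrored: activate_task() now runs while p->on_rq still reads TASK_ON_RQ_MIGRATING, so the enqueue path's update_stats_wait_start() can backdate the timestamp by the saved wait; only afterwards is the state switched to TASK_ON_RQ_QUEUED. A rough standalone sketch of the resulting protocol (simplified, hypothetical names, not the kernel's actual call chain):

	struct task { int on_rq; };

	enum { TASK_QUEUED = 1, TASK_MIGRATING = 2 };

	/* Stand-ins for the dequeue/enqueue paths that end up in the
	 * update_stats_wait_*() helpers. */
	static void dequeue(struct task *p) { (void)p; /* -> wait_end   */ }
	static void enqueue(struct task *p) { (void)p; /* -> wait_start */ }

	static void detach(struct task *p)
	{
		p->on_rq = TASK_MIGRATING;	/* mark first ...            */
		dequeue(p);			/* ... so wait_end sees the
						 * migration and saves the
						 * accrued wait             */
	}

	static void attach(struct task *p)
	{
		enqueue(p);			/* wait_start still sees the
						 * migration and backdates  */
		p->on_rq = TASK_QUEUED;		/* ... only then mark queued */
	}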