@@ -2712,6 +2712,20 @@ static inline void update_load_avg(struct sched_entity *se, int update_tg)
 
 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
+	/*
+	 * If we got migrated (either between CPUs or between cgroups) we'll
+	 * have aged the average right before clearing @last_update_time.
+	 */
+	if (se->avg.last_update_time) {
+		__update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
+				  &se->avg, 0, 0, NULL);
+
+		/*
+		 * XXX: we could have just aged the entire load away if we've been
+		 * absent from the fair class for too long.
+		 */
+	}
+
 	se->avg.last_update_time = cfs_rq->avg.last_update_time;
 	cfs_rq->avg.load_avg += se->avg.load_avg;
 	cfs_rq->avg.load_sum += se->avg.load_sum;
@@ -7945,6 +7959,9 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p)
 	se->depth = se->parent ? se->parent->depth + 1 : 0;
 #endif
 
+	/* Synchronize task with its cfs_rq */
+	attach_entity_load_avg(cfs_rq_of(&p->se), &p->se);
+
 	if (!task_on_rq_queued(p)) {
 
 		/*
@@ -8044,6 +8061,12 @@ static void task_move_group_fair(struct task_struct *p, int queued)
 	/* Synchronize task with its prev cfs_rq */
 	detach_entity_load_avg(cfs_rq, se);
 	set_task_rq(p, task_cpu(p));
+
+#ifdef CONFIG_SMP
+	/* Signal that se's cfs_rq has changed: the task migrated. */
+	p->se.avg.last_update_time = 0;
+#endif
+
 	se->depth = se->parent ? se->parent->depth + 1 : 0;
 	cfs_rq = cfs_rq_of(se);
 	if (!queued)
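
For reference, below is a minimal user-space sketch of the attach/detach pairing the hunks above rely on. It is not kernel code: toy_avg, toy_rq, decay() and the halve-per-period rule are illustrative stand-ins for sched_avg, cfs_rq and __update_load_avg() (real PELT decays by y^n with y^32 = 1/2 over 1024 us periods, and the rq-side aggregate is aged separately). The point it demonstrates is the protocol in the patch: detach ages the entity average to the current clock, subtracts it, and zeroes last_update_time as the "migrated" flag; attach only ages when last_update_time is still set, then adopts the destination clock and adds the average in.

#include <stdio.h>

struct toy_avg {
	unsigned long load_avg;
	unsigned long long last_update_time;
};

struct toy_rq {
	struct toy_avg avg;
};

/* Geometric-decay stand-in for __update_load_avg(): halve once per
 * 1024-unit period. */
static void decay(struct toy_avg *a, unsigned long long now)
{
	unsigned long long periods = (now - a->last_update_time) / 1024;

	a->load_avg >>= periods > 63 ? 63 : periods;
	a->last_update_time = now;
}

static void detach(struct toy_rq *rq, struct toy_avg *se, unsigned long long now)
{
	decay(se, now);				/* age to the current clock */
	rq->avg.load_avg -= se->load_avg;	/* leave the old aggregate */
	se->last_update_time = 0;		/* flag: between runqueues */
}

static void attach(struct toy_rq *rq, struct toy_avg *se, unsigned long long now)
{
	if (se->last_update_time)		/* not migrated: age first */
		decay(se, now);
	se->last_update_time = now;		/* adopt destination clock */
	rq->avg.load_avg += se->load_avg;	/* join the new aggregate */
}

int main(void)
{
	struct toy_rq src = { { 2048, 0 } };
	struct toy_rq dst = { { 1024, 0 } };
	struct toy_avg se = { 512, 0 };

	attach(&src, &se, 0);
	detach(&src, &se, 4096);	/* 4 periods: 512 decays to 32 */
	attach(&dst, &se, 4096);	/* lands on dst at its decayed value */
	printf("src=%lu dst=%lu se=%lu\n",
	       src.avg.load_avg, dst.avg.load_avg, se.load_avg);
	return 0;
}

Running this prints src=2528 dst=1056 se=32: the entity joins dst at its aged contribution of 32 rather than the stale 512, which is exactly why attach_entity_load_avg() must see either a zero last_update_time (already aged at detach) or age the average itself before summing.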