@@ -2715,6 +2715,52 @@ static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
 	}
 }
 
+/*
+ * Called within set_task_rq() right before setting a task's cpu. The
+ * caller only guarantees p->pi_lock is held; no other assumptions,
+ * including the state of rq->lock, should be made.
+ */
+void set_task_rq_fair(struct sched_entity *se,
+		      struct cfs_rq *prev, struct cfs_rq *next)
+{
+	if (!sched_feat(ATTACH_AGE_LOAD))
+		return;
+
+	/*
+	 * We are supposed to update the task to "current" time, so that it is
+	 * up to date and ready to move to the new CPU/cfs_rq. But it is hard
+	 * to tell what the current time is here, so simply throw away the
+	 * out-of-date time. The wakee then ends up less decayed than it
+	 * should be, but giving the wakee a bit more load does no harm.
+	 */
+	if (se->avg.last_update_time && prev) {
+		u64 p_last_update_time;
+		u64 n_last_update_time;
+
+#ifndef CONFIG_64BIT
+		u64 p_last_update_time_copy;
+		u64 n_last_update_time_copy;
+
+		do {
+			p_last_update_time_copy = prev->load_last_update_time_copy;
+			n_last_update_time_copy = next->load_last_update_time_copy;
+
+			smp_rmb();
+
+			p_last_update_time = prev->avg.last_update_time;
+			n_last_update_time = next->avg.last_update_time;
+
+		} while (p_last_update_time != p_last_update_time_copy ||
+			 n_last_update_time != n_last_update_time_copy);
+#else
+		p_last_update_time = prev->avg.last_update_time;
+		n_last_update_time = next->avg.last_update_time;
+#endif
+		__update_load_avg(p_last_update_time, cpu_of(rq_of(prev)),
+				  &se->avg, 0, 0, NULL);
+		se->avg.last_update_time = n_last_update_time;
+	}
+}
 #else /* CONFIG_FAIR_GROUP_SCHED */
 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {}
 #endif /* CONFIG_FAIR_GROUP_SCHED */
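
For context, the header comment says set_task_rq_fair() runs from set_task_rq() right before the task's cpu is set. The hunk itself does not show that caller, so the following is only a rough sketch of how it might look (kernel-style C; the exact #ifdef guards and the RT fields are assumptions, not part of this patch):

static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
	struct task_group *tg = task_group(p);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* Age se->avg against the old cfs_rq before re-pointing it. */
	set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
	p->se.cfs_rq = tg->cfs_rq[cpu];
	p->se.parent = tg->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	p->rt.rt_rq  = tg->rt_rq[cpu];
	p->rt.parent = tg->rt_se[cpu];
#endif
}

The point of the ordering is that set_task_rq_fair() sees the old p->se.cfs_rq as prev and the new group's cfs_rq as next, so it can age the entity's load sum against prev's clock before the pointer is rewritten. Per the comment, only p->pi_lock is guaranteed to be held here, which is why the function cannot rely on rq->lock.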
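
The #ifndef CONFIG_64BIT retry loop in the hunk only works because the writer stores avg.last_update_time and load_last_update_time_copy in a fixed order with a write barrier in between, and the reader checks them in the opposite order. Below is a minimal sketch of that pairing; pelt_clock, pelt_clock_write and pelt_clock_read are made-up names for illustration (u64 and the smp_*mb() barriers come from the usual kernel headers), and the real writer side lives where the cfs_rq load average is updated, not in this patch:

struct pelt_clock {
	u64 last_update_time;
#ifndef CONFIG_64BIT
	u64 last_update_time_copy;	/* mirror used to detect torn reads */
#endif
};

/* Writer: store the value, then the mirror, ordered by smp_wmb(). */
static void pelt_clock_write(struct pelt_clock *pc, u64 now)
{
	pc->last_update_time = now;
#ifndef CONFIG_64BIT
	smp_wmb();
	pc->last_update_time_copy = now;
#endif
}

/* Reader: load the mirror, then the value; retry until they agree. */
static u64 pelt_clock_read(struct pelt_clock *pc)
{
#ifndef CONFIG_64BIT
	u64 copy, val;

	do {
		copy = pc->last_update_time_copy;
		smp_rmb();
		val = pc->last_update_time;
	} while (val != copy);

	return val;
#else
	/* An aligned 64-bit load cannot tear on 64-bit, so no mirror is needed. */
	return pc->last_update_time;
#endif
}

On 32-bit a 64-bit store or load may be split into two 32-bit halves, so an unsynchronized reader could observe half of the old timestamp and half of the new one. Accepting last_update_time only when it matches the mirror rejects such torn values, which is exactly what the do/while loop in set_task_rq_fair() relies on; on 64-bit both the mirror field and the barriers compile away.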