@@ -2790,6 +2790,29 @@ static inline void update_cfs_shares(struct sched_entity *se)
 }
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
+static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
+{
+	if (&this_rq()->cfs == cfs_rq) {
+		/*
+		 * There are a few boundary cases this might miss but it should
+		 * get called often enough that that should (hopefully) not be
+		 * a real problem -- added to that it only calls on the local
+		 * CPU, so if we enqueue remotely we'll miss an update, but
+		 * the next tick/schedule should update.
+		 *
+		 * It will not get called when we go idle, because the idle
+		 * thread is a different class (!fair), nor will the utilization
+		 * number include things like RT tasks.
+		 *
+		 * As is, the util number is not freq-invariant (we'd have to
+		 * implement arch_scale_freq_capacity() for that).
+		 *
+		 * See cpu_util().
+		 */
+		cpufreq_update_util(rq_of(cfs_rq), 0);
+	}
+}
+
 #ifdef CONFIG_SMP
 /*
  * Approximate:
@@ -3276,29 +3299,6 @@ static inline void set_tg_cfs_propagate(struct cfs_rq *cfs_rq) {}
 
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
-static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
-{
-	if (&this_rq()->cfs == cfs_rq) {
-		/*
-		 * There are a few boundary cases this might miss but it should
-		 * get called often enough that that should (hopefully) not be
-		 * a real problem -- added to that it only calls on the local
-		 * CPU, so if we enqueue remotely we'll miss an update, but
-		 * the next tick/schedule should update.
-		 *
-		 * It will not get called when we go idle, because the idle
-		 * thread is a different class (!fair), nor will the utilization
-		 * number include things like RT tasks.
-		 *
-		 * As is, the util number is not freq-invariant (we'd have to
-		 * implement arch_scale_freq_capacity() for that).
-		 *
-		 * See cpu_util().
-		 */
-		cpufreq_update_util(rq_of(cfs_rq), 0);
-	}
-}
-
 /*
  * Unsigned subtract and clamp on underflow.
  *
@@ -3544,7 +3544,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
 
 static inline void update_load_avg(struct sched_entity *se, int not_used1)
 {
-	cpufreq_update_util(rq_of(cfs_rq_of(se)), 0);
+	cfs_rq_util_change(cfs_rq_of(se));
 }
 
 static inline void