@@ -806,7 +806,7 @@ void post_init_entity_util_avg(struct sched_entity *se)
 			/*
 			 * For !fair tasks do:
 			 *
-			update_cfs_rq_load_avg(now, cfs_rq, false);
+			update_cfs_rq_load_avg(now, cfs_rq);
 			attach_entity_load_avg(cfs_rq, se);
 			switched_from_fair(rq, p);
 			 *
@@ -3320,7 +3320,6 @@ static inline void set_tg_cfs_propagate(struct cfs_rq *cfs_rq) {}
  * update_cfs_rq_load_avg - update the cfs_rq's load/util averages
  * @now: current time, as per cfs_rq_clock_task()
  * @cfs_rq: cfs_rq to update
- * @update_freq: should we call cfs_rq_util_change() or will the call do so
  *
  * The cfs_rq avg is the direct sum of all its entities (blocked and runnable)
  * avg. The immediate corollary is that all (fair) tasks must be attached, see
@@ -3334,7 +3333,7 @@ static inline void set_tg_cfs_propagate(struct cfs_rq *cfs_rq) {}
  * call update_tg_load_avg() when this function returns true.
  */
 static inline int
-update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
+update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 {
 	struct sched_avg *sa = &cfs_rq->avg;
 	int decayed, removed_load = 0, removed_util = 0;
@@ -3362,7 +3361,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
 	cfs_rq->load_last_update_time_copy = sa->last_update_time;
 #endif
 
-	if (update_freq && (decayed || removed_util))
+	if (decayed || removed_util)
 		cfs_rq_util_change(cfs_rq);
 
 	return decayed || removed_load;
@@ -3390,7 +3389,7 @@ static inline void update_load_avg(struct sched_entity *se, int flags)
 	if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD))
 		__update_load_avg_se(now, cpu, cfs_rq, se);
 
-	decayed  = update_cfs_rq_load_avg(now, cfs_rq, true);
+	decayed  = update_cfs_rq_load_avg(now, cfs_rq);
 	decayed |= propagate_entity_load_avg(se);
 
 	if (decayed && (flags & UPDATE_TG))
@@ -3534,7 +3533,7 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf);
 #else /* CONFIG_SMP */
 
 static inline int
-update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
+update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 {
 	return 0;
 }
@@ -6919,7 +6918,7 @@ static void update_blocked_averages(int cpu)
 		if (throttled_hierarchy(cfs_rq))
 			continue;
 
-		if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true))
+		if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq))
 			update_tg_load_avg(cfs_rq, 0);
 
 		/* Propagate pending load changes to the parent, if any: */
@@ -6992,7 +6991,7 @@ static inline void update_blocked_averages(int cpu)
 
 	rq_lock_irqsave(rq, &rf);
 	update_rq_clock(rq);
-	update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true);
+	update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
 	rq_unlock_irqrestore(rq, &rf);
 }