@@ -772,7 +772,7 @@ void post_init_entity_util_avg(struct sched_entity *se)
 		 * For !fair tasks do:
 		 *
 		update_cfs_rq_load_avg(now, cfs_rq);
-		attach_entity_load_avg(cfs_rq, se);
+		attach_entity_load_avg(cfs_rq, se, 0);
 		switched_from_fair(rq, p);
 		 *
 		 * such that the next switched_to_fair() has the
@@ -3009,11 +3009,11 @@ static inline void update_cfs_group(struct sched_entity *se)
 }
 #endif /* CONFIG_FAIR_GROUP_SCHED */

-static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
+static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
 {
 	struct rq *rq = rq_of(cfs_rq);

-	if (&rq->cfs == cfs_rq) {
+	if (&rq->cfs == cfs_rq || (flags & SCHED_CPUFREQ_MIGRATION)) {
 		/*
 		 * There are a few boundary cases this might miss but it should
 		 * get called often enough that that should (hopefully) not be
@@ -3028,7 +3028,7 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
 		 *
 		 * See cpu_util().
 		 */
-		cpufreq_update_util(rq, 0);
+		cpufreq_update_util(rq, flags);
 	}
 }

@@ -3686,7 +3686,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 #endif

 	if (decayed)
-		cfs_rq_util_change(cfs_rq);
+		cfs_rq_util_change(cfs_rq, 0);

 	return decayed;
 }
@@ -3699,7 +3699,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
  * Must call update_cfs_rq_load_avg() before this, since we rely on
  * cfs_rq->avg.last_update_time being current.
  */
-static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
 	u32 divider = LOAD_AVG_MAX - 1024 + cfs_rq->avg.period_contrib;

@@ -3735,7 +3735,7 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s

 	add_tg_cfs_propagate(cfs_rq, se->avg.load_sum);

-	cfs_rq_util_change(cfs_rq);
+	cfs_rq_util_change(cfs_rq, flags);
 }

 /**
@@ -3754,7 +3754,7 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s

 	add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);

-	cfs_rq_util_change(cfs_rq);
+	cfs_rq_util_change(cfs_rq, 0);
 }

 /*
@@ -3784,7 +3784,14 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s

 	if (!se->avg.last_update_time && (flags & DO_ATTACH)) {

-		attach_entity_load_avg(cfs_rq, se);
+		/*
+		 * DO_ATTACH means we're here from enqueue_entity().
+		 * !last_update_time means we've passed through
+		 * migrate_task_rq_fair() indicating we migrated.
+		 *
+		 * IOW we're enqueueing a task on a new CPU.
+		 */
+		attach_entity_load_avg(cfs_rq, se, SCHED_CPUFREQ_MIGRATION);
 		update_tg_load_avg(cfs_rq, 0);

 	} else if (decayed && (flags & UPDATE_TG))
@@ -3880,13 +3887,13 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)

 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1)
 {
-	cfs_rq_util_change(cfs_rq);
+	cfs_rq_util_change(cfs_rq, 0);
 }

 static inline void remove_entity_load_avg(struct sched_entity *se) {}

 static inline void
-attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
+attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) {}
 static inline void
 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}

@@ -9726,7 +9733,7 @@ static void attach_entity_cfs_rq(struct sched_entity *se)

 	/* Synchronize entity with its cfs_rq */
 	update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
-	attach_entity_load_avg(cfs_rq, se);
+	attach_entity_load_avg(cfs_rq, se, 0);
 	update_tg_load_avg(cfs_rq, false);
 	propagate_entity_cfs_rq(se);
 }
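
For illustration only (not part of the patch): the SCHED_CPUFREQ_MIGRATION hint threaded through attach_entity_load_avg() -> cfs_rq_util_change() -> cpufreq_update_util() ends up in whatever update_util hook a cpufreq governor has registered. Below is a minimal sketch of a hypothetical consumer, assuming the cpufreq_add_update_util_hook() / struct update_util_data API and SCHED_CPUFREQ_* flags from include/linux/sched/cpufreq.h of this kernel generation; the my_governor_* names are made up.

#include <linux/percpu.h>
#include <linux/sched/cpufreq.h>

static DEFINE_PER_CPU(struct update_util_data, my_update_util);

/* Called by the scheduler via cpufreq_update_util(); with this patch the
 * MIGRATION flag is set when a freshly migrated task is enqueued, even if
 * the utilization update happened on a non-root cfs_rq. */
static void my_governor_update(struct update_util_data *data, u64 time,
			       unsigned int flags)
{
	if (flags & SCHED_CPUFREQ_MIGRATION) {
		/* e.g. bypass rate limiting and re-evaluate the frequency now */
	}
}

/* Hypothetical governor start path: register the per-CPU hook. */
static void my_governor_start(int cpu)
{
	cpufreq_add_update_util_hook(cpu, &per_cpu(my_update_util, cpu),
				     my_governor_update);
}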