@@ -2497,28 +2497,22 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 # ifdef CONFIG_SMP
-static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
+static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
 {
-	long tg_weight;
+	long tg_weight, load, shares;
 
 	/*
-	 * Use this CPU's real-time load instead of the last load contribution
-	 * as the updating of the contribution is delayed, and we will use the
-	 * the real-time load to calc the share. See update_tg_load_avg().
+	 * This really should be: cfs_rq->avg.load_avg, but instead we use
+	 * cfs_rq->load.weight, which is its upper bound. This helps ramp up
+	 * the shares for small weight interactive tasks.
 	 */
-	tg_weight = atomic_long_read(&tg->load_avg);
-	tg_weight -= cfs_rq->tg_load_avg_contrib;
-	tg_weight += cfs_rq->load.weight;
-
-	return tg_weight;
-}
+	load = scale_load_down(cfs_rq->load.weight);
 
-static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
-{
-	long tg_weight, load, shares;
+	tg_weight = atomic_long_read(&tg->load_avg);
 
-	tg_weight = calc_tg_weight(tg, cfs_rq);
-	load = cfs_rq->load.weight;
+	/* Ensure tg_weight >= load */
+	tg_weight -= cfs_rq->tg_load_avg_contrib;
+	tg_weight += load;
 
 	shares = (tg->shares * load);
 	if (tg_weight)
@@ -2537,6 +2531,7 @@ static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
 	return tg->shares;
 }
 # endif /* CONFIG_SMP */
+
 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 			    unsigned long weight)
 {
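
For readers tracing the arithmetic, the reworked calc_cfs_shares() boils down to handing this cfs_rq a slice of tg->shares proportional to its weight versus the group-wide load, with the CPU's stale load contribution swapped for the instantaneous cfs_rq->load.weight so that tg_weight can never fall below load. The standalone sketch below is a hedged illustration of that formula, not kernel code: demo_calc_shares() and its example numbers are hypothetical, scale_load_down() and the atomic read are omitted, and the final clamp mirrors the MIN_SHARES/tg->shares bounds the kernel applies in the part of the function outside the quoted hunk.

#include <stdio.h>

/* Hypothetical floor, mirroring the kernel's unscaled MIN_SHARES of 2. */
#define MIN_SHARES	2L

/*
 * Simplified stand-in for calc_cfs_shares(): plain longs instead of
 * struct cfs_rq / struct task_group, no load scaling, no atomics.
 */
static long demo_calc_shares(long tg_shares, long tg_load_avg,
			     long tg_load_avg_contrib, long cfs_rq_weight)
{
	long tg_weight, load, shares;

	/* Instantaneous local weight stands in for the delayed load average. */
	load = cfs_rq_weight;

	/*
	 * Replace this CPU's last recorded contribution with its current
	 * weight, which guarantees tg_weight >= load.
	 */
	tg_weight = tg_load_avg - tg_load_avg_contrib + load;

	shares = tg_shares * load;
	if (tg_weight)
		shares /= tg_weight;

	/* Clamp to the same bounds the full kernel function enforces. */
	if (shares < MIN_SHARES)
		shares = MIN_SHARES;
	if (shares > tg_shares)
		shares = tg_shares;

	return shares;
}

int main(void)
{
	/*
	 * Example: a group with 1024 shares and group-wide load 3072, of
	 * which this CPU last contributed 512 but currently runs weight 1024.
	 */
	printf("shares = %ld\n", demo_calc_shares(1024, 3072, 512, 1024));
	return 0;
}

With those example inputs, tg_weight = 3072 - 512 + 1024 = 3584 and shares = 1024 * 1024 / 3584 = 292, so the group entity on this CPU ends up with a bit more than a quarter of the group's shares, even though its last recorded contribution was only a sixth of the group load.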