@@ -2835,24 +2835,6 @@ void remove_entity_load_avg(struct sched_entity *se)
 	atomic_long_add(se->avg.util_avg, &cfs_rq->removed_util_avg);
 }
 
-/*
- * Update the rq's load with the elapsed running time before entering
- * idle. if the last scheduled task is not a CFS task, idle_enter will
- * be the only way to update the runnable statistic.
- */
-void idle_enter_fair(struct rq *this_rq)
-{
-}
-
-/*
- * Update the rq's load with the elapsed idle time before a task is
- * scheduled. if the newly scheduled task is not a CFS task, idle_exit will
- * be the only way to update the runnable statistic.
- */
-void idle_exit_fair(struct rq *this_rq)
-{
-}
-
 static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq)
 {
 	return cfs_rq->runnable_load_avg;
@@ -7248,8 +7230,6 @@ static int idle_balance(struct rq *this_rq)
 	int pulled_task = 0;
 	u64 curr_cost = 0;
 
-	idle_enter_fair(this_rq);
-
 	/*
 	 * We must set idle_stamp _before_ calling idle_balance(), such that we
 	 * measure the duration of idle_balance() as idle time.
@@ -7330,10 +7310,8 @@ out:
 	if (this_rq->nr_running != this_rq->cfs.h_nr_running)
 		pulled_task = -1;
 
-	if (pulled_task) {
-		idle_exit_fair(this_rq);
+	if (pulled_task)
 		this_rq->idle_stamp = 0;
-	}
 
 	return pulled_task;
 }