@@ -508,8 +508,11 @@ static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 
 	rt_se = rt_rq->tg->rt_se[cpu];
 
-	if (!rt_se)
+	if (!rt_se) {
 		dequeue_top_rt_rq(rt_rq);
+		/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
+		cpufreq_update_util(rq_of_rt_rq(rt_rq), 0);
+	}
 	else if (on_rt_rq(rt_se))
 		dequeue_rt_entity(rt_se, 0);
 }
@@ -1001,8 +1004,6 @@ dequeue_top_rt_rq(struct rt_rq *rt_rq)
 	sub_nr_running(rq, rt_rq->rt_nr_running);
 	rt_rq->rt_queued = 0;
 
-	/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
-	cpufreq_update_util(rq, 0);
 }
 
 static void
@@ -1014,11 +1015,14 @@ enqueue_top_rt_rq(struct rt_rq *rt_rq)
 
 	if (rt_rq->rt_queued)
 		return;
-	if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
+
+	if (rt_rq_throttled(rt_rq))
 		return;
 
-	add_nr_running(rq, rt_rq->rt_nr_running);
-	rt_rq->rt_queued = 1;
+	if (rt_rq->rt_nr_running) {
+		add_nr_running(rq, rt_rq->rt_nr_running);
+		rt_rq->rt_queued = 1;
+	}
 
 	/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
 	cpufreq_update_util(rq, 0);