@@ -840,6 +840,17 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 		int enqueue = 0;
 		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
 		struct rq *rq = rq_of_rt_rq(rt_rq);
+		int skip;
+
+		/*
+		 * When span == cpu_online_mask, taking each rq->lock
+		 * can be time-consuming. Try to avoid it when possible.
+		 */
+		raw_spin_lock(&rt_rq->rt_runtime_lock);
+		skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
+		raw_spin_unlock(&rt_rq->rt_runtime_lock);
+		if (skip)
+			continue;
 
 		raw_spin_lock(&rq->lock);
 		if (rt_rq->rt_time) {
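The pattern here is to peek at the per-queue state under the cheaper, finer-grained rt_runtime_lock and skip taking the contended rq->lock entirely when the queue has no work (rt_time and rt_nr_running both zero). Below is a minimal user-space sketch of that same pattern, not kernel code: the struct and field names (queue, state_lock, big_lock, time, nr_running) are hypothetical stand-ins for rt_rq->rt_time / rt_rq->rt_nr_running, rt_runtime_lock, and rq->lock.

/*
 * Sketch only: peek under a cheap per-queue lock, and take the
 * expensive contended lock only when there is actual work.
 * All names are hypothetical stand-ins for the kernel fields above.
 */
#include <pthread.h>
#include <stdio.h>

struct queue {
	pthread_mutex_t state_lock;	/* cheap lock, like rt_runtime_lock */
	pthread_mutex_t big_lock;	/* contended lock, like rq->lock */
	unsigned long time;		/* like rt_rq->rt_time */
	unsigned int nr_running;	/* like rt_rq->rt_nr_running */
};

static void service_queue(struct queue *q)
{
	int skip;

	/* Peek under the cheap lock; avoid big_lock when idle. */
	pthread_mutex_lock(&q->state_lock);
	skip = !q->time && !q->nr_running;
	pthread_mutex_unlock(&q->state_lock);
	if (skip)
		return;

	pthread_mutex_lock(&q->big_lock);
	/* ... expensive per-queue work, done only when needed ... */
	pthread_mutex_unlock(&q->big_lock);
}

int main(void)
{
	struct queue q = {
		.state_lock = PTHREAD_MUTEX_INITIALIZER,
		.big_lock   = PTHREAD_MUTEX_INITIALIZER,
		.time       = 0,
		.nr_running = 0,
	};

	service_queue(&q);	/* idle queue: big_lock is never taken */
	puts("done");
	return 0;
}

The early check is safe because an idle queue has nothing to enqueue or throttle; the fields consulted are read under the same lock that serializes updates to them, so a queue with pending work is never skipped.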