@@ -135,7 +135,6 @@ static void update_dl_migration(struct dl_rq *dl_rq)
 static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
 	struct task_struct *p = dl_task_of(dl_se);
-	dl_rq = &rq_of_dl_rq(dl_rq)->dl;
 
 	if (p->nr_cpus_allowed > 1)
 		dl_rq->dl_nr_migratory++;
@@ -146,7 +145,6 @@ static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
 	struct task_struct *p = dl_task_of(dl_se);
-	dl_rq = &rq_of_dl_rq(dl_rq)->dl;
 
 	if (p->nr_cpus_allowed > 1)
 		dl_rq->dl_nr_migratory--;
@@ -564,6 +562,8 @@ int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
 	return 1;
 }
 
+extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
+
 /*
  * Update the current task's runtime statistics (provided it is still
  * a -deadline task and has not been removed from the dl_rq).
@@ -627,11 +627,13 @@ static void update_curr_dl(struct rq *rq)
 		struct rt_rq *rt_rq = &rq->rt;
 
 		raw_spin_lock(&rt_rq->rt_runtime_lock);
-		rt_rq->rt_time += delta_exec;
 		/*
 		 * We'll let actual RT tasks worry about the overflow here, we
-		 * have our own CBS to keep us inline -- see above.
+		 * have our own CBS to keep us inline; only account when RT
+		 * bandwidth is relevant.
 		 */
+		if (sched_rt_bandwidth_account(rt_rq))
+			rt_rq->rt_time += delta_exec;
 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 	}
 }
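Note: the extern declaration above pulls in a predicate that would be defined on the RT side (kernel/sched/rt.c). As a rough sketch only (an illustration of the intended check, not necessarily the exact body that was merged), it can be built from the existing rt_bandwidth state, assuming the usual sched_rt_bandwidth() accessor, the hrtimer_active() helper, and the rt_period_timer/rt_runtime/rt_time fields:

bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

	/*
	 * RT bandwidth is "relevant" only while enforcement is in effect:
	 * either the period timer is currently running, or the runqueue
	 * still has runtime left in the current period.
	 */
	return (hrtimer_active(&rt_b->rt_period_timer) ||
		rt_rq->rt_time < rt_b->rt_runtime);
}

With a check of this shape, update_curr_dl() stops charging deadline runtime to rt_rq->rt_time while the RT bandwidth machinery is idle, so rt_time no longer accumulates when nothing will ever replenish or consume it.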