@@ -518,12 +518,20 @@ again:
 	}
 
 	/*
-	 * We need to take care of a possible races here. In fact, the
-	 * task might have changed its scheduling policy to something
-	 * different from SCHED_DEADLINE or changed its reservation
-	 * parameters (through sched_setattr()).
+	 * We need to take care of several possible races here:
+	 *
+	 *   - the task might have changed its scheduling policy
+	 *     to something different from SCHED_DEADLINE
+	 *   - the task might have changed its reservation parameters
+	 *     (through sched_setattr())
+	 *   - the task might have been boosted by someone else and
+	 *     might be in the boosting/deboosting path
+	 *
+	 * In all these cases we bail out, as the task is already
+	 * in the runqueue or is going to be enqueued back anyway.
 	 */
-	if (!dl_task(p) || dl_se->dl_new)
+	if (!dl_task(p) || dl_se->dl_new ||
+	    dl_se->dl_boosted || !dl_se->dl_throttled)
 		goto unlock;
 
 	sched_clock_tick();
@@ -532,7 +540,7 @@ again:
 	dl_se->dl_yielded = 0;
 	if (task_on_rq_queued(p)) {
 		enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
-		if (task_has_dl_policy(rq->curr))
+		if (dl_task(rq->curr))
 			check_preempt_curr_dl(rq, p, 0);
 		else
 			resched_curr(rq);
@@ -847,8 +855,19 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 	 * smaller than our one... OTW we keep our runtime and
 	 * deadline.
 	 */
-	if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio))
+	if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio)) {
 		pi_se = &pi_task->dl;
+	} else if (!dl_prio(p->normal_prio)) {
+		/*
+		 * Special case in which we have a !SCHED_DEADLINE task
+		 * that is going to be deboosted, but exceeds its
+		 * runtime while doing so. No point in replenishing
+		 * it, as it's going to return to its original
+		 * scheduling class after this.
+		 */
+		BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
+		return;
+	}
 
 	/*
 	 * If p is throttled, we do nothing. In fact, if it exhausted
@@ -1607,8 +1626,12 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
 		/* Only reschedule if pushing failed */
 		check_resched = 0;
 #endif /* CONFIG_SMP */
-		if (check_resched && task_has_dl_policy(rq->curr))
-			check_preempt_curr_dl(rq, p, 0);
+		if (check_resched) {
+			if (dl_task(rq->curr))
+				check_preempt_curr_dl(rq, p, 0);
+			else
+				resched_curr(rq);
+		}
 	}
 }
 
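
A note on the two predicates this fix keeps swapping between: dl_task() tests the
task's *effective* priority (p->prio), which rt_mutex_setprio() raises and later
restores across a PI boost, while task_has_dl_policy() tests only p->policy, and
p->normal_prio reflects the task's own class with boosting ignored. For reference,
a simplified rendition of the helpers, close to include/linux/sched/deadline.h and
kernel/sched/sched.h of this era (kernel-internal code, shown as a sketch only):

	#define MAX_DL_PRIO	0

	static inline int dl_prio(int prio)
	{
		/* deadline tasks live in the priority range below MAX_DL_PRIO */
		if (unlikely(prio < MAX_DL_PRIO))
			return 1;
		return 0;
	}

	static inline int dl_task(struct task_struct *p)
	{
		/* effective priority: also true for a PI-boosted !deadline task */
		return dl_prio(p->prio);
	}

	static inline int task_has_dl_policy(struct task_struct *p)
	{
		/* the task's own policy: false for a merely boosted task */
		return p->policy == SCHED_DEADLINE;
	}

Hence the rq->curr checks above move to dl_task(): a boosted current task runs at
deadline priority even though its policy is not SCHED_DEADLINE, so preemption must
go through check_preempt_curr_dl() (which compares deadlines) rather than a plain
resched_curr(). Symmetrically, !dl_prio(p->normal_prio) in enqueue_task_dl()
singles out the boosted-but-not-really-SCHED_DEADLINE task whose runtime must not
be replenished.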