@@ -2475,9 +2475,6 @@ static inline int dl_bw_cpus(int i)
  * allocated bandwidth to reflect the new situation.
  *
  * This function is called while holding p's rq->lock.
- *
- * XXX we should delay bw change until the task's 0-lag point, see
- * __setparam_dl().
  */
 static int dl_overflow(struct task_struct *p, int policy,
 		       const struct sched_attr *attr)
@@ -2502,16 +2499,29 @@ static int dl_overflow(struct task_struct *p, int policy,
 	cpus = dl_bw_cpus(task_cpu(p));
 	if (dl_policy(policy) && !task_has_dl_policy(p) &&
 	    !__dl_overflow(dl_b, cpus, 0, new_bw)) {
+		if (hrtimer_active(&p->dl.inactive_timer))
+			__dl_clear(dl_b, p->dl.dl_bw);
 		__dl_add(dl_b, new_bw);
 		err = 0;
 	} else if (dl_policy(policy) && task_has_dl_policy(p) &&
 		   !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
+		/*
+		 * XXX this is slightly incorrect: when the task
+		 * utilization decreases, we should delay the total
+		 * utilization change until the task's 0-lag point.
+		 * But this would require to set the task's "inactive
+		 * timer" when the task is not inactive.
+		 */
 		__dl_clear(dl_b, p->dl.dl_bw);
 		__dl_add(dl_b, new_bw);
 		dl_change_utilization(p, new_bw);
 		err = 0;
 	} else if (!dl_policy(policy) && task_has_dl_policy(p)) {
-		__dl_clear(dl_b, p->dl.dl_bw);
+		/*
+		 * Do not decrease the total deadline utilization here,
+		 * switched_from_dl() will take care to do it at the correct
+		 * (0-lag) time.
+		 */
 		err = 0;
 	}
 	raw_spin_unlock(&dl_b->lock);
@@ -4020,26 +4030,6 @@ __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
 	dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
 	dl_se->flags = attr->sched_flags;
 	dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
-
-	/*
-	 * Changing the parameters of a task is 'tricky' and we're not doing
-	 * the correct thing -- also see task_dead_dl() and switched_from_dl().
-	 *
-	 * What we SHOULD do is delay the bandwidth release until the 0-lag
-	 * point. This would include retaining the task_struct until that time
-	 * and change dl_overflow() to not immediately decrement the current
-	 * amount.
-	 *
-	 * Instead we retain the current runtime/deadline and let the new
-	 * parameters take effect after the current reservation period lapses.
-	 * This is safe (albeit pessimistic) because the 0-lag point is always
-	 * before the current scheduling deadline.
-	 *
-	 * We can still have temporary overloads because we do not delay the
-	 * change in bandwidth until that time; so admission control is
-	 * not on the safe side. It does however guarantee tasks will never
-	 * consume more than promised.
-	 */
 }
 
 /*