@@ -242,7 +242,7 @@ static void task_non_contending(struct task_struct *p)
 		if (p->state == TASK_DEAD)
 			sub_rq_bw(p->dl.dl_bw, &rq->dl);
 		raw_spin_lock(&dl_b->lock);
-		__dl_clear(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
+		__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
 		__dl_clear_params(p);
 		raw_spin_unlock(&dl_b->lock);
 	}
@@ -1209,7 +1209,7 @@ static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
 	}
 
 	raw_spin_lock(&dl_b->lock);
-	__dl_clear(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
+	__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
 	raw_spin_unlock(&dl_b->lock);
 	__dl_clear_params(p);
 
@@ -2170,7 +2170,7 @@ static void set_cpus_allowed_dl(struct task_struct *p,
 		 * until we complete the update.
 		 */
 		raw_spin_lock(&src_dl_b->lock);
-		__dl_clear(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
+		__dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
 		raw_spin_unlock(&src_dl_b->lock);
 	}
 
@@ -2448,7 +2448,7 @@ int sched_dl_overflow(struct task_struct *p, int policy,
 	if (dl_policy(policy) && !task_has_dl_policy(p) &&
 	    !__dl_overflow(dl_b, cpus, 0, new_bw)) {
 		if (hrtimer_active(&p->dl.inactive_timer))
-			__dl_clear(dl_b, p->dl.dl_bw, cpus);
+			__dl_sub(dl_b, p->dl.dl_bw, cpus);
 		__dl_add(dl_b, new_bw, cpus);
 		err = 0;
 	} else if (dl_policy(policy) && task_has_dl_policy(p) &&
@@ -2460,7 +2460,7 @@ int sched_dl_overflow(struct task_struct *p, int policy,
 		 * But this would require to set the task's "inactive
 		 * timer" when the task is not inactive.
 		 */
-		__dl_clear(dl_b, p->dl.dl_bw, cpus);
+		__dl_sub(dl_b, p->dl.dl_bw, cpus);
 		__dl_add(dl_b, new_bw, cpus);
 		dl_change_utilization(p, new_bw);
 		err = 0;
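
For context, the renamed helper is the arithmetic counterpart of __dl_add(): it subtracts a task's bandwidth from the root domain's dl_bw accounting rather than "clearing" anything, which is what motivates the rename. The helpers' bodies are not part of this diff; the following is only a sketch of the pair as their shape is implied by the call sites above (the __dl_update() propagation of the per-CPU share is an assumption here, not shown in the patch):

/* Sketch only: shapes inferred from the call sites in this patch. */
static inline
void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
{
	/* Give the task's bandwidth back to the domain total ... */
	dl_b->total_bw -= tsk_bw;
	/* ... and propagate the per-CPU share (assumed helper). */
	__dl_update(dl_b, (s32)tsk_bw / cpus);
}

static inline
void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
{
	/* Reserve the task's bandwidth from the domain total ... */
	dl_b->total_bw += tsk_bw;
	/* ... and propagate the per-CPU share with the opposite sign. */
	__dl_update(dl_b, -((s32)tsk_bw / cpus));
}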