@@ -529,6 +529,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
 static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
 {
 	struct rq *later_rq = NULL;
+	struct dl_bw *dl_b;
 
 	later_rq = find_lock_later_rq(p, rq);
 	if (!later_rq) {
@@ -557,6 +558,38 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p
 		double_lock_balance(rq, later_rq);
 	}
 
+	if (p->dl.dl_non_contending || p->dl.dl_throttled) {
+		/*
+		 * The inactive timer is armed (or its callback is running,
+		 * waiting for us to release the rq locks). Either way, when
+		 * it fires (or continues) it will see the running_bw of this
+		 * task already migrated to later_rq and handle it correctly.
+		 */
+		sub_running_bw(&p->dl, &rq->dl);
+		sub_rq_bw(&p->dl, &rq->dl);
+
+		add_rq_bw(&p->dl, &later_rq->dl);
+		add_running_bw(&p->dl, &later_rq->dl);
+	} else {
+		sub_rq_bw(&p->dl, &rq->dl);
+		add_rq_bw(&p->dl, &later_rq->dl);
+	}
+
+	/*
+	 * And we finally need to fixup root_domain(s) bandwidth accounting,
+	 * since p is still hanging out in the old (now moved to default) root
+	 * domain.
+	 */
+	dl_b = &rq->rd->dl_bw;
+	raw_spin_lock(&dl_b->lock);
+	__dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
+	raw_spin_unlock(&dl_b->lock);
+
+	dl_b = &later_rq->rd->dl_bw;
+	raw_spin_lock(&dl_b->lock);
+	__dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
+	raw_spin_unlock(&dl_b->lock);
+
 	set_task_cpu(p, later_rq->cpu);
 	double_unlock_balance(later_rq, rq);
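
The patch moves the task's bandwidth at two levels: the per-runqueue counters touched by sub_running_bw()/sub_rq_bw() and add_rq_bw()/add_running_bw(), and the per-root-domain total adjusted under dl_b->lock via __dl_sub()/__dl_add(). The stand-alone C sketch below models only that bookkeeping so the invariant is easy to see; the type and function names (toy_rq, toy_root_domain, toy_task, move_dl_task) and the running_bw_charged flag are illustrative stand-ins for this note, not the kernel's API, and all locking is omitted.

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Toy model of the two accounting levels the patch touches (not kernel code). */
struct toy_root_domain {
	uint64_t total_bw;	/* admitted DEADLINE bandwidth in this root domain */
};

struct toy_rq {
	uint64_t running_bw;	/* bandwidth of tasks currently charged as active here */
	uint64_t rq_bw;		/* bandwidth of all tasks admitted on this rq */
	struct toy_root_domain *rd;
};

struct toy_task {
	uint64_t dl_bw;		/* task's reserved bandwidth (runtime/period) */
	int running_bw_charged;	/* stands in for dl_non_contending || dl_throttled */
};

/* Mirrors the bookkeeping dl_task_offline_migration() now performs. */
static void move_dl_task(struct toy_task *p, struct toy_rq *src, struct toy_rq *dst)
{
	/* Per-rq level: rq_bw always follows the task ... */
	src->rq_bw -= p->dl_bw;
	dst->rq_bw += p->dl_bw;
	/* ... running_bw only if it is still charged to the source rq. */
	if (p->running_bw_charged) {
		src->running_bw -= p->dl_bw;
		dst->running_bw += p->dl_bw;
	}

	/* Root-domain level: the __dl_sub()/__dl_add() pair in the patch. */
	src->rd->total_bw -= p->dl_bw;
	dst->rd->total_bw += p->dl_bw;
}

int main(void)
{
	struct toy_root_domain old_rd = { .total_bw = 400 };
	struct toy_root_domain new_rd = { .total_bw = 100 };
	struct toy_rq src = { .running_bw = 400, .rq_bw = 400, .rd = &old_rd };
	struct toy_rq dst = { .running_bw = 100, .rq_bw = 100, .rd = &new_rd };
	struct toy_task p = { .dl_bw = 400, .running_bw_charged = 1 };

	move_dl_task(&p, &src, &dst);

	/* Nothing may stay charged to the source rq/root domain being left behind. */
	assert(src.running_bw == 0 && src.rq_bw == 0 && old_rd.total_bw == 0);
	printf("dst: running_bw=%" PRIu64 " rq_bw=%" PRIu64 " rd.total_bw=%" PRIu64 "\n",
	       dst.running_bw, dst.rq_bw, new_rd.total_bw);
	return 0;
}

In the real code the root-domain update additionally nests dl_b->lock inside the rq locks already held via double_lock_balance(), and __dl_sub()/__dl_add() are passed cpumask_weight(rd->span) so the update can be scaled to the size of each root domain; the sketch ignores locking and that scaling entirely.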