@@ -134,7 +134,7 @@ static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
	struct task_struct *p = dl_task_of(dl_se);

-	if (p->nr_cpus_allowed > 1)
+	if (tsk_nr_cpus_allowed(p) > 1)
		dl_rq->dl_nr_migratory++;

	update_dl_migration(dl_rq);
@@ -144,7 +144,7 @@ static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
	struct task_struct *p = dl_task_of(dl_se);

-	if (p->nr_cpus_allowed > 1)
+	if (tsk_nr_cpus_allowed(p) > 1)
		dl_rq->dl_nr_migratory--;

	update_dl_migration(dl_rq);
@@ -966,7 +966,7 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)

	enqueue_dl_entity(&p->dl, pi_se, flags);

-	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
+	if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1)
		enqueue_pushable_dl_task(rq, p);
 }

@@ -1040,9 +1040,9 @@ select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
	 * try to make it stay here, it might be important.
	 */
	if (unlikely(dl_task(curr)) &&
-	    (curr->nr_cpus_allowed < 2 ||
+	    (tsk_nr_cpus_allowed(curr) < 2 ||
	     !dl_entity_preempt(&p->dl, &curr->dl)) &&
-	    (p->nr_cpus_allowed > 1)) {
+	    (tsk_nr_cpus_allowed(p) > 1)) {
		int target = find_later_rq(p);

		if (target != -1 &&
@@ -1063,7 +1063,7 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
	 * Current can't be migrated, useless to reschedule,
	 * let's hope p can move out.
	 */
-	if (rq->curr->nr_cpus_allowed == 1 ||
+	if (tsk_nr_cpus_allowed(rq->curr) == 1 ||
	    cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
		return;

@@ -1071,7 +1071,7 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
	 * p is migratable, so let's not schedule it and
	 * see if it is pushed or pulled somewhere else.
	 */
-	if (p->nr_cpus_allowed != 1 &&
+	if (tsk_nr_cpus_allowed(p) != 1 &&
	    cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
		return;

@@ -1186,7 +1186,7 @@ static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
 {
	update_curr_dl(rq);

-	if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
+	if (on_dl_rq(&p->dl) && tsk_nr_cpus_allowed(p) > 1)
		enqueue_pushable_dl_task(rq, p);
 }

@@ -1287,7 +1287,7 @@ static int find_later_rq(struct task_struct *task)
	if (unlikely(!later_mask))
		return -1;

-	if (task->nr_cpus_allowed == 1)
+	if (tsk_nr_cpus_allowed(task) == 1)
		return -1;

	/*
@@ -1433,7 +1433,7 @@ static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)

	BUG_ON(rq->cpu != task_cpu(p));
	BUG_ON(task_current(rq, p));
-	BUG_ON(p->nr_cpus_allowed <= 1);
+	BUG_ON(tsk_nr_cpus_allowed(p) <= 1);

	BUG_ON(!task_on_rq_queued(p));
	BUG_ON(!dl_task(p));
@@ -1472,7 +1472,7 @@ retry:
	 */
	if (dl_task(rq->curr) &&
	    dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
-	    rq->curr->nr_cpus_allowed > 1) {
+	    tsk_nr_cpus_allowed(rq->curr) > 1) {
		resched_curr(rq);
		return 0;
	}
@@ -1619,9 +1619,9 @@ static void task_woken_dl(struct rq *rq, struct task_struct *p)
 {
	if (!task_running(rq, p) &&
	    !test_tsk_need_resched(rq->curr) &&
-	    p->nr_cpus_allowed > 1 &&
+	    tsk_nr_cpus_allowed(p) > 1 &&
	    dl_task(rq->curr) &&
-	    (rq->curr->nr_cpus_allowed < 2 ||
+	    (tsk_nr_cpus_allowed(rq->curr) < 2 ||
	     !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
		push_dl_tasks(rq);
	}
@@ -1725,7 +1725,7 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)

	if (task_on_rq_queued(p) && rq->curr != p) {
 #ifdef CONFIG_SMP
-		if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
+		if (tsk_nr_cpus_allowed(p) > 1 && rq->dl.overloaded)
			queue_push_tasks(rq);
 #else
		if (dl_task(rq->curr))
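
Note on the conversion above: every open-coded read of p->nr_cpus_allowed in deadline.c is routed through a tsk_nr_cpus_allowed() accessor, so the field lookup has one definition that later patches can override. The helper itself is not part of these hunks; a minimal sketch of what such an accessor would look like (placement in include/linux/sched.h is an assumption here) is:

static inline int tsk_nr_cpus_allowed(struct task_struct *tsk)
{
	/* Plain wrapper around the existing field; behaviour is unchanged. */
	return tsk->nr_cpus_allowed;
}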