@@ -1043,7 +1043,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 	 * A queue event has occurred, and we're going to schedule. In
 	 * this case, we can save a useless back to back clock update.
 	 */
-	if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
+	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
 		rq->skip_clock_update = 1;
 }
 
@@ -1088,7 +1088,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 
 static void __migrate_swap_task(struct task_struct *p, int cpu)
 {
-	if (p->on_rq) {
+	if (task_on_rq_queued(p)) {
 		struct rq *src_rq, *dst_rq;
 
 		src_rq = task_rq(p);
@@ -1214,7 +1214,7 @@ static int migration_cpu_stop(void *data);
 unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 {
 	unsigned long flags;
-	int running, on_rq;
+	int running, queued;
 	unsigned long ncsw;
 	struct rq *rq;
 
@@ -1252,7 +1252,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 		rq = task_rq_lock(p, &flags);
 		trace_sched_wait_task(p);
 		running = task_running(rq, p);
-		on_rq = p->on_rq;
+		queued = task_on_rq_queued(p);
 		ncsw = 0;
 		if (!match_state || p->state == match_state)
 			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
@@ -1284,7 +1284,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 		 * running right now), it's preempted, and we should
 		 * yield - it could be a while.
 		 */
-		if (unlikely(on_rq)) {
+		if (unlikely(queued)) {
 			ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
 
 			set_current_state(TASK_UNINTERRUPTIBLE);
@@ -1478,7 +1478,7 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
 static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
 {
 	activate_task(rq, p, en_flags);
-	p->on_rq = 1;
+	p->on_rq = TASK_ON_RQ_QUEUED;
 
 	/* if a worker is waking up, notify workqueue */
 	if (p->flags & PF_WQ_WORKER)
@@ -1537,7 +1537,7 @@ static int ttwu_remote(struct task_struct *p, int wake_flags)
 	int ret = 0;
 
 	rq = __task_rq_lock(p);
-	if (p->on_rq) {
+	if (task_on_rq_queued(p)) {
 		/* check_preempt_curr() may use rq clock */
 		update_rq_clock(rq);
 		ttwu_do_wakeup(rq, p, wake_flags);
@@ -1678,7 +1678,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 	success = 1; /* we're going to change ->state */
 	cpu = task_cpu(p);
 
-	if (p->on_rq && ttwu_remote(p, wake_flags))
+	if (task_on_rq_queued(p) && ttwu_remote(p, wake_flags))
 		goto stat;
 
 #ifdef CONFIG_SMP
@@ -1742,7 +1742,7 @@ static void try_to_wake_up_local(struct task_struct *p)
 	if (!(p->state & TASK_NORMAL))
 		goto out;
 
-	if (!p->on_rq)
+	if (!task_on_rq_queued(p))
 		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
 
 	ttwu_do_wakeup(rq, p, 0);
@@ -2095,7 +2095,7 @@ void wake_up_new_task(struct task_struct *p)
 	init_task_runnable_average(p);
 	rq = __task_rq_lock(p);
 	activate_task(rq, p, 0);
-	p->on_rq = 1;
+	p->on_rq = TASK_ON_RQ_QUEUED;
 	trace_sched_wakeup_new(p, true);
 	check_preempt_curr(rq, p, WF_FORK);
 #ifdef CONFIG_SMP
@@ -2444,7 +2444,7 @@ static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
 	 * project cycles that may never be accounted to this
 	 * thread, breaking clock_gettime().
 	 */
-	if (task_current(rq, p) && p->on_rq) {
+	if (task_current(rq, p) && task_on_rq_queued(p)) {
 		update_rq_clock(rq);
 		ns = rq_clock_task(rq) - p->se.exec_start;
 		if ((s64)ns < 0)
@@ -2490,7 +2490,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 	 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
 	 * been accounted, so we're correct here as well.
 	 */
-	if (!p->on_cpu || !p->on_rq)
+	if (!p->on_cpu || !task_on_rq_queued(p))
 		return p->se.sum_exec_runtime;
 #endif
 
@@ -2794,7 +2794,7 @@ need_resched:
 		switch_count = &prev->nvcsw;
 	}
 
-	if (prev->on_rq || rq->skip_clock_update < 0)
+	if (task_on_rq_queued(prev) || rq->skip_clock_update < 0)
 		update_rq_clock(rq);
 
 	next = pick_next_task(rq, prev);
@@ -2959,7 +2959,7 @@ EXPORT_SYMBOL(default_wake_function);
  */
 void rt_mutex_setprio(struct task_struct *p, int prio)
 {
-	int oldprio, on_rq, running, enqueue_flag = 0;
+	int oldprio, queued, running, enqueue_flag = 0;
 	struct rq *rq;
 	const struct sched_class *prev_class;
 
@@ -2988,9 +2988,9 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	trace_sched_pi_setprio(p, prio);
 	oldprio = p->prio;
 	prev_class = p->sched_class;
-	on_rq = p->on_rq;
+	queued = task_on_rq_queued(p);
 	running = task_current(rq, p);
-	if (on_rq)
+	if (queued)
 		dequeue_task(rq, p, 0);
 	if (running)
 		p->sched_class->put_prev_task(rq, p);
@@ -3030,7 +3030,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 
 	if (running)
 		p->sched_class->set_curr_task(rq);
-	if (on_rq)
+	if (queued)
 		enqueue_task(rq, p, enqueue_flag);
 
 	check_class_changed(rq, p, prev_class, oldprio);
@@ -3041,7 +3041,7 @@ out_unlock:
 
 void set_user_nice(struct task_struct *p, long nice)
 {
-	int old_prio, delta, on_rq;
+	int old_prio, delta, queued;
 	unsigned long flags;
 	struct rq *rq;
 
@@ -3062,8 +3062,8 @@ void set_user_nice(struct task_struct *p, long nice)
 		p->static_prio = NICE_TO_PRIO(nice);
 		goto out_unlock;
 	}
-	on_rq = p->on_rq;
-	if (on_rq)
+	queued = task_on_rq_queued(p);
+	if (queued)
 		dequeue_task(rq, p, 0);
 
 	p->static_prio = NICE_TO_PRIO(nice);
@@ -3072,7 +3072,7 @@ void set_user_nice(struct task_struct *p, long nice)
 	p->prio = effective_prio(p);
 	delta = p->prio - old_prio;
 
-	if (on_rq) {
+	if (queued) {
 		enqueue_task(rq, p, 0);
 		/*
 		 * If the task increased its priority or is running and
@@ -3344,7 +3344,7 @@ static int __sched_setscheduler(struct task_struct *p,
 {
 	int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
 		      MAX_RT_PRIO - 1 - attr->sched_priority;
-	int retval, oldprio, oldpolicy = -1, on_rq, running;
+	int retval, oldprio, oldpolicy = -1, queued, running;
 	int policy = attr->sched_policy;
 	unsigned long flags;
 	const struct sched_class *prev_class;
@@ -3541,9 +3541,9 @@ change:
 		return 0;
 	}
 
-	on_rq = p->on_rq;
+	queued = task_on_rq_queued(p);
 	running = task_current(rq, p);
-	if (on_rq)
+	if (queued)
 		dequeue_task(rq, p, 0);
 	if (running)
 		p->sched_class->put_prev_task(rq, p);
@@ -3553,7 +3553,7 @@ change:
 
 	if (running)
 		p->sched_class->set_curr_task(rq);
-	if (on_rq) {
+	if (queued) {
 		/*
 		 * We enqueue to tail when the priority of a task is
 		 * increased (user space view).
@@ -4568,7 +4568,7 @@ void init_idle(struct task_struct *idle, int cpu)
 	rcu_read_unlock();
 
 	rq->curr = rq->idle = idle;
-	idle->on_rq = 1;
+	idle->on_rq = TASK_ON_RQ_QUEUED;
#if defined(CONFIG_SMP)
 	idle->on_cpu = 1;
 #endif
@@ -4645,7 +4645,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 		goto out;
 
 	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
-	if (p->on_rq) {
+	if (task_on_rq_queued(p)) {
 		struct migration_arg arg = { p, dest_cpu };
 		/* Need help from migration thread: drop lock and wait. */
 		task_rq_unlock(rq, p, &flags);
@@ -4695,7 +4695,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 	 * If we're not on a rq, the next wake-up will ensure we're
 	 * placed properly.
 	 */
-	if (p->on_rq) {
+	if (task_on_rq_queued(p)) {
 		dequeue_task(rq_src, p, 0);
 		set_task_cpu(p, dest_cpu);
 		enqueue_task(rq_dest, p, 0);
@@ -4736,13 +4736,13 @@ void sched_setnuma(struct task_struct *p, int nid)
 {
 	struct rq *rq;
 	unsigned long flags;
-	bool on_rq, running;
+	bool queued, running;
 
 	rq = task_rq_lock(p, &flags);
-	on_rq = p->on_rq;
+	queued = task_on_rq_queued(p);
 	running = task_current(rq, p);
 
-	if (on_rq)
+	if (queued)
 		dequeue_task(rq, p, 0);
 	if (running)
 		p->sched_class->put_prev_task(rq, p);
@@ -4751,7 +4751,7 @@ void sched_setnuma(struct task_struct *p, int nid)
 
 	if (running)
 		p->sched_class->set_curr_task(rq);
-	if (on_rq)
+	if (queued)
 		enqueue_task(rq, p, 0);
 	task_rq_unlock(rq, p, &flags);
 }
@@ -7116,13 +7116,13 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
 		.sched_policy = SCHED_NORMAL,
 	};
 	int old_prio = p->prio;
-	int on_rq;
+	int queued;
 
-	on_rq = p->on_rq;
-	if (on_rq)
+	queued = task_on_rq_queued(p);
+	if (queued)
 		dequeue_task(rq, p, 0);
 	__setscheduler(rq, p, &attr);
-	if (on_rq) {
+	if (queued) {
 		enqueue_task(rq, p, 0);
 		resched_curr(rq);
 	}
@@ -7309,16 +7309,16 @@ void sched_offline_group(struct task_group *tg)
 void sched_move_task(struct task_struct *tsk)
 {
 	struct task_group *tg;
-	int on_rq, running;
+	int queued, running;
 	unsigned long flags;
 	struct rq *rq;
 
 	rq = task_rq_lock(tsk, &flags);
 
 	running = task_current(rq, tsk);
-	on_rq = tsk->on_rq;
+	queued = task_on_rq_queued(tsk);
 
-	if (on_rq)
+	if (queued)
 		dequeue_task(rq, tsk, 0);
 	if (unlikely(running))
 		tsk->sched_class->put_prev_task(rq, tsk);
@@ -7331,14 +7331,14 @@ void sched_move_task(struct task_struct *tsk)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	if (tsk->sched_class->task_move_group)
-		tsk->sched_class->task_move_group(tsk, on_rq);
+		tsk->sched_class->task_move_group(tsk, queued);
 	else
 #endif
 		set_task_rq(tsk, task_cpu(tsk));
 
 	if (unlikely(running))
 		tsk->sched_class->set_curr_task(rq);
-	if (on_rq)
+	if (queued)
 		enqueue_task(rq, tsk, 0);
 
 	task_rq_unlock(rq, tsk, &flags);
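
The hunks above only read cleanly together with the header side of the change, which is not part of this excerpt: a TASK_ON_RQ_QUEUED constant and a task_on_rq_queued() wrapper are assumed to be introduced in kernel/sched/sched.h. A minimal sketch of what that helper presumably looks like, inferred from how the call sites use it:

/* Sketch only; the exact definition lives in the sched.h part of the patch. */
#define TASK_ON_RQ_QUEUED	1

static inline int task_on_rq_queued(struct task_struct *p)
{
	/* ->on_rq is now a small state word rather than a plain boolean */
	return p->on_rq == TASK_ON_RQ_QUEUED;
}

With the wrapper in place, callers no longer test p->on_rq directly as a boolean, which leaves room for additional ->on_rq states (for example a "migrating" state) without touching every call site again.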