@@ -184,7 +184,7 @@ struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
 		rq = task_rq(p);
 		raw_spin_lock(&rq->lock);
 		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
-			lockdep_pin_lock(&rq->lock);
+			rf->cookie = lockdep_pin_lock(&rq->lock);
 			return rq;
 		}
 		raw_spin_unlock(&rq->lock);
@@ -224,7 +224,7 @@ struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
 		 * pair with the WMB to ensure we must then also see migrating.
 		 */
 		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
-			lockdep_pin_lock(&rq->lock);
+			rf->cookie = lockdep_pin_lock(&rq->lock);
 			return rq;
 		}
 		raw_spin_unlock(&rq->lock);
@@ -1193,9 +1193,9 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
 		 * OK, since we're going to drop the lock immediately
 		 * afterwards anyway.
 		 */
-		lockdep_unpin_lock(&rq->lock);
+		lockdep_unpin_lock(&rq->lock, rf.cookie);
 		rq = move_queued_task(rq, p, dest_cpu);
-		lockdep_pin_lock(&rq->lock);
+		lockdep_repin_lock(&rq->lock, rf.cookie);
 	}
 out:
 	task_rq_unlock(rq, p, &rf);
@@ -1669,8 +1669,8 @@ static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_fl
 /*
  * Mark the task runnable and perform wakeup-preemption.
  */
-static void
-ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
+static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags,
+			   struct pin_cookie cookie)
 {
 	check_preempt_curr(rq, p, wake_flags);
 	p->state = TASK_RUNNING;
@@ -1682,9 +1682,9 @@ ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
 		 * Our task @p is fully woken up and running; so its safe to
 		 * drop the rq->lock, hereafter rq is only used for statistics.
 		 */
-		lockdep_unpin_lock(&rq->lock);
+		lockdep_unpin_lock(&rq->lock, cookie);
 		p->sched_class->task_woken(rq, p);
-		lockdep_pin_lock(&rq->lock);
+		lockdep_repin_lock(&rq->lock, cookie);
 	}
 
 	if (rq->idle_stamp) {
@@ -1702,7 +1702,8 @@ ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
 }
 
 static void
-ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
+ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
+		 struct pin_cookie cookie)
 {
 	lockdep_assert_held(&rq->lock);
 
@@ -1712,7 +1713,7 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
 #endif
 
 	ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
-	ttwu_do_wakeup(rq, p, wake_flags);
+	ttwu_do_wakeup(rq, p, wake_flags, cookie);
 }
 
 /*
@@ -1731,7 +1732,7 @@ static int ttwu_remote(struct task_struct *p, int wake_flags)
 	if (task_on_rq_queued(p)) {
 		/* check_preempt_curr() may use rq clock */
 		update_rq_clock(rq);
-		ttwu_do_wakeup(rq, p, wake_flags);
+		ttwu_do_wakeup(rq, p, wake_flags, rf.cookie);
 		ret = 1;
 	}
 	__task_rq_unlock(rq, &rf);
@@ -1744,6 +1745,7 @@ void sched_ttwu_pending(void)
 {
 	struct rq *rq = this_rq();
 	struct llist_node *llist = llist_del_all(&rq->wake_list);
+	struct pin_cookie cookie;
 	struct task_struct *p;
 	unsigned long flags;
 
@@ -1751,15 +1753,15 @@ void sched_ttwu_pending(void)
 		return;
 
 	raw_spin_lock_irqsave(&rq->lock, flags);
-	lockdep_pin_lock(&rq->lock);
+	cookie = lockdep_pin_lock(&rq->lock);
 
 	while (llist) {
 		p = llist_entry(llist, struct task_struct, wake_entry);
 		llist = llist_next(llist);
-		ttwu_do_activate(rq, p, 0);
+		ttwu_do_activate(rq, p, 0, cookie);
 	}
 
-	lockdep_unpin_lock(&rq->lock);
+	lockdep_unpin_lock(&rq->lock, cookie);
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
@@ -1846,6 +1848,7 @@ bool cpus_share_cache(int this_cpu, int that_cpu)
 static void ttwu_queue(struct task_struct *p, int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
+	struct pin_cookie cookie;
 
 #if defined(CONFIG_SMP)
 	if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
@@ -1856,9 +1859,9 @@ static void ttwu_queue(struct task_struct *p, int cpu)
 #endif
 
 	raw_spin_lock(&rq->lock);
-	lockdep_pin_lock(&rq->lock);
-	ttwu_do_activate(rq, p, 0);
-	lockdep_unpin_lock(&rq->lock);
+	cookie = lockdep_pin_lock(&rq->lock);
+	ttwu_do_activate(rq, p, 0, cookie);
+	lockdep_unpin_lock(&rq->lock, cookie);
 	raw_spin_unlock(&rq->lock);
 }
 
@@ -2055,7 +2058,7 @@ out:
  * ensure that this_rq() is locked, @p is bound to this_rq() and not
  * the current task.
  */
-static void try_to_wake_up_local(struct task_struct *p)
+static void try_to_wake_up_local(struct task_struct *p, struct pin_cookie cookie)
 {
 	struct rq *rq = task_rq(p);
 
@@ -2072,11 +2075,11 @@ static void try_to_wake_up_local(struct task_struct *p)
 		 * disabled avoiding further scheduler activity on it and we've
 		 * not yet picked a replacement task.
 		 */
-		lockdep_unpin_lock(&rq->lock);
+		lockdep_unpin_lock(&rq->lock, cookie);
 		raw_spin_unlock(&rq->lock);
 		raw_spin_lock(&p->pi_lock);
 		raw_spin_lock(&rq->lock);
-		lockdep_pin_lock(&rq->lock);
+		lockdep_repin_lock(&rq->lock, cookie);
 	}
 
 	if (!(p->state & TASK_NORMAL))
@@ -2087,7 +2090,7 @@ static void try_to_wake_up_local(struct task_struct *p)
 	if (!task_on_rq_queued(p))
 		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
 
-	ttwu_do_wakeup(rq, p, 0);
+	ttwu_do_wakeup(rq, p, 0, cookie);
 	if (schedstat_enabled())
 		ttwu_stat(p, smp_processor_id(), 0);
 out:
@@ -2515,9 +2518,9 @@ void wake_up_new_task(struct task_struct *p)
 		 * Nothing relies on rq->lock after this, so its fine to
 		 * drop it.
 		 */
-		lockdep_unpin_lock(&rq->lock);
+		lockdep_unpin_lock(&rq->lock, rf.cookie);
 		p->sched_class->task_woken(rq, p);
-		lockdep_pin_lock(&rq->lock);
+		lockdep_repin_lock(&rq->lock, rf.cookie);
 	}
 #endif
 	task_rq_unlock(rq, p, &rf);
@@ -2782,7 +2785,7 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev)
  */
 static __always_inline struct rq *
 context_switch(struct rq *rq, struct task_struct *prev,
-	       struct task_struct *next)
+	       struct task_struct *next, struct pin_cookie cookie)
 {
 	struct mm_struct *mm, *oldmm;
 
@@ -2814,7 +2817,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
 	 * of the scheduler it's an obvious special-case), so we
 	 * do an early lockdep release here:
 	 */
-	lockdep_unpin_lock(&rq->lock);
+	lockdep_unpin_lock(&rq->lock, cookie);
 	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
 
 	/* Here we just switch the register state and the stack. */
@@ -3154,7 +3157,7 @@ static inline void schedule_debug(struct task_struct *prev)
  * Pick up the highest-prio task:
  */
 static inline struct task_struct *
-pick_next_task(struct rq *rq, struct task_struct *prev)
+pick_next_task(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
 {
 	const struct sched_class *class = &fair_sched_class;
 	struct task_struct *p;
@@ -3165,20 +3168,20 @@ pick_next_task(struct rq *rq, struct task_struct *prev)
 	 */
 	if (likely(prev->sched_class == class &&
 		   rq->nr_running == rq->cfs.h_nr_running)) {
-		p = fair_sched_class.pick_next_task(rq, prev);
+		p = fair_sched_class.pick_next_task(rq, prev, cookie);
 		if (unlikely(p == RETRY_TASK))
 			goto again;
 
 		/* assumes fair_sched_class->next == idle_sched_class */
 		if (unlikely(!p))
-			p = idle_sched_class.pick_next_task(rq, prev);
+			p = idle_sched_class.pick_next_task(rq, prev, cookie);
 
 		return p;
 	}
 
 again:
 	for_each_class(class) {
-		p = class->pick_next_task(rq, prev);
+		p = class->pick_next_task(rq, prev, cookie);
 		if (p) {
 			if (unlikely(p == RETRY_TASK))
 				goto again;
@@ -3232,6 +3235,7 @@ static void __sched notrace __schedule(bool preempt)
 {
 	struct task_struct *prev, *next;
 	unsigned long *switch_count;
+	struct pin_cookie cookie;
 	struct rq *rq;
 	int cpu;
 
@@ -3265,7 +3269,7 @@ static void __sched notrace __schedule(bool preempt)
 	 */
 	smp_mb__before_spinlock();
 	raw_spin_lock(&rq->lock);
-	lockdep_pin_lock(&rq->lock);
+	cookie = lockdep_pin_lock(&rq->lock);
 
 	rq->clock_skip_update <<= 1; /* promote REQ to ACT */
 
@@ -3287,7 +3291,7 @@ static void __sched notrace __schedule(bool preempt)
 
 				to_wakeup = wq_worker_sleeping(prev);
 				if (to_wakeup)
-					try_to_wake_up_local(to_wakeup);
+					try_to_wake_up_local(to_wakeup, cookie);
 			}
 		}
 		switch_count = &prev->nvcsw;
@@ -3296,7 +3300,7 @@ static void __sched notrace __schedule(bool preempt)
 	if (task_on_rq_queued(prev))
 		update_rq_clock(rq);
 
-	next = pick_next_task(rq, prev);
+	next = pick_next_task(rq, prev, cookie);
 	clear_tsk_need_resched(prev);
 	clear_preempt_need_resched();
 	rq->clock_skip_update = 0;
@@ -3307,9 +3311,9 @@ static void __sched notrace __schedule(bool preempt)
 		++*switch_count;
 
 		trace_sched_switch(preempt, prev, next);
-		rq = context_switch(rq, prev, next); /* unlocks the rq */
+		rq = context_switch(rq, prev, next, cookie); /* unlocks the rq */
 	} else {
-		lockdep_unpin_lock(&rq->lock);
+		lockdep_unpin_lock(&rq->lock, cookie);
 		raw_spin_unlock_irq(&rq->lock);
 	}
 
@@ -5392,6 +5396,7 @@ static void migrate_tasks(struct rq *dead_rq)
 {
 	struct rq *rq = dead_rq;
 	struct task_struct *next, *stop = rq->stop;
+	struct pin_cookie cookie;
 	int dest_cpu;
 
 	/*
@@ -5423,8 +5428,8 @@ static void migrate_tasks(struct rq *dead_rq)
 		/*
 		 * pick_next_task assumes pinned rq->lock.
 		 */
-		lockdep_pin_lock(&rq->lock);
-		next = pick_next_task(rq, &fake_task);
+		cookie = lockdep_pin_lock(&rq->lock);
+		next = pick_next_task(rq, &fake_task, cookie);
 		BUG_ON(!next);
 		next->sched_class->put_prev_task(rq, next);
 
@@ -5437,7 +5442,7 @@ static void migrate_tasks(struct rq *dead_rq)
 		 * because !cpu_active at this point, which means load-balance
 		 * will not interfere. Also, stop-machine.
 		 */
-		lockdep_unpin_lock(&rq->lock);
+		lockdep_unpin_lock(&rq->lock, cookie);
 		raw_spin_unlock(&rq->lock);
 		raw_spin_lock(&next->pi_lock);
 		raw_spin_lock(&rq->lock);
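
Every hunk above follows the same pattern: lockdep_pin_lock() now returns a struct pin_cookie, and that cookie must be threaded through to the matching lockdep_unpin_lock() or lockdep_repin_lock() call (directly, via a function argument, or via rf.cookie in struct rq_flags). Only the code path that pinned rq->lock can therefore temporarily unpin it. The standalone sketch below models that idea outside the kernel; pinned_lock, lock_pin(), lock_unpin() and lock_repin() are illustrative stand-ins, not the lockdep API, and the random cookie value is only an assumption about how an unforgeable token could look.

```c
/* Mock of the cookie-based pin/unpin/repin pattern; not kernel code. */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct pin_cookie {
	unsigned int val;
};

struct pinned_lock {
	pthread_mutex_t mutex;
	unsigned int pin;	/* 0 means "not pinned" */
};

/* Pin the (already held) lock and hand the caller a token for it. */
static struct pin_cookie lock_pin(struct pinned_lock *lock)
{
	struct pin_cookie cookie = { .val = (unsigned int)rand() | 1u };

	assert(lock->pin == 0);		/* must not already be pinned */
	lock->pin = cookie.val;
	return cookie;
}

/* Only the holder of the matching cookie may unpin. */
static void lock_unpin(struct pinned_lock *lock, struct pin_cookie cookie)
{
	assert(lock->pin == cookie.val);
	lock->pin = 0;
}

/* Re-establish the pin after a temporary unpin, using the same cookie. */
static void lock_repin(struct pinned_lock *lock, struct pin_cookie cookie)
{
	assert(lock->pin == 0);
	lock->pin = cookie.val;
}

int main(void)
{
	struct pinned_lock rq_lock = { PTHREAD_MUTEX_INITIALIZER, 0 };
	struct pin_cookie cookie;

	pthread_mutex_lock(&rq_lock.mutex);
	cookie = lock_pin(&rq_lock);	/* cookie = lockdep_pin_lock(&rq->lock) */

	/* ... work that relies on the lock staying held ... */

	lock_unpin(&rq_lock, cookie);	/* lockdep_unpin_lock(&rq->lock, cookie) */
	/* ... a callback that may drop and retake the lock ... */
	lock_repin(&rq_lock, cookie);	/* lockdep_repin_lock(&rq->lock, cookie) */

	lock_unpin(&rq_lock, cookie);
	pthread_mutex_unlock(&rq_lock.mutex);
	printf("pin/unpin/repin sequence completed\n");
	return 0;
}
```

The point of returning a value rather than keeping a bare pin count is visible in the asserts: an unpin without the original cookie trips immediately, so random code deep in a callee cannot silently unpin a lock that its caller still expects to be held.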