@@ -1451,70 +1451,11 @@ static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
 static inline void sched_avg_update(struct rq *rq) { }
 #endif
 
-/*
- * __task_rq_lock - lock the rq @p resides on.
- */
-static inline struct rq *__task_rq_lock(struct task_struct *p)
-	__acquires(rq->lock)
-{
-	struct rq *rq;
-
-	lockdep_assert_held(&p->pi_lock);
-
-	for (;;) {
-		rq = task_rq(p);
-		raw_spin_lock(&rq->lock);
-		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
-			lockdep_pin_lock(&rq->lock);
-			return rq;
-		}
-		raw_spin_unlock(&rq->lock);
-
-		while (unlikely(task_on_rq_migrating(p)))
-			cpu_relax();
-	}
-}
-
-/*
- * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
- */
-static inline struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
+struct rq *__task_rq_lock(struct task_struct *p)
+	__acquires(rq->lock);
+struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 	__acquires(p->pi_lock)
-	__acquires(rq->lock)
-{
-	struct rq *rq;
-
-	for (;;) {
-		raw_spin_lock_irqsave(&p->pi_lock, *flags);
-		rq = task_rq(p);
-		raw_spin_lock(&rq->lock);
-		/*
-		 * move_queued_task()		task_rq_lock()
-		 *
-		 *	ACQUIRE (rq->lock)
-		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
-		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
-		 *	[S] ->cpu = new_cpu		[L] task_rq()
-		 *					[L] ->on_rq
-		 *	RELEASE (rq->lock)
-		 *
-		 * If we observe the old cpu in task_rq_lock, the acquire of
-		 * the old rq->lock will fully serialize against the stores.
-		 *
-		 * If we observe the new cpu in task_rq_lock, the acquire will
-		 * pair with the WMB to ensure we must then also see migrating.
-		 */
-		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
-			lockdep_pin_lock(&rq->lock);
-			return rq;
-		}
-		raw_spin_unlock(&rq->lock);
-		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
-
-		while (unlikely(task_on_rq_migrating(p)))
-			cpu_relax();
-	}
-}
+	__acquires(rq->lock);
 
 static inline void __task_rq_unlock(struct rq *rq)
 	__releases(rq->lock)