@@ -306,82 +306,6 @@ __read_mostly int scheduler_running;
  */
 int sysctl_sched_rt_runtime = 950000;
 
-/*
- * __task_rq_lock - lock the rq @p resides on.
- */
-static inline struct rq *__task_rq_lock(struct task_struct *p)
-	__acquires(rq->lock)
-{
-	struct rq *rq;
-
-	lockdep_assert_held(&p->pi_lock);
-
-	for (;;) {
-		rq = task_rq(p);
-		raw_spin_lock(&rq->lock);
-		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
-			return rq;
-		raw_spin_unlock(&rq->lock);
-
-		while (unlikely(task_on_rq_migrating(p)))
-			cpu_relax();
-	}
-}
-
-/*
- * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
- */
-static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
-	__acquires(p->pi_lock)
-	__acquires(rq->lock)
-{
-	struct rq *rq;
-
-	for (;;) {
-		raw_spin_lock_irqsave(&p->pi_lock, *flags);
-		rq = task_rq(p);
-		raw_spin_lock(&rq->lock);
-		/*
-		 *	move_queued_task()		task_rq_lock()
-		 *
-		 *	ACQUIRE (rq->lock)
-		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
-		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
-		 *	[S] ->cpu = new_cpu		[L] task_rq()
-		 *					[L] ->on_rq
-		 *	RELEASE (rq->lock)
-		 *
-		 * If we observe the old cpu in task_rq_lock, the acquire of
-		 * the old rq->lock will fully serialize against the stores.
-		 *
-		 * If we observe the new cpu in task_rq_lock, the acquire will
-		 * pair with the WMB to ensure we must then also see migrating.
-		 */
-		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
-			return rq;
-		raw_spin_unlock(&rq->lock);
-		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
-
-		while (unlikely(task_on_rq_migrating(p)))
-			cpu_relax();
-	}
-}
-
-static void __task_rq_unlock(struct rq *rq)
-	__releases(rq->lock)
-{
-	raw_spin_unlock(&rq->lock);
-}
-
-static inline void
-task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
-	__releases(rq->lock)
-	__releases(p->pi_lock)
-{
-	raw_spin_unlock(&rq->lock);
-	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
-}
-
 /*
  * this_rq_lock - lock this runqueue and disable interrupts.
  */
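
For reference, here is a minimal userspace C sketch of the lock-and-revalidate pattern the removed __task_rq_lock()/task_rq_lock() helpers implement: lock the container we believe owns the object, re-check ownership under the lock, and wait out an in-flight migration before retrying. The struct record/struct bucket types and the record_lock()/record_unlock() helpers are invented for illustration only; the C11 atomics, pthreads, and sched_yield() calls are the only real APIs used, and this is not kernel code.

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>

struct bucket {
	pthread_mutex_t lock;
};

struct record {
	struct bucket *_Atomic home;	/* bucket the record currently lives in */
	atomic_bool migrating;		/* set while the record is being moved */
};

/*
 * Lock the bucket @r resides in, mirroring __task_rq_lock(): take the
 * lock of the bucket we think owns the record, then re-check ownership
 * under the lock and retry if the record migrated underneath us.
 */
static struct bucket *record_lock(struct record *r)
{
	struct bucket *b;

	for (;;) {
		b = atomic_load(&r->home);
		pthread_mutex_lock(&b->lock);
		if (b == atomic_load(&r->home) && !atomic_load(&r->migrating))
			return b;
		pthread_mutex_unlock(&b->lock);

		/* let an in-flight migration finish before retrying */
		while (atomic_load(&r->migrating))
			sched_yield();	/* userspace stand-in for cpu_relax() */
	}
}

static void record_unlock(struct bucket *b)
{
	pthread_mutex_unlock(&b->lock);
}

In this sketch each bucket's mutex would be initialised with pthread_mutex_init() before use; the migrating flag plays the role of task_on_rq_migrating(), and the home pointer plays the role of task_rq().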