@@ -957,6 +957,8 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 #define raw_rq()		raw_cpu_ptr(&runqueues)
 
+extern void update_rq_clock(struct rq *rq);
+
 static inline u64 __rq_clock_broken(struct rq *rq)
 {
 	return READ_ONCE(rq->clock);
@@ -1075,6 +1077,86 @@ static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
 #endif
 }
 
+struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
+	__acquires(rq->lock);
+
+struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
+	__acquires(p->pi_lock)
+	__acquires(rq->lock);
+
+static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
+	__releases(rq->lock)
+{
+	rq_unpin_lock(rq, rf);
+	raw_spin_unlock(&rq->lock);
+}
+
+static inline void
+task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
+	__releases(rq->lock)
+	__releases(p->pi_lock)
+{
+	rq_unpin_lock(rq, rf);
+	raw_spin_unlock(&rq->lock);
+	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
+}
+
+static inline void
+rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
+	__acquires(rq->lock)
+{
+	raw_spin_lock_irqsave(&rq->lock, rf->flags);
+	rq_pin_lock(rq, rf);
+}
+
+static inline void
+rq_lock_irq(struct rq *rq, struct rq_flags *rf)
+	__acquires(rq->lock)
+{
+	raw_spin_lock_irq(&rq->lock);
+	rq_pin_lock(rq, rf);
+}
+
+static inline void
+rq_lock(struct rq *rq, struct rq_flags *rf)
+	__acquires(rq->lock)
+{
+	raw_spin_lock(&rq->lock);
+	rq_pin_lock(rq, rf);
+}
+
+static inline void
+rq_relock(struct rq *rq, struct rq_flags *rf)
+	__acquires(rq->lock)
+{
+	raw_spin_lock(&rq->lock);
+	rq_repin_lock(rq, rf);
+}
+
+static inline void
+rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
+	__releases(rq->lock)
+{
+	rq_unpin_lock(rq, rf);
+	raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
+}
+
+static inline void
+rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
+	__releases(rq->lock)
+{
+	rq_unpin_lock(rq, rf);
+	raw_spin_unlock_irq(&rq->lock);
+}
+
+static inline void
+rq_unlock(struct rq *rq, struct rq_flags *rf)
+	__releases(rq->lock)
+{
+	rq_unpin_lock(rq, rf);
+	raw_spin_unlock(&rq->lock);
+}
+
 #ifdef CONFIG_NUMA
 enum numa_topology_type {
 	NUMA_DIRECT,
@@ -1717,8 +1799,6 @@ static inline void sub_nr_running(struct rq *rq, unsigned count)
 	sched_update_tick_dependency(rq);
 }
 
-extern void update_rq_clock(struct rq *rq);
-
 extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
 extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
 
@@ -1783,86 +1863,6 @@ unsigned long arch_scale_cpu_capacity(void __always_unused *sd, int cpu)
 #endif
 #endif
 
-struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
-	__acquires(rq->lock);
-
-struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
-	__acquires(p->pi_lock)
-	__acquires(rq->lock);
-
-static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
-	__releases(rq->lock)
-{
-	rq_unpin_lock(rq, rf);
-	raw_spin_unlock(&rq->lock);
-}
-
-static inline void
-task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
-	__releases(rq->lock)
-	__releases(p->pi_lock)
-{
-	rq_unpin_lock(rq, rf);
-	raw_spin_unlock(&rq->lock);
-	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
-}
-
-static inline void
-rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
-	__acquires(rq->lock)
-{
-	raw_spin_lock_irqsave(&rq->lock, rf->flags);
-	rq_pin_lock(rq, rf);
-}
-
-static inline void
-rq_lock_irq(struct rq *rq, struct rq_flags *rf)
-	__acquires(rq->lock)
-{
-	raw_spin_lock_irq(&rq->lock);
-	rq_pin_lock(rq, rf);
-}
-
-static inline void
-rq_lock(struct rq *rq, struct rq_flags *rf)
-	__acquires(rq->lock)
-{
-	raw_spin_lock(&rq->lock);
-	rq_pin_lock(rq, rf);
-}
-
-static inline void
-rq_relock(struct rq *rq, struct rq_flags *rf)
-	__acquires(rq->lock)
-{
-	raw_spin_lock(&rq->lock);
-	rq_repin_lock(rq, rf);
-}
-
-static inline void
-rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
-	__releases(rq->lock)
-{
-	rq_unpin_lock(rq, rf);
-	raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
-}
-
-static inline void
-rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
-	__releases(rq->lock)
-{
-	rq_unpin_lock(rq, rf);
-	raw_spin_unlock_irq(&rq->lock);
-}
-
-static inline void
-rq_unlock(struct rq *rq, struct rq_flags *rf)
-	__releases(rq->lock)
-{
-	rq_unpin_lock(rq, rf);
-	raw_spin_unlock(&rq->lock);
-}
-
 #ifdef CONFIG_SMP
 #ifdef CONFIG_PREEMPT
 