@@ -1907,9 +1907,8 @@ static void push_rt_tasks(struct rq *rq)
  * the rt_loop_next will cause the iterator to perform another scan.
  *
  */
-static int rto_next_cpu(struct rq *rq)
+static int rto_next_cpu(struct root_domain *rd)
 {
-	struct root_domain *rd = rq->rd;
 	int next;
 	int cpu;
 
@@ -1985,7 +1984,7 @@ static void tell_cpu_to_push(struct rq *rq)
 	 * Otherwise it is finishing up and an ipi needs to be sent.
 	 */
 	if (rq->rd->rto_cpu < 0)
-		cpu = rto_next_cpu(rq);
+		cpu = rto_next_cpu(rq->rd);
 
 	raw_spin_unlock(&rq->rd->rto_lock);
 
@@ -1998,6 +1997,8 @@ static void tell_cpu_to_push(struct rq *rq)
 /* Called from hardirq context */
 void rto_push_irq_work_func(struct irq_work *work)
 {
+	struct root_domain *rd =
+		container_of(work, struct root_domain, rto_push_work);
 	struct rq *rq;
 	int cpu;
 
@@ -2013,18 +2014,18 @@ void rto_push_irq_work_func(struct irq_work *work)
 		raw_spin_unlock(&rq->lock);
 	}
 
-	raw_spin_lock(&rq->rd->rto_lock);
+	raw_spin_lock(&rd->rto_lock);
 
 	/* Pass the IPI to the next rt overloaded queue */
-	cpu = rto_next_cpu(rq);
+	cpu = rto_next_cpu(rd);
 
-	raw_spin_unlock(&rq->rd->rto_lock);
+	raw_spin_unlock(&rd->rto_lock);
 
 	if (cpu < 0)
 		return;
 
 	/* Try the next RT overloaded CPU */
-	irq_work_queue_on(&rq->rd->rto_push_work, cpu);
+	irq_work_queue_on(&rd->rto_push_work, cpu);
 }
 #endif /* HAVE_RT_PUSH_IPI */
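
The change above is the container_of() pattern: rto_push_work is embedded
inside struct root_domain, so the callback can recover the root_domain that
actually queued the work from the work pointer alone, instead of going through
rq->rd, which can be swapped out underneath it (e.g. during CPU hotplug)
between queueing the work and running it. Since the recovered rd is the one
that owns rto_push_work, the rto_lock taken and released in
rto_push_irq_work_func() is guaranteed to be the same lock. What follows is a
minimal userspace sketch of the pattern; the struct definitions and the
push_work_func() name are illustrative stand-ins for the kernel's irq_work and
root_domain, not the real layouts.

/* A minimal userspace sketch of container_of(); illustrative only. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Stand-in for the kernel's struct irq_work. */
struct irq_work {
	void (*func)(struct irq_work *work);
};

/* Stand-in for the kernel's struct root_domain. */
struct root_domain {
	int rto_cpu;
	struct irq_work rto_push_work;
};

/*
 * Mirrors rto_push_irq_work_func(): the callback receives only the
 * embedded irq_work, and recovers the enclosing root_domain from it.
 */
static void push_work_func(struct irq_work *work)
{
	struct root_domain *rd =
		container_of(work, struct root_domain, rto_push_work);

	printf("recovered rd, rto_cpu = %d\n", rd->rto_cpu);
}

int main(void)
{
	struct root_domain rd = { .rto_cpu = 3 };

	rd.rto_push_work.func = push_work_func;
	/* Only &rd.rto_push_work is passed, yet 'rd' is found. */
	rd.rto_push_work.func(&rd.rto_push_work);
	return 0;
}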