@@ -1634,6 +1634,25 @@ static void ttwu_queue_remote(struct task_struct *p, int cpu)
 	}
 }
 
+void wake_up_if_idle(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long flags;
+
+	if (!is_idle_task(rq->curr))
+		return;
+
+	if (set_nr_if_polling(rq->idle)) {
+		trace_sched_wake_idle_without_ipi(cpu);
+	} else {
+		raw_spin_lock_irqsave(&rq->lock, flags);
+		if (is_idle_task(rq->curr))
+			smp_send_reschedule(cpu);
+		/* Else cpu is not in idle, do nothing here */
+		raw_spin_unlock_irqrestore(&rq->lock, flags);
+	}
+}
+
 bool cpus_share_cache(int this_cpu, int that_cpu)
 {
 	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
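
For context, a typical consumer of the new API might walk the online CPU mask and kick every remote idle CPU. The sketch below is illustrative only and not part of the patch; the helper name wake_all_idle_cpus_sketch and the surrounding loop are assumptions about how a caller could use wake_up_if_idle().

#include <linux/cpumask.h>
#include <linux/preempt.h>
#include <linux/smp.h>

/*
 * Hypothetical caller (not in this patch): wake every remote CPU
 * that is currently running its idle task.
 */
static void wake_all_idle_cpus_sketch(void)
{
	int cpu;

	/* Disable preemption so smp_processor_id() stays stable. */
	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;	/* this CPU is obviously not idle */
		wake_up_if_idle(cpu);
	}
	preempt_enable();
}

Note that wake_up_if_idle() itself is best-effort by design: the unlocked is_idle_task() check is re-done under rq->lock, and if the polling idle loop is active, set_nr_if_polling() sets TIF_NEED_RESCHED and skips the IPI entirely, which is what the trace_sched_wake_idle_without_ipi() tracepoint records.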