@@ -519,7 +519,7 @@ static inline void init_hrtick(void)
 	__old;							\
 })
 
-#ifdef TIF_POLLING_NRFLAG
+#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
 /*
  * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
  * this avoids any races wrt polling state changes and thereby avoids
@@ -530,12 +530,44 @@ static bool set_nr_and_not_polling(struct task_struct *p)
 	struct thread_info *ti = task_thread_info(p);
 	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
 }
+
+/*
+ * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
+ *
+ * If this returns true, then the idle task promises to call
+ * sched_ttwu_pending() and reschedule soon.
+ */
+static bool set_nr_if_polling(struct task_struct *p)
+{
+	struct thread_info *ti = task_thread_info(p);
+	typeof(ti->flags) old, val = ACCESS_ONCE(ti->flags);
+
+	for (;;) {
+		if (!(val & _TIF_POLLING_NRFLAG))
+			return false;
+		if (val & _TIF_NEED_RESCHED)
+			return true;
+		old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
+		if (old == val)
+			break;
+		val = old;
+	}
+	return true;
+}
+
 #else
 static bool set_nr_and_not_polling(struct task_struct *p)
 {
 	set_tsk_need_resched(p);
 	return true;
 }
+
+#ifdef CONFIG_SMP
+static bool set_nr_if_polling(struct task_struct *p)
+{
+	return false;
+}
+#endif
 #endif
 
 /*
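The new set_nr_if_polling() is a compare-and-swap retry loop: it keeps re-reading the flags word until either the polling bit is gone (the caller must fall back to an IPI), the resched bit is already set (nothing left to do), or the cmpxchg succeeds in setting _TIF_NEED_RESCHED against a value that still had _TIF_POLLING_NRFLAG. A minimal userspace sketch of the same pattern, using C11 atomics instead of the kernel's thread_info flags and cmpxchg() (the flag values and the helper name below are made up for illustration):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define FLAG_POLLING		(1u << 0)	/* stand-in for _TIF_POLLING_NRFLAG */
#define FLAG_NEED_RESCHED	(1u << 1)	/* stand-in for _TIF_NEED_RESCHED */

/*
 * Set FLAG_NEED_RESCHED only if FLAG_POLLING is (still) set, mirroring the
 * cmpxchg loop above.  Returns true when the target either already had the
 * resched bit or we managed to set it while it was polling, i.e. no IPI is
 * needed.
 */
static bool set_nr_if_polling_like(atomic_uint *flags)
{
	unsigned int val = atomic_load(flags);

	for (;;) {
		if (!(val & FLAG_POLLING))
			return false;		/* stopped polling: caller must IPI */
		if (val & FLAG_NEED_RESCHED)
			return true;		/* already marked for reschedule */
		if (atomic_compare_exchange_weak(flags, &val,
						 val | FLAG_NEED_RESCHED))
			return true;		/* set while polling was observed */
		/* CAS failed: val now holds the fresh value, go around again */
	}
}

int main(void)
{
	atomic_uint polling = FLAG_POLLING;
	atomic_uint running = 0;

	printf("polling target:     %s\n", set_nr_if_polling_like(&polling) ? "no IPI" : "IPI");
	printf("non-polling target: %s\n", set_nr_if_polling_like(&running) ? "no IPI" : "IPI");
	return 0;
}

Unlike set_nr_and_not_polling(), which always sets the resched bit via fetch_or() and only reports whether the polling bit was clear, this variant refuses to touch the flags at all once the target has stopped polling.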
@@ -1490,13 +1522,17 @@ static int ttwu_remote(struct task_struct *p, int wake_flags)
 }
 
 #ifdef CONFIG_SMP
-static void sched_ttwu_pending(void)
+void sched_ttwu_pending(void)
 {
 	struct rq *rq = this_rq();
 	struct llist_node *llist = llist_del_all(&rq->wake_list);
 	struct task_struct *p;
+	unsigned long flags;
 
-	raw_spin_lock(&rq->lock);
+	if (!llist)
+		return;
+
+	raw_spin_lock_irqsave(&rq->lock, flags);
 
 	while (llist) {
 		p = llist_entry(llist, struct task_struct, wake_entry);
@@ -1504,7 +1540,7 @@ static void sched_ttwu_pending(void)
 		ttwu_do_activate(rq, p, 0);
 	}
 
-	raw_spin_unlock(&rq->lock);
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
 void scheduler_ipi(void)
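Two things change in sched_ttwu_pending(): it now bails out before taking rq->lock when the wake_list is empty, and it drops its static and switches to the irqsave/irqrestore lock variants so it can be called from contexts where interrupts are not already disabled, rather than only from scheduler_ipi(). The wake_list it drains is the usual lock-free llist pattern: remote wakers push entries with llist_add(), and the owning CPU detaches the whole batch at once with llist_del_all() before walking it under its rq->lock. A rough userspace analogue of that push/detach-all idea (this is not the kernel's llist implementation; the names are illustrative):

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct wake_node {
	struct wake_node *next;
	int id;
};

/* Producer side: push one node, lock-free (roughly what llist_add() does). */
static void wake_list_add(_Atomic(struct wake_node *) *head, struct wake_node *n)
{
	struct wake_node *first = atomic_load(head);

	do {
		n->next = first;
	} while (!atomic_compare_exchange_weak(head, &first, n));
}

/* Consumer side: detach the whole batch in one go (like llist_del_all()). */
static struct wake_node *wake_list_del_all(_Atomic(struct wake_node *) *head)
{
	return atomic_exchange(head, NULL);
}

int main(void)
{
	_Atomic(struct wake_node *) wake_list = NULL;
	struct wake_node a = { .id = 1 }, b = { .id = 2 };

	wake_list_add(&wake_list, &a);		/* what remote wakers do */
	wake_list_add(&wake_list, &b);

	/* what sched_ttwu_pending() does, under its rq->lock */
	for (struct wake_node *n = wake_list_del_all(&wake_list); n; n = n->next)
		printf("activate task %d\n", n->id);
	return 0;
}

As with the kernel's llist, the detached batch comes back in reverse insertion order; the consumer simply processes every entry, so the order does not matter here.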
@@ -1550,8 +1586,14 @@ void scheduler_ipi(void)
 
 static void ttwu_queue_remote(struct task_struct *p, int cpu)
 {
-	if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list))
-		smp_send_reschedule(cpu);
+	struct rq *rq = cpu_rq(cpu);
+
+	if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) {
+		if (!set_nr_if_polling(rq->idle))
+			smp_send_reschedule(cpu);
+		else
+			trace_sched_wake_idle_without_ipi(cpu);
+	}
 }
 
 bool cpus_share_cache(int this_cpu, int that_cpu)
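Taken together, ttwu_queue_remote() gains a fast path that wakes an idle, polling CPU without an IPI: the waker queues the task on the target's wake_list, and if set_nr_if_polling() manages to set TIF_NEED_RESCHED on the target's idle task it only fires the new trace_sched_wake_idle_without_ipi tracepoint, relying on the promise in the comment above that the polling idle loop will notice the flag, stop polling and call sched_ttwu_pending() itself. A toy two-thread model of that handshake (a simplified sketch, not the kernel's idle loop or wakeup path; the flag values and names are invented for illustration):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define FLAG_POLLING		(1u << 0)	/* "idle task is polling for work" */
#define FLAG_NEED_RESCHED	(1u << 1)	/* "please reschedule" */

static atomic_uint idle_flags = FLAG_POLLING;	/* the remote CPU's idle-task flags */
static atomic_int  pending;			/* stand-in for rq->wake_list */

/* Waker: queue the work first, then try the IPI-less poke. */
static void *waker(void *arg)
{
	bool no_ipi = false;
	unsigned int val;

	atomic_fetch_add(&pending, 1);		/* "llist_add(&p->wake_entry, ...)" */

	val = atomic_load(&idle_flags);
	for (;;) {				/* the set_nr_if_polling() loop */
		if (!(val & FLAG_POLLING))
			break;			/* not polling: would smp_send_reschedule() */
		if ((val & FLAG_NEED_RESCHED) ||
		    atomic_compare_exchange_weak(&idle_flags, &val,
						 val | FLAG_NEED_RESCHED)) {
			no_ipi = true;		/* "trace_sched_wake_idle_without_ipi()" */
			break;
		}
		/* CAS failed: val was reloaded, retry */
	}

	printf("waker: %s\n", no_ipi ? "skipped the IPI" : "sent an IPI");
	return NULL;
}

/* Idle "CPU": poll for NEED_RESCHED, then stop polling and drain the queue. */
static void *idle(void *arg)
{
	while (!(atomic_load(&idle_flags) & FLAG_NEED_RESCHED))
		;				/* polling idle, think mwait */

	atomic_fetch_and(&idle_flags, ~FLAG_POLLING);
	printf("idle: sched_ttwu_pending() would drain %d wakeup(s)\n",
	       atomic_load(&pending));
	return NULL;
}

int main(void)
{
	pthread_t i, w;

	pthread_create(&i, NULL, idle, NULL);
	pthread_create(&w, NULL, waker, NULL);
	pthread_join(w, NULL);
	pthread_join(i, NULL);
	return 0;
}

Even in this toy the ordering that matters is visible: the work is queued before the resched flag is set, so by the time the "idle CPU" observes NEED_RESCHED and drains its list, the new entry is already there. That mirrors the patch, where llist_add() happens before set_nr_if_polling() is attempted.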