@@ -505,6 +505,39 @@ static inline void init_hrtick(void)
 }
 #endif	/* CONFIG_SCHED_HRTICK */
 
+/*
+ * cmpxchg based fetch_or, macro so it works for different integer types
+ */
+#define fetch_or(ptr, val)						\
+({	typeof(*(ptr)) __old, __val = *(ptr);				\
+	for (;;) {							\
+		__old = cmpxchg((ptr), __val, __val | (val));		\
+		if (__old == __val)					\
+			break;						\
+		__val = __old;						\
+	}								\
+	__old;								\
+})
+
+#ifdef TIF_POLLING_NRFLAG
+/*
+ * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
+ * this avoids any races wrt polling state changes and thereby avoids
+ * spurious IPIs.
+ */
+static bool set_nr_and_not_polling(struct task_struct *p)
+{
+	struct thread_info *ti = task_thread_info(p);
+	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
+}
+#else
+static bool set_nr_and_not_polling(struct task_struct *p)
+{
+	set_tsk_need_resched(p);
+	return true;
+}
+#endif
+
 /*
  * resched_task - mark a task 'to be rescheduled now'.
  *
@@ -521,17 +554,15 @@ void resched_task(struct task_struct *p)
 	if (test_tsk_need_resched(p))
 		return;
 
-	set_tsk_need_resched(p);
-
 	cpu = task_cpu(p);
+
 	if (cpu == smp_processor_id()) {
+		set_tsk_need_resched(p);
 		set_preempt_need_resched();
 		return;
 	}
 
-	/* NEED_RESCHED must be visible before we test polling */
-	smp_mb();
-	if (!tsk_is_polling(p))
+	if (set_nr_and_not_polling(p))
 		smp_send_reschedule(cpu);
 }
 