@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_MWAIT_H
 #define _ASM_X86_MWAIT_H
 
+#include <linux/sched.h>
+
 #define MWAIT_SUBSTATE_MASK		0xf
 #define MWAIT_CSTATE_MASK		0xf
 #define MWAIT_SUBSTATE_SIZE		4
@@ -13,4 +15,42 @@
 
 #define MWAIT_ECX_INTERRUPT_BREAK	0x1
 
+static inline void __monitor(const void *eax, unsigned long ecx,
+			     unsigned long edx)
+{
+	/* "monitor %eax, %ecx, %edx;" */
+	asm volatile(".byte 0x0f, 0x01, 0xc8;"
+		     :: "a" (eax), "c" (ecx), "d"(edx));
+}
+
+static inline void __mwait(unsigned long eax, unsigned long ecx)
+{
+	/* "mwait %eax, %ecx;" */
+	asm volatile(".byte 0x0f, 0x01, 0xc9;"
+		     :: "a" (eax), "c" (ecx));
+}
+
+/*
+ * This uses the new MONITOR/MWAIT instructions on P4 processors with PNI,
+ * which can obviate the IPI otherwise needed to trigger a need_resched check.
+ * We execute MONITOR against need_resched and enter an optimized wait state
+ * through MWAIT. Whenever someone changes need_resched, we are woken
+ * up from MWAIT (without an IPI).
+ *
+ * Beginning with Core Duo processors, MWAIT can take hints based on CPU
+ * capability.
+ */
+static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
+{
+	if (!current_set_polling_and_test()) {
+		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
+			clflush((void *)&current_thread_info()->flags);
+
+		__monitor((void *)&current_thread_info()->flags, 0, 0);
+		if (!need_resched())
+			__mwait(eax, ecx);
+	}
+	__current_clr_polling();
+}
+
 #endif /* _ASM_X86_MWAIT_H */
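
For context, a minimal sketch of how an idle driver might consume this helper,
loosely modeled on how cpuidle drivers build MWAIT hints. The
enter_mwait_cstate() wrapper and its cstate/substate parameters are
hypothetical illustrations, not part of this patch; only the MWAIT_* macros and
mwait_idle_with_hints() come from the header above.

/*
 * Hypothetical caller (illustration only, not part of this patch).
 * The EAX hint encodes the target C-state in bits [7:4] and the
 * sub-state in bits [3:0], matching the MWAIT_* masks defined in the
 * header. MWAIT_ECX_INTERRUPT_BREAK in ECX asks MWAIT to treat
 * interrupts as break events even when interrupts are masked.
 */
static void enter_mwait_cstate(unsigned int cstate, unsigned int substate)
{
	unsigned long eax = ((cstate & MWAIT_CSTATE_MASK) << MWAIT_SUBSTATE_SIZE)
			    | (substate & MWAIT_SUBSTATE_MASK);

	/*
	 * Returns either when another CPU writes the monitored thread
	 * flags (e.g. setting TIF_NEED_RESCHED) or on an interrupt.
	 */
	mwait_idle_with_hints(eax, MWAIT_ECX_INTERRUPT_BREAK);
}

Because the monitored address is current_thread_info()->flags, a remote
set_tsk_need_resched() dirties the armed cacheline and wakes the idle CPU
without an IPI, which is the point of the polling-flag dance around
MONITOR/MWAIT in mwait_idle_with_hints().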