|
|
@@ -9,14 +9,6 @@
|
|
|
#include <linux/linkage.h>
|
|
|
#include <linux/list.h>
|
|
|
|
|
|
-/*
|
|
|
- * We use the MSB mostly because its available; see <linux/preempt_mask.h> for
|
|
|
- * the other bits -- can't include that header due to inclusion hell.
|
|
|
- */
|
|
|
-#define PREEMPT_NEED_RESCHED 0x80000000
|
|
|
-
|
|
|
-#include <asm/preempt.h>
|
|
|
-
|
|
|
/*
|
|
|
* We put the hardirq and softirq counter into the preemption
|
|
|
* counter. The bitmask has the following meaning:
|
|
|
@@ -30,11 +22,12 @@
|
|
|
* there are a few palaeontologic drivers which reenable interrupts in
|
|
|
* the handler, so we need more than one bit here.
|
|
|
*
|
|
|
- * PREEMPT_MASK: 0x000000ff
|
|
|
- * SOFTIRQ_MASK: 0x0000ff00
|
|
|
- * HARDIRQ_MASK: 0x000f0000
|
|
|
- * NMI_MASK: 0x00100000
|
|
|
- * PREEMPT_ACTIVE: 0x00200000
|
|
|
+ * PREEMPT_MASK: 0x000000ff
|
|
|
+ * SOFTIRQ_MASK: 0x0000ff00
|
|
|
+ * HARDIRQ_MASK: 0x000f0000
|
|
|
+ * NMI_MASK: 0x00100000
|
|
|
+ * PREEMPT_ACTIVE: 0x00200000
|
|
|
+ * PREEMPT_NEED_RESCHED: 0x80000000
|
|
|
*/
|
|
|
#define PREEMPT_BITS 8
|
|
|
#define SOFTIRQ_BITS 8
|
|
|
@@ -64,6 +57,12 @@
|
|
|
#define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS)
|
|
|
#define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
|
|
|
|
|
|
+/* We use the MSB mostly because it's available */
|
|
|
+#define PREEMPT_NEED_RESCHED 0x80000000
|
|
|
+
|
|
|
+/* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
|
|
|
+#include <asm/preempt.h>
|
|
|
+
|
|
|
#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
|
|
|
#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
|
|
|
#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
|
|
|
@@ -122,12 +121,6 @@
|
|
|
#define in_atomic_preempt_off() \
|
|
|
((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
|
|
|
|
|
|
-#ifdef CONFIG_PREEMPT_COUNT
|
|
|
-# define preemptible() (preempt_count() == 0 && !irqs_disabled())
|
|
|
-#else
|
|
|
-# define preemptible() 0
|
|
|
-#endif
|
|
|
-
|
|
|
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
|
|
|
extern void preempt_count_add(int val);
|
|
|
extern void preempt_count_sub(int val);
|
|
|
@@ -160,6 +153,8 @@ do { \
|
|
|
|
|
|
#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
|
|
|
|
|
|
+#define preemptible() (preempt_count() == 0 && !irqs_disabled())
|
|
|
+
|
|
|
#ifdef CONFIG_PREEMPT
|
|
|
#define preempt_enable() \
|
|
|
do { \
|
|
|
@@ -232,6 +227,7 @@ do { \
|
|
|
#define preempt_disable_notrace() barrier()
|
|
|
#define preempt_enable_no_resched_notrace() barrier()
|
|
|
#define preempt_enable_notrace() barrier()
|
|
|
+#define preemptible() 0
|
|
|
|
|
|
#endif /* CONFIG_PREEMPT_COUNT */
|
|
|
|