@@ -9,6 +9,7 @@
 #include <linux/log2.h>
 #include <linux/gfp.h>
 #include <linux/slab.h>
+#include <linux/atomic.h>
 
 #include <asm/paravirt.h>
 #include <asm/qspinlock.h>
@@ -21,6 +22,7 @@
 
 static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
 static DEFINE_PER_CPU(char *, irq_name);
+static DEFINE_PER_CPU(atomic_t, xen_qlock_wait_nest);
 static bool xen_pvspin = true;
 
 static void xen_qlock_kick(int cpu)
@@ -39,25 +41,25 @@ static void xen_qlock_kick(int cpu)
  */
 static void xen_qlock_wait(u8 *byte, u8 val)
 {
-	unsigned long flags;
 	int irq = __this_cpu_read(lock_kicker_irq);
+	atomic_t *nest_cnt = this_cpu_ptr(&xen_qlock_wait_nest);
 
 	/* If kicker interrupts not initialized yet, just spin */
 	if (irq == -1 || in_nmi())
 		return;
 
-	/* Guard against reentry. */
-	local_irq_save(flags);
+	/* Detect reentry. */
+	atomic_inc(nest_cnt);
 
-	/* If irq pending already clear it. */
-	if (xen_test_irq_pending(irq)) {
+	/* If irq pending already and no nested call clear it. */
+	if (atomic_read(nest_cnt) == 1 && xen_test_irq_pending(irq)) {
 		xen_clear_irq_pending(irq);
 	} else if (READ_ONCE(*byte) == val) {
 		/* Block until irq becomes pending (or a spurious wakeup) */
 		xen_poll_irq(irq);
 	}
 
-	local_irq_restore(flags);
+	atomic_dec(nest_cnt);
 }
 
 static irqreturn_t dummy_handler(int irq, void *dev_id)
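
A note on the pattern: the old code guarded against reentry by disabling interrupts around the pending-irq test, while the new code merely detects reentry with a per-CPU nesting counter and lets only the outermost call (nest_cnt == 1) consume a pending kick. A nested call that interrupts the outer one between its test and its poll can therefore no longer clear the event the outer waiter is about to block on in xen_poll_irq(). Below is a minimal standalone sketch of this nesting-counter idea, with no claim to reproduce the kernel's behavior: it uses C11 atomics, thread-local state standing in for per-CPU data, hypothetical names (pending_kick, wait_for_kick), and it omits the lock-byte recheck.

/*
 * Sketch of the reentry-detection pattern from the patch above,
 * translated to userspace. All names here are illustrative, not
 * kernel APIs. Only the outermost call (nesting count == 1) may
 * consume the pending "kick"; a nested call must leave it for the
 * outer waiter.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the per-CPU counter; per-thread in userspace. */
static _Thread_local atomic_int nest_cnt;

/* Stand-in for the event channel's pending bit. */
static atomic_bool pending_kick;

static void wait_for_kick(void)
{
	/* Detect reentry, mirroring atomic_inc(nest_cnt) in the patch. */
	atomic_fetch_add(&nest_cnt, 1);

	/* Only the outermost waiter may consume a pending kick. */
	if (atomic_load(&nest_cnt) == 1 &&
	    atomic_exchange(&pending_kick, false)) {
		puts("outer call: consumed pending kick, no need to block");
	} else {
		puts("nested call (or nothing pending): would block/poll here");
	}

	atomic_fetch_sub(&nest_cnt, 1);
}

int main(void)
{
	atomic_store(&pending_kick, true);
	wait_for_kick();		/* outermost: eats the kick */

	atomic_store(&pending_kick, true);
	atomic_fetch_add(&nest_cnt, 1);	/* simulate entry from an interrupt */
	wait_for_kick();		/* nested: must not eat the kick */
	atomic_fetch_sub(&nest_cnt, 1);

	printf("kick still pending for the outer waiter: %d\n",
	       (int)atomic_load(&pending_kick));
	return 0;
}

The increment happens before the test, so any call that interrupts another always observes a count above one. And because the counter is strictly per-CPU in the kernel (per-thread in this sketch), increments and decrements never race across CPUs; the atomicity only has to cover interruption on the same CPU, which is why a plain counter with no further locking suffices.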