@@ -20,7 +20,6 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *x,
 {
 	volatile unsigned int *a;
 
-	mb();
 	a = __ldcw_align(x);
 	while (__ldcw(a) == 0)
 		while (*a == 0)
@@ -30,17 +29,16 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *x,
 				local_irq_disable();
 			} else
 				cpu_relax();
-	mb();
 }
 #define arch_spin_lock_flags arch_spin_lock_flags
 
 static inline void arch_spin_unlock(arch_spinlock_t *x)
 {
 	volatile unsigned int *a;
-	mb();
+
 	a = __ldcw_align(x);
-	*a = 1;
 	mb();
+	*a = 1;
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *x)
@@ -48,10 +46,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *x)
 	volatile unsigned int *a;
 	int ret;
 
-	mb();
 	a = __ldcw_align(x);
 	ret = __ldcw(a) != 0;
-	mb();
 
 	return ret;
 }
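
For context (not part of the patch itself): the ldcw load-and-clear that
__ldcw() wraps performs an ordered access on PA-RISC, which appears to be
why the mb() before the acquire loop and the two mb() calls in
arch_spin_trylock() can be dropped. The one barrier that must survive is
the mb() in arch_spin_unlock(), and it has to execute before the "*a = 1"
store that frees the lock, so the critical section's writes become visible
before the next owner can get in; the unlock hunk reorders exactly that.
Below is a minimal sketch of the same ordering, using C11 atomics rather
than anything parisc-specific (toy_lock/toy_unlock and lock_word are
hypothetical names, not kernel API):

#include <stdatomic.h>

static atomic_uint lock_word = 1;	/* 1 = free, 0 = held, as with ldcw */

static void toy_lock(void)
{
	/* The exchange both reads the old value and claims the lock; its
	 * acquire ordering plays the role that ldcw's ordered access plays
	 * in hardware, so no separate barrier surrounds it. */
	while (atomic_exchange_explicit(&lock_word, 0,
					memory_order_acquire) == 0)
		/* Spin with plain loads, like "while (*a == 0)" above,
		 * instead of hammering the bus with atomic operations. */
		while (atomic_load_explicit(&lock_word,
					    memory_order_relaxed) == 0)
			;
}

static void toy_unlock(void)
{
	/* Release ordering is the "mb(); *a = 1;" pair in one operation:
	 * everything written inside the critical section is ordered
	 * before the store that marks the lock free. */
	atomic_store_explicit(&lock_word, 1, memory_order_release);
}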