@@ -17,6 +17,7 @@
 
 #include <linux/kernel.h>
 #include <asm/current.h>
+#include <asm/fence.h>
 
 /*
  * Simple spin lock operations.  These provide no fairness guarantees.
@@ -28,10 +29,7 @@
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
-	__asm__ __volatile__ (
-		"amoswap.w.rl x0, x0, %0"
-		: "=A" (lock->lock)
-		:: "memory");
+	smp_store_release(&lock->lock, 0);
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *lock)
@@ -39,7 +37,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 	int tmp = 1, busy;
 
 	__asm__ __volatile__ (
-		"	amoswap.w.aq %0, %2, %1\n"
+		"	amoswap.w %0, %2, %1\n"
+		RISCV_ACQUIRE_BARRIER
 		: "=r" (busy), "+A" (lock->lock)
 		: "r" (tmp)
 		: "memory");
@@ -68,8 +67,9 @@ static inline void arch_read_lock(arch_rwlock_t *lock)
 		"1:	lr.w	%1, %0\n"
 		"	bltz	%1, 1b\n"
 		"	addi	%1, %1, 1\n"
-		"	sc.w.aq	%1, %1, %0\n"
+		"	sc.w	%1, %1, %0\n"
 		"	bnez	%1, 1b\n"
+		RISCV_ACQUIRE_BARRIER
 		: "+A" (lock->lock), "=&r" (tmp)
 		:: "memory");
 }
@@ -82,8 +82,9 @@ static inline void arch_write_lock(arch_rwlock_t *lock)
 		"1:	lr.w	%1, %0\n"
 		"	bnez	%1, 1b\n"
 		"	li	%1, -1\n"
-		"	sc.w.aq	%1, %1, %0\n"
+		"	sc.w	%1, %1, %0\n"
 		"	bnez	%1, 1b\n"
+		RISCV_ACQUIRE_BARRIER
 		: "+A" (lock->lock), "=&r" (tmp)
 		:: "memory");
 }
@@ -96,8 +97,9 @@ static inline int arch_read_trylock(arch_rwlock_t *lock)
 		"1:	lr.w	%1, %0\n"
 		"	bltz	%1, 1f\n"
 		"	addi	%1, %1, 1\n"
-		"	sc.w.aq	%1, %1, %0\n"
+		"	sc.w	%1, %1, %0\n"
 		"	bnez	%1, 1b\n"
+		RISCV_ACQUIRE_BARRIER
 		"1:\n"
 		: "+A" (lock->lock), "=&r" (busy)
 		:: "memory");
@@ -113,8 +115,9 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
 		"1:	lr.w	%1, %0\n"
 		"	bnez	%1, 1f\n"
 		"	li	%1, -1\n"
-		"	sc.w.aq	%1, %1, %0\n"
+		"	sc.w	%1, %1, %0\n"
 		"	bnez	%1, 1b\n"
+		RISCV_ACQUIRE_BARRIER
 		"1:\n"
 		: "+A" (lock->lock), "=&r" (busy)
 		:: "memory");
@@ -125,7 +128,8 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
 static inline void arch_read_unlock(arch_rwlock_t *lock)
 {
 	__asm__ __volatile__(
-		"amoadd.w.rl x0, %1, %0"
+		RISCV_RELEASE_BARRIER
+		"	amoadd.w	x0, %1, %0\n"
 		: "+A" (lock->lock)
 		: "r" (-1)
 		: "memory");
@@ -133,10 +137,7 @@ static inline void arch_read_unlock(arch_rwlock_t *lock)
 
 static inline void arch_write_unlock(arch_rwlock_t *lock)
 {
-	__asm__ __volatile__ (
-		"amoswap.w.rl x0, x0, %0"
-		: "=A" (lock->lock)
-		:: "memory");
+	smp_store_release(&lock->lock, 0);
 }
 
 #endif /* _ASM_RISCV_SPINLOCK_H */