@@ -137,169 +137,7 @@ static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 }
 #define arch_spin_is_contended	arch_spin_is_contended
 
-/*
- * Write lock implementation.
- *
- * Write locks set bit 31. Unlocking, is done by writing 0 since the lock is
- * exclusively held.
- *
- * The memory barriers are implicit with the load-acquire and store-release
- * instructions.
- */
-
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
-	unsigned int tmp;
-
-	asm volatile(ARM64_LSE_ATOMIC_INSN(
-	/* LL/SC */
-	"	sevl\n"
-	"1:	wfe\n"
-	"2:	ldaxr	%w0, %1\n"
-	"	cbnz	%w0, 1b\n"
-	"	stxr	%w0, %w2, %1\n"
-	"	cbnz	%w0, 2b\n"
-	__nops(1),
-	/* LSE atomics */
-	"1:	mov	%w0, wzr\n"
-	"2:	casa	%w0, %w2, %1\n"
-	"	cbz	%w0, 3f\n"
-	"	ldxr	%w0, %1\n"
-	"	cbz	%w0, 2b\n"
-	"	wfe\n"
-	"	b	1b\n"
-	"3:")
-	: "=&r" (tmp), "+Q" (rw->lock)
-	: "r" (0x80000000)
-	: "memory");
-}
-
-static inline int arch_write_trylock(arch_rwlock_t *rw)
-{
-	unsigned int tmp;
-
-	asm volatile(ARM64_LSE_ATOMIC_INSN(
-	/* LL/SC */
-	"1:	ldaxr	%w0, %1\n"
-	"	cbnz	%w0, 2f\n"
-	"	stxr	%w0, %w2, %1\n"
-	"	cbnz	%w0, 1b\n"
-	"2:",
-	/* LSE atomics */
-	"	mov	%w0, wzr\n"
-	"	casa	%w0, %w2, %1\n"
-	__nops(2))
-	: "=&r" (tmp), "+Q" (rw->lock)
-	: "r" (0x80000000)
-	: "memory");
-
-	return !tmp;
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
-	asm volatile(ARM64_LSE_ATOMIC_INSN(
-	"	stlr	wzr, %0",
-	"	swpl	wzr, wzr, %0")
-	: "=Q" (rw->lock) :: "memory");
-}
-
-/* write_can_lock - would write_trylock() succeed? */
-#define arch_write_can_lock(x)		((x)->lock == 0)
-
-/*
- * Read lock implementation.
- *
- * It exclusively loads the lock value, increments it and stores the new value
- * back if positive and the CPU still exclusively owns the location. If the
- * value is negative, the lock is already held.
- *
- * During unlocking there may be multiple active read locks but no write lock.
- *
- * The memory barriers are implicit with the load-acquire and store-release
- * instructions.
- *
- * Note that in UNDEFINED cases, such as unlocking a lock twice, the LL/SC
- * and LSE implementations may exhibit different behaviour (although this
- * will have no effect on lockdep).
- */
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
-	unsigned int tmp, tmp2;
-
-	asm volatile(
-	"	sevl\n"
-	ARM64_LSE_ATOMIC_INSN(
-	/* LL/SC */
-	"1:	wfe\n"
-	"2:	ldaxr	%w0, %2\n"
-	"	add	%w0, %w0, #1\n"
-	"	tbnz	%w0, #31, 1b\n"
-	"	stxr	%w1, %w0, %2\n"
-	"	cbnz	%w1, 2b\n"
-	__nops(1),
-	/* LSE atomics */
-	"1:	wfe\n"
-	"2:	ldxr	%w0, %2\n"
-	"	adds	%w1, %w0, #1\n"
-	"	tbnz	%w1, #31, 1b\n"
-	"	casa	%w0, %w1, %2\n"
-	"	sbc	%w0, %w1, %w0\n"
-	"	cbnz	%w0, 2b")
-	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
-	:
-	: "cc", "memory");
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *rw)
-{
-	unsigned int tmp, tmp2;
-
-	asm volatile(ARM64_LSE_ATOMIC_INSN(
-	/* LL/SC */
-	"1:	ldxr	%w0, %2\n"
-	"	sub	%w0, %w0, #1\n"
-	"	stlxr	%w1, %w0, %2\n"
-	"	cbnz	%w1, 1b",
-	/* LSE atomics */
-	"	movn	%w0, #0\n"
-	"	staddl	%w0, %2\n"
-	__nops(2))
-	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
-	:
-	: "memory");
-}
-
-static inline int arch_read_trylock(arch_rwlock_t *rw)
-{
-	unsigned int tmp, tmp2;
-
-	asm volatile(ARM64_LSE_ATOMIC_INSN(
-	/* LL/SC */
-	"	mov	%w1, #1\n"
-	"1:	ldaxr	%w0, %2\n"
-	"	add	%w0, %w0, #1\n"
-	"	tbnz	%w0, #31, 2f\n"
-	"	stxr	%w1, %w0, %2\n"
-	"	cbnz	%w1, 1b\n"
-	"2:",
-	/* LSE atomics */
-	"	ldr	%w0, %2\n"
-	"	adds	%w1, %w0, #1\n"
-	"	tbnz	%w1, #31, 1f\n"
-	"	casa	%w0, %w1, %2\n"
-	"	sbc	%w1, %w1, %w0\n"
-	__nops(1)
-	"1:")
-	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
-	:
-	: "cc", "memory");
-
-	return !tmp2;
-}
-
-/* read_can_lock - would read_trylock() succeed? */
-#define arch_read_can_lock(x)		((x)->lock < 0x80000000)
+#include <asm/qrwlock.h>
 
 /* See include/linux/spinlock.h */
 #define smp_mb__after_spinlock()	smp_mb()
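
For readers unfamiliar with the scheme being deleted above, the removed comments describe its semantics: a writer claims the whole lock word by setting bit 31 of an otherwise-zero value and unlocks with a plain release store of 0, while readers increment the low bits and may only proceed when bit 31 is clear. The stand-alone C11 sketch below models just that behaviour; it is illustrative only, not kernel code or the patch's replacement. The toy_* names, the use of <stdatomic.h> and the trylock-only interface are assumptions of this sketch. The generic queued rwlock pulled in via <asm/qrwlock.h> keeps broadly the same reader/writer exclusion while also queuing contenders.

/*
 * Illustrative sketch only -- not kernel code. Bit 31 marks a writer,
 * bits 0-30 count readers; unlock paths use release ordering, mirroring
 * the stlr/stlxr/staddl stores in the removed assembly.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define TOY_WRITER_BIT	0x80000000u

struct toy_rwlock {
	_Atomic uint32_t lock;	/* bit 31 = writer, bits 0-30 = reader count */
};

static bool toy_write_trylock(struct toy_rwlock *rw)
{
	uint32_t expected = 0;

	/* Succeeds only when no reader and no writer holds the lock. */
	return atomic_compare_exchange_strong_explicit(&rw->lock, &expected,
						       TOY_WRITER_BIT,
						       memory_order_acquire,
						       memory_order_relaxed);
}

static void toy_write_unlock(struct toy_rwlock *rw)
{
	/* The lock is exclusively held, so a release store of 0 is enough. */
	atomic_store_explicit(&rw->lock, 0, memory_order_release);
}

static bool toy_read_trylock(struct toy_rwlock *rw)
{
	uint32_t old = atomic_load_explicit(&rw->lock, memory_order_relaxed);

	/* Increment the reader count unless the writer bit is set. */
	do {
		if (old & TOY_WRITER_BIT)
			return false;
	} while (!atomic_compare_exchange_weak_explicit(&rw->lock, &old,
							old + 1,
							memory_order_acquire,
							memory_order_relaxed));
	return true;
}

static void toy_read_unlock(struct toy_rwlock *rw)
{
	atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_release);
}

int main(void)
{
	struct toy_rwlock rw = { .lock = 0 };

	if (toy_read_trylock(&rw))	/* no writer: reader enters */
		toy_read_unlock(&rw);
	if (toy_write_trylock(&rw))	/* no readers left: writer enters */
		toy_write_unlock(&rw);
	return 0;
}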