@@ -16,123 +16,8 @@
 #ifndef __ASM_SPINLOCK_H
 #define __ASM_SPINLOCK_H
 
-#include <asm/lse.h>
-#include <asm/spinlock_types.h>
-#include <asm/processor.h>
-
-/*
- * Spinlock implementation.
- *
- * The memory barriers are implicit with the load-acquire and store-release
- * instructions.
- */
-
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
-	unsigned int tmp;
-	arch_spinlock_t lockval, newval;
-
-	asm volatile(
-	/* Atomically increment the next ticket. */
-	ARM64_LSE_ATOMIC_INSN(
-	/* LL/SC */
-"	prfm	pstl1strm, %3\n"
-"1:	ldaxr	%w0, %3\n"
-"	add	%w1, %w0, %w5\n"
-"	stxr	%w2, %w1, %3\n"
-"	cbnz	%w2, 1b\n",
-	/* LSE atomics */
-"	mov	%w2, %w5\n"
-"	ldadda	%w2, %w0, %3\n"
-	__nops(3)
-	)
-
-	/* Did we get the lock? */
-"	eor	%w1, %w0, %w0, ror #16\n"
-"	cbz	%w1, 3f\n"
-	/*
-	 * No: spin on the owner. Send a local event to avoid missing an
-	 * unlock before the exclusive load.
-	 */
-"	sevl\n"
-"2:	wfe\n"
-"	ldaxrh	%w2, %4\n"
-"	eor	%w1, %w2, %w0, lsr #16\n"
-"	cbnz	%w1, 2b\n"
-	/* We got the lock. Critical section starts here. */
-"3:"
-	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
-	: "Q" (lock->owner), "I" (1 << TICKET_SHIFT)
-	: "memory");
-}
-
-static inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
-	unsigned int tmp;
-	arch_spinlock_t lockval;
-
-	asm volatile(ARM64_LSE_ATOMIC_INSN(
-	/* LL/SC */
-	"	prfm	pstl1strm, %2\n"
-	"1:	ldaxr	%w0, %2\n"
-	"	eor	%w1, %w0, %w0, ror #16\n"
-	"	cbnz	%w1, 2f\n"
-	"	add	%w0, %w0, %3\n"
-	"	stxr	%w1, %w0, %2\n"
-	"	cbnz	%w1, 1b\n"
-	"2:",
-	/* LSE atomics */
-	"	ldr	%w0, %2\n"
-	"	eor	%w1, %w0, %w0, ror #16\n"
-	"	cbnz	%w1, 1f\n"
-	"	add	%w1, %w0, %3\n"
-	"	casa	%w0, %w1, %2\n"
-	"	sub	%w1, %w1, %3\n"
-	"	eor	%w1, %w1, %w0\n"
-	"1:")
-	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
-	: "I" (1 << TICKET_SHIFT)
-	: "memory");
-
-	return !tmp;
-}
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
-	unsigned long tmp;
-
-	asm volatile(ARM64_LSE_ATOMIC_INSN(
-	/* LL/SC */
-	"	ldrh	%w1, %0\n"
-	"	add	%w1, %w1, #1\n"
-	"	stlrh	%w1, %0",
-	/* LSE atomics */
-	"	mov	%w1, #1\n"
-	"	staddlh	%w1, %0\n"
-	__nops(1))
-	: "=Q" (lock->owner), "=&r" (tmp)
-	:
-	: "memory");
-}
-
-static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
-{
-	return lock.owner == lock.next;
-}
-
-static inline int arch_spin_is_locked(arch_spinlock_t *lock)
-{
-	return !arch_spin_value_unlocked(READ_ONCE(*lock));
-}
-
-static inline int arch_spin_is_contended(arch_spinlock_t *lock)
-{
-	arch_spinlock_t lockval = READ_ONCE(*lock);
-	return (lockval.next - lockval.owner) > 1;
-}
-#define arch_spin_is_contended	arch_spin_is_contended
-
 #include <asm/qrwlock.h>
+#include <asm/qspinlock.h>
 
 /* See include/linux/spinlock.h */
 #define smp_mb__after_spinlock()	smp_mb()
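
For reference, the ticket-lock protocol deleted above can be expressed in a few
lines of portable C. The sketch below is illustrative only: it uses C11 atomics,
hypothetical names (ticket_lock_t, ticket_lock(), ticket_unlock(),
ticket_is_locked()), two separate halfword fields where the kernel packs owner
and next into one 32-bit word (TICKET_SHIFT == 16), and a hot spin loop in place
of the WFE/SEVL waiting and LL/SC vs. LSE selection done by the removed assembly.
It is neither the kernel API nor the qspinlock algorithm; it only shows the
owner/next layout and the acquire/release ordering that both removed code paths
implement.

#include <stdatomic.h>
#include <stdint.h>

typedef struct {
	_Atomic uint16_t owner;	/* ticket currently holding the lock */
	_Atomic uint16_t next;	/* next ticket to hand out */
} ticket_lock_t;

static void ticket_lock(ticket_lock_t *lock)
{
	/* Take a ticket: mirrors the atomic increment of 'next'
	 * (ldaxr/add/stxr or ldadda) above. */
	uint16_t ticket = atomic_fetch_add_explicit(&lock->next, 1,
						    memory_order_acquire);

	/* Wait until 'owner' reaches our ticket; the acquire load pairs
	 * with the release in ticket_unlock(). */
	while (atomic_load_explicit(&lock->owner,
				    memory_order_acquire) != ticket)
		;	/* the removed asm waits with WFE instead of spinning hot */
}

static void ticket_unlock(ticket_lock_t *lock)
{
	/* Pass the lock on: bump 'owner' with release semantics,
	 * like the removed stlrh/staddlh paths. */
	atomic_fetch_add_explicit(&lock->owner, 1, memory_order_release);
}

static int ticket_is_locked(ticket_lock_t *lock)
{
	/* Locked while a handed-out ticket is still outstanding. */
	return atomic_load_explicit(&lock->owner, memory_order_relaxed) !=
	       atomic_load_explicit(&lock->next, memory_order_relaxed);
}

With this change the arch_spin_*() operations for arm64 come from the generic
queued-spinlock implementation pulled in through <asm/qspinlock.h>, rather than
from the hand-rolled ticket lock this header used to carry.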