@@ -30,6 +30,8 @@
 
 #ifdef __KERNEL__
 
+#include <asm/atomic_ll_sc.h>
+
 /*
  * On ARM, ordinary assignment (str instruction) doesn't clear the local
  * strex/ldrex monitor on some implementations. The reason we can use it for
@@ -38,86 +40,6 @@
 #define atomic_read(v)	ACCESS_ONCE((v)->counter)
 #define atomic_set(v,i)	(((v)->counter) = (i))
 
-/*
- * AArch64 UP and SMP safe atomic ops. We use load exclusive and
- * store exclusive to ensure that these are atomic. We may loop
- * to ensure that the update happens.
- */
-
-#define ATOMIC_OP(op, asm_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
-{ \
-	unsigned long tmp; \
-	int result; \
-	\
-	asm volatile("// atomic_" #op "\n" \
-"1:	ldxr	%w0, %2\n" \
-"	" #asm_op "	%w0, %w0, %w3\n" \
-"	stxr	%w1, %w0, %2\n" \
-"	cbnz	%w1, 1b" \
-	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
-	: "Ir" (i)); \
-} \
-
-#define ATOMIC_OP_RETURN(op, asm_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
-{ \
-	unsigned long tmp; \
-	int result; \
-	\
-	asm volatile("// atomic_" #op "_return\n" \
-"1:	ldxr	%w0, %2\n" \
-"	" #asm_op "	%w0, %w0, %w3\n" \
-"	stlxr	%w1, %w0, %2\n" \
-"	cbnz	%w1, 1b" \
-	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
-	: "Ir" (i) \
-	: "memory"); \
-	\
-	smp_mb(); \
-	return result; \
-}
-
-#define ATOMIC_OPS(op, asm_op) \
-	ATOMIC_OP(op, asm_op) \
-	ATOMIC_OP_RETURN(op, asm_op)
-
-ATOMIC_OPS(add, add)
-ATOMIC_OPS(sub, sub)
-
-#define atomic_andnot atomic_andnot
-
-ATOMIC_OP(and, and)
-ATOMIC_OP(andnot, bic)
-ATOMIC_OP(or, orr)
-ATOMIC_OP(xor, eor)
-
-#undef ATOMIC_OPS
-#undef ATOMIC_OP_RETURN
-#undef ATOMIC_OP
-
-static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
-{
-	unsigned long tmp;
-	int oldval;
-
-	smp_mb();
-
-	asm volatile("// atomic_cmpxchg\n"
-"1:	ldxr	%w1, %2\n"
-"	cmp	%w1, %w3\n"
-"	b.ne	2f\n"
-"	stxr	%w0, %w4, %2\n"
-"	cbnz	%w0, 1b\n"
-"2:"
-	: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
-	: "Ir" (old), "r" (new)
-	: "cc");
-
-	smp_mb();
-	return oldval;
-}
-
 #define atomic_xchg(v, new)	(xchg(&((v)->counter), new))
 
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
@@ -141,6 +63,8 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 
 #define atomic_add_negative(i,v)	(atomic_add_return(i, v) < 0)
 
+#define atomic_andnot atomic_andnot
+
 /*
  * 64-bit atomic operations.
  */
@@ -149,102 +73,8 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 #define atomic64_read(v)	ACCESS_ONCE((v)->counter)
 #define atomic64_set(v,i)	(((v)->counter) = (i))
 
-#define ATOMIC64_OP(op, asm_op) \
-static inline void atomic64_##op(long i, atomic64_t *v) \
-{ \
-	long result; \
-	unsigned long tmp; \
-	\
-	asm volatile("// atomic64_" #op "\n" \
-"1:	ldxr	%0, %2\n" \
-"	" #asm_op "	%0, %0, %3\n" \
-"	stxr	%w1, %0, %2\n" \
-"	cbnz	%w1, 1b" \
-	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
-	: "Ir" (i)); \
-} \
-
-#define ATOMIC64_OP_RETURN(op, asm_op) \
-static inline long atomic64_##op##_return(long i, atomic64_t *v) \
-{ \
-	long result; \
-	unsigned long tmp; \
-	\
-	asm volatile("// atomic64_" #op "_return\n" \
-"1:	ldxr	%0, %2\n" \
-"	" #asm_op "	%0, %0, %3\n" \
-"	stlxr	%w1, %0, %2\n" \
-"	cbnz	%w1, 1b" \
-	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
-	: "Ir" (i) \
-	: "memory"); \
-	\
-	smp_mb(); \
-	return result; \
-}
-
-#define ATOMIC64_OPS(op, asm_op) \
-	ATOMIC64_OP(op, asm_op) \
-	ATOMIC64_OP_RETURN(op, asm_op)
-
-ATOMIC64_OPS(add, add)
-ATOMIC64_OPS(sub, sub)
-
-#define atomic64_andnot atomic64_andnot
-
-ATOMIC64_OP(and, and)
-ATOMIC64_OP(andnot, bic)
-ATOMIC64_OP(or, orr)
-ATOMIC64_OP(xor, eor)
-
-#undef ATOMIC64_OPS
-#undef ATOMIC64_OP_RETURN
-#undef ATOMIC64_OP
-
-static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
-{
-	long oldval;
-	unsigned long res;
-
-	smp_mb();
-
-	asm volatile("// atomic64_cmpxchg\n"
-"1:	ldxr	%1, %2\n"
-"	cmp	%1, %3\n"
-"	b.ne	2f\n"
-"	stxr	%w0, %4, %2\n"
-"	cbnz	%w0, 1b\n"
-"2:"
-	: "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
-	: "Ir" (old), "r" (new)
-	: "cc");
-
-	smp_mb();
-	return oldval;
-}
-
 #define atomic64_xchg(v, new)	(xchg(&((v)->counter), new))
 
-static inline long atomic64_dec_if_positive(atomic64_t *v)
-{
-	long result;
-	unsigned long tmp;
-
-	asm volatile("// atomic64_dec_if_positive\n"
-"1:	ldxr	%0, %2\n"
-"	subs	%0, %0, #1\n"
-"	b.mi	2f\n"
-"	stlxr	%w1, %0, %2\n"
-"	cbnz	%w1, 1b\n"
-"	dmb	ish\n"
-"2:"
-	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	:
-	: "cc", "memory");
-
-	return result;
-}
-
 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
 {
 	long c, old;
@@ -266,5 +96,7 @@ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
 #define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
 #define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
 
+#define atomic64_andnot atomic64_andnot
+
 #endif
 #endif
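
Note (not part of the patch): the ATOMIC_OP() generators removed above all emit the same load-exclusive/store-exclusive retry loop. As a minimal sketch, the expansion of ATOMIC_OP(add, add) looks roughly like the function below; the llsc_add name and the bare int pointer are illustrative only and do not appear in asm/atomic_ll_sc.h.

/*
 * Illustrative sketch only. Load-exclusive the counter, apply the
 * operation, then store-exclusive; a non-zero status register means the
 * exclusive reservation was lost, so retry from label 1.
 */
static inline void llsc_add(int i, int *counter)
{
	unsigned long tmp;
	int result;

	asm volatile("// llsc_add\n"
"1:	ldxr	%w0, %2\n"		/* result = *counter (exclusive load) */
"	add	%w0, %w0, %w3\n"	/* result += i                        */
"	stxr	%w1, %w0, %2\n"		/* try exclusive store; tmp = status  */
"	cbnz	%w1, 1b"		/* retry if the exclusive store failed */
	: "=&r" (result), "=&r" (tmp), "+Q" (*counter)
	: "Ir" (i));
}

The *_return variants in the removed code differ only in using stlxr (store-release exclusive) and an smp_mb() after the loop to provide full ordering.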