@@ -26,54 +26,57 @@
#endif

#define __LL_SC_ATOMIC(op) __LL_SC_CALL(atomic_##op)
-
-static inline void atomic_andnot(int i, atomic_t *v)
-{
- register int w0 asm ("w0") = i;
- register atomic_t *x1 asm ("x1") = v;
-
- asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(andnot),
- " stclr %w[i], %[v]\n")
- : [i] "+r" (w0), [v] "+Q" (v->counter)
- : "r" (x1)
- : __LL_SC_CLOBBERS);
+#define ATOMIC_OP(op, asm_op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ register int w0 asm ("w0") = i; \
+ register atomic_t *x1 asm ("x1") = v; \
+ \
+ asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(op), \
+" " #asm_op " %w[i], %[v]\n") \
+ : [i] "+r" (w0), [v] "+Q" (v->counter) \
+ : "r" (x1) \
+ : __LL_SC_CLOBBERS); \
}

-static inline void atomic_or(int i, atomic_t *v)
-{
- register int w0 asm ("w0") = i;
- register atomic_t *x1 asm ("x1") = v;
+ATOMIC_OP(andnot, stclr)
+ATOMIC_OP(or, stset)
+ATOMIC_OP(xor, steor)
+ATOMIC_OP(add, stadd)

- asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(or),
- " stset %w[i], %[v]\n")
- : [i] "+r" (w0), [v] "+Q" (v->counter)
- : "r" (x1)
- : __LL_SC_CLOBBERS);
-}
+#undef ATOMIC_OP

-static inline void atomic_xor(int i, atomic_t *v)
-{
- register int w0 asm ("w0") = i;
- register atomic_t *x1 asm ("x1") = v;
-
- asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(xor),
- " steor %w[i], %[v]\n")
- : [i] "+r" (w0), [v] "+Q" (v->counter)
- : "r" (x1)
- : __LL_SC_CLOBBERS);
+#define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...) \
+static inline int atomic_fetch_##op##name(int i, atomic_t *v) \
+{ \
+ register int w0 asm ("w0") = i; \
+ register atomic_t *x1 asm ("x1") = v; \
+ \
+ asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ /* LL/SC */ \
+ __LL_SC_ATOMIC(fetch_##op##name), \
+ /* LSE atomics */ \
+" " #asm_op #mb " %w[i], %w[i], %[v]") \
+ : [i] "+r" (w0), [v] "+Q" (v->counter) \
+ : "r" (x1) \
+ : __LL_SC_CLOBBERS, ##cl); \
+ \
+ return w0; \
}

-static inline void atomic_add(int i, atomic_t *v)
-{
- register int w0 asm ("w0") = i;
- register atomic_t *x1 asm ("x1") = v;
+#define ATOMIC_FETCH_OPS(op, asm_op) \
+ ATOMIC_FETCH_OP(_relaxed, , op, asm_op) \
+ ATOMIC_FETCH_OP(_acquire, a, op, asm_op, "memory") \
+ ATOMIC_FETCH_OP(_release, l, op, asm_op, "memory") \
+ ATOMIC_FETCH_OP( , al, op, asm_op, "memory")

- asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(add),
- " stadd %w[i], %[v]\n")
- : [i] "+r" (w0), [v] "+Q" (v->counter)
- : "r" (x1)
- : __LL_SC_CLOBBERS);
-}
+ATOMIC_FETCH_OPS(andnot, ldclr)
+ATOMIC_FETCH_OPS(or, ldset)
+ATOMIC_FETCH_OPS(xor, ldeor)
+ATOMIC_FETCH_OPS(add, ldadd)
+
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_FETCH_OPS

#define ATOMIC_OP_ADD_RETURN(name, mb, cl...) \
static inline int atomic_add_return##name(int i, atomic_t *v) \
@@ -119,6 +122,33 @@ static inline void atomic_and(int i, atomic_t *v)
: __LL_SC_CLOBBERS);
}

+#define ATOMIC_FETCH_OP_AND(name, mb, cl...) \
+static inline int atomic_fetch_and##name(int i, atomic_t *v) \
+{ \
+ register int w0 asm ("w0") = i; \
+ register atomic_t *x1 asm ("x1") = v; \
+ \
+ asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ /* LL/SC */ \
+ " nop\n" \
+ __LL_SC_ATOMIC(fetch_and##name), \
+ /* LSE atomics */ \
+ " mvn %w[i], %w[i]\n" \
+ " ldclr" #mb " %w[i], %w[i], %[v]") \
+ : [i] "+r" (w0), [v] "+Q" (v->counter) \
+ : "r" (x1) \
+ : __LL_SC_CLOBBERS, ##cl); \
+ \
+ return w0; \
+}
+
+ATOMIC_FETCH_OP_AND(_relaxed, )
+ATOMIC_FETCH_OP_AND(_acquire, a, "memory")
+ATOMIC_FETCH_OP_AND(_release, l, "memory")
+ATOMIC_FETCH_OP_AND( , al, "memory")
+
+#undef ATOMIC_FETCH_OP_AND
+
static inline void atomic_sub(int i, atomic_t *v)
{
register int w0 asm ("w0") = i;
@@ -164,57 +194,87 @@ ATOMIC_OP_SUB_RETURN(_release, l, "memory")
ATOMIC_OP_SUB_RETURN( , al, "memory")

#undef ATOMIC_OP_SUB_RETURN
-#undef __LL_SC_ATOMIC
-
-#define __LL_SC_ATOMIC64(op) __LL_SC_CALL(atomic64_##op)
-
-static inline void atomic64_andnot(long i, atomic64_t *v)
-{
- register long x0 asm ("x0") = i;
- register atomic64_t *x1 asm ("x1") = v;

- asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(andnot),
- " stclr %[i], %[v]\n")
- : [i] "+r" (x0), [v] "+Q" (v->counter)
- : "r" (x1)
- : __LL_SC_CLOBBERS);
+#define ATOMIC_FETCH_OP_SUB(name, mb, cl...) \
+static inline int atomic_fetch_sub##name(int i, atomic_t *v) \
+{ \
+ register int w0 asm ("w0") = i; \
+ register atomic_t *x1 asm ("x1") = v; \
+ \
+ asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ /* LL/SC */ \
+ " nop\n" \
+ __LL_SC_ATOMIC(fetch_sub##name), \
+ /* LSE atomics */ \
+ " neg %w[i], %w[i]\n" \
+ " ldadd" #mb " %w[i], %w[i], %[v]") \
+ : [i] "+r" (w0), [v] "+Q" (v->counter) \
+ : "r" (x1) \
+ : __LL_SC_CLOBBERS, ##cl); \
+ \
+ return w0; \
}

-static inline void atomic64_or(long i, atomic64_t *v)
-{
- register long x0 asm ("x0") = i;
- register atomic64_t *x1 asm ("x1") = v;
+ATOMIC_FETCH_OP_SUB(_relaxed, )
+ATOMIC_FETCH_OP_SUB(_acquire, a, "memory")
+ATOMIC_FETCH_OP_SUB(_release, l, "memory")
+ATOMIC_FETCH_OP_SUB( , al, "memory")

- asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(or),
- " stset %[i], %[v]\n")
- : [i] "+r" (x0), [v] "+Q" (v->counter)
- : "r" (x1)
- : __LL_SC_CLOBBERS);
+#undef ATOMIC_FETCH_OP_SUB
+#undef __LL_SC_ATOMIC
+
+#define __LL_SC_ATOMIC64(op) __LL_SC_CALL(atomic64_##op)
+#define ATOMIC64_OP(op, asm_op) \
+static inline void atomic64_##op(long i, atomic64_t *v) \
+{ \
+ register long x0 asm ("x0") = i; \
+ register atomic64_t *x1 asm ("x1") = v; \
+ \
+ asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(op), \
+" " #asm_op " %[i], %[v]\n") \
+ : [i] "+r" (x0), [v] "+Q" (v->counter) \
+ : "r" (x1) \
+ : __LL_SC_CLOBBERS); \
}

-static inline void atomic64_xor(long i, atomic64_t *v)
-{
- register long x0 asm ("x0") = i;
- register atomic64_t *x1 asm ("x1") = v;
+ATOMIC64_OP(andnot, stclr)
+ATOMIC64_OP(or, stset)
+ATOMIC64_OP(xor, steor)
+ATOMIC64_OP(add, stadd)

- asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(xor),
- " steor %[i], %[v]\n")
- : [i] "+r" (x0), [v] "+Q" (v->counter)
- : "r" (x1)
- : __LL_SC_CLOBBERS);
+#undef ATOMIC64_OP
+
+#define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...) \
+static inline long atomic64_fetch_##op##name(long i, atomic64_t *v) \
+{ \
+ register long x0 asm ("x0") = i; \
+ register atomic64_t *x1 asm ("x1") = v; \
+ \
+ asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ /* LL/SC */ \
+ __LL_SC_ATOMIC64(fetch_##op##name), \
+ /* LSE atomics */ \
+" " #asm_op #mb " %[i], %[i], %[v]") \
+ : [i] "+r" (x0), [v] "+Q" (v->counter) \
+ : "r" (x1) \
+ : __LL_SC_CLOBBERS, ##cl); \
+ \
+ return x0; \
}

-static inline void atomic64_add(long i, atomic64_t *v)
-{
- register long x0 asm ("x0") = i;
- register atomic64_t *x1 asm ("x1") = v;
+#define ATOMIC64_FETCH_OPS(op, asm_op) \
+ ATOMIC64_FETCH_OP(_relaxed, , op, asm_op) \
+ ATOMIC64_FETCH_OP(_acquire, a, op, asm_op, "memory") \
+ ATOMIC64_FETCH_OP(_release, l, op, asm_op, "memory") \
+ ATOMIC64_FETCH_OP( , al, op, asm_op, "memory")

- asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(add),
- " stadd %[i], %[v]\n")
- : [i] "+r" (x0), [v] "+Q" (v->counter)
- : "r" (x1)
- : __LL_SC_CLOBBERS);
-}
+ATOMIC64_FETCH_OPS(andnot, ldclr)
+ATOMIC64_FETCH_OPS(or, ldset)
+ATOMIC64_FETCH_OPS(xor, ldeor)
+ATOMIC64_FETCH_OPS(add, ldadd)
+
+#undef ATOMIC64_FETCH_OP
+#undef ATOMIC64_FETCH_OPS

#define ATOMIC64_OP_ADD_RETURN(name, mb, cl...) \
static inline long atomic64_add_return##name(long i, atomic64_t *v) \
@@ -260,6 +320,33 @@ static inline void atomic64_and(long i, atomic64_t *v)
: __LL_SC_CLOBBERS);
}

+#define ATOMIC64_FETCH_OP_AND(name, mb, cl...) \
+static inline long atomic64_fetch_and##name(long i, atomic64_t *v) \
+{ \
+ register long x0 asm ("x0") = i; \
+ register atomic64_t *x1 asm ("x1") = v; \
+ \
+ asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ /* LL/SC */ \
+ " nop\n" \
+ __LL_SC_ATOMIC64(fetch_and##name), \
+ /* LSE atomics */ \
+ " mvn %[i], %[i]\n" \
+ " ldclr" #mb " %[i], %[i], %[v]") \
+ : [i] "+r" (x0), [v] "+Q" (v->counter) \
+ : "r" (x1) \
+ : __LL_SC_CLOBBERS, ##cl); \
+ \
+ return x0; \
+}
+
+ATOMIC64_FETCH_OP_AND(_relaxed, )
+ATOMIC64_FETCH_OP_AND(_acquire, a, "memory")
+ATOMIC64_FETCH_OP_AND(_release, l, "memory")
+ATOMIC64_FETCH_OP_AND( , al, "memory")
+
+#undef ATOMIC64_FETCH_OP_AND
+
static inline void atomic64_sub(long i, atomic64_t *v)
{
register long x0 asm ("x0") = i;
@@ -306,6 +393,33 @@ ATOMIC64_OP_SUB_RETURN( , al, "memory")

#undef ATOMIC64_OP_SUB_RETURN

+#define ATOMIC64_FETCH_OP_SUB(name, mb, cl...) \
+static inline long atomic64_fetch_sub##name(long i, atomic64_t *v) \
+{ \
+ register long x0 asm ("x0") = i; \
+ register atomic64_t *x1 asm ("x1") = v; \
+ \
+ asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ /* LL/SC */ \
+ " nop\n" \
+ __LL_SC_ATOMIC64(fetch_sub##name), \
+ /* LSE atomics */ \
+ " neg %[i], %[i]\n" \
+ " ldadd" #mb " %[i], %[i], %[v]") \
+ : [i] "+r" (x0), [v] "+Q" (v->counter) \
+ : "r" (x1) \
+ : __LL_SC_CLOBBERS, ##cl); \
+ \
+ return x0; \
+}
+
+ATOMIC64_FETCH_OP_SUB(_relaxed, )
+ATOMIC64_FETCH_OP_SUB(_acquire, a, "memory")
+ATOMIC64_FETCH_OP_SUB(_release, l, "memory")
+ATOMIC64_FETCH_OP_SUB( , al, "memory")
+
+#undef ATOMIC64_FETCH_OP_SUB
+
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
register long x0 asm ("x0") = (long)v;