@@ -43,69 +43,51 @@
  * store exclusive to ensure that these are atomic. We may loop
  * to ensure that the update happens.
  */
-static inline void atomic_add(int i, atomic_t *v)
-{
-	unsigned long tmp;
-	int result;
-
-	asm volatile("// atomic_add\n"
-"1:	ldxr	%w0, %2\n"
-"	add	%w0, %w0, %w3\n"
-"	stxr	%w1, %w0, %2\n"
-"	cbnz	%w1, 1b"
-	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	: "Ir" (i));
-}
-
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-	unsigned long tmp;
-	int result;
-
-	asm volatile("// atomic_add_return\n"
-"1:	ldxr	%w0, %2\n"
-"	add	%w0, %w0, %w3\n"
-"	stlxr	%w1, %w0, %2\n"
-"	cbnz	%w1, 1b"
-	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	: "Ir" (i)
-	: "memory");
-
-	smp_mb();
-	return result;
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
-	unsigned long tmp;
-	int result;
-
-	asm volatile("// atomic_sub\n"
-"1:	ldxr	%w0, %2\n"
-"	sub	%w0, %w0, %w3\n"
-"	stxr	%w1, %w0, %2\n"
-"	cbnz	%w1, 1b"
-	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	: "Ir" (i));
+#define ATOMIC_OP(op, asm_op)						\
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	unsigned long tmp;						\
+	int result;							\
+									\
+	asm volatile("// atomic_" #op "\n"				\
+"1:	ldxr	%w0, %2\n"						\
+"	" #asm_op "	%w0, %w0, %w3\n"				\
+"	stxr	%w1, %w0, %2\n"						\
+"	cbnz	%w1, 1b"						\
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
+	: "Ir" (i));							\
+}									\
+
+#define ATOMIC_OP_RETURN(op, asm_op)					\
+static inline int atomic_##op##_return(int i, atomic_t *v)		\
+{									\
+	unsigned long tmp;						\
+	int result;							\
+									\
+	asm volatile("// atomic_" #op "_return\n"			\
+"1:	ldxr	%w0, %2\n"						\
+"	" #asm_op "	%w0, %w0, %w3\n"				\
+"	stlxr	%w1, %w0, %2\n"						\
+"	cbnz	%w1, 1b"						\
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
+	: "Ir" (i)							\
+	: "memory");							\
+									\
+	smp_mb();							\
+	return result;							\
 }
 
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-	unsigned long tmp;
-	int result;
+#define ATOMIC_OPS(op, asm_op)						\
+	ATOMIC_OP(op, asm_op)						\
+	ATOMIC_OP_RETURN(op, asm_op)
 
-	asm volatile("// atomic_sub_return\n"
-"1:	ldxr	%w0, %2\n"
-"	sub	%w0, %w0, %w3\n"
-"	stlxr	%w1, %w0, %2\n"
-"	cbnz	%w1, 1b"
-	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	: "Ir" (i)
-	: "memory");
+ATOMIC_OPS(add, add)
+ATOMIC_OPS(sub, sub)
 
-	smp_mb();
-	return result;
-}
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
 static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 {
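
The hunk above is a pure refactor: ATOMIC_OP() and ATOMIC_OP_RETURN() stamp out the same functions the open-coded atomic_add/atomic_sub variants provided, with the op name token-pasted into the function name and stringised into the asm body; the ldxr/stxr pair retries (cbnz back to label 1) until the store-exclusive succeeds, which is the loop the comment at the top of the hunk refers to. As a sanity check (hand-expanded here for illustration, not part of the patch), ATOMIC_OP(add, add) should expand back to exactly the atomic_add() just removed:

static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_add\n"
"1:	ldxr	%w0, %2\n"
"	add	%w0, %w0, %w3\n"
"	stxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i));
}

The second hunk applies the same transformation to the 64-bit ops:
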
@@ -160,69 +142,50 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 #define atomic64_read(v)	(*(volatile long *)&(v)->counter)
 #define atomic64_set(v,i)	(((v)->counter) = (i))
 
-static inline void atomic64_add(u64 i, atomic64_t *v)
-{
-	long result;
-	unsigned long tmp;
-
-	asm volatile("// atomic64_add\n"
-"1:	ldxr	%0, %2\n"
-"	add	%0, %0, %3\n"
-"	stxr	%w1, %0, %2\n"
-"	cbnz	%w1, 1b"
-	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	: "Ir" (i));
+#define ATOMIC64_OP(op, asm_op)						\
+static inline void atomic64_##op(long i, atomic64_t *v)		\
+{									\
+	long result;							\
+	unsigned long tmp;						\
+									\
+	asm volatile("// atomic64_" #op "\n"				\
+"1:	ldxr	%0, %2\n"						\
+"	" #asm_op "	%0, %0, %3\n"					\
+"	stxr	%w1, %0, %2\n"						\
+"	cbnz	%w1, 1b"						\
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
+	: "Ir" (i));							\
+}									\
+
+#define ATOMIC64_OP_RETURN(op, asm_op)					\
+static inline long atomic64_##op##_return(long i, atomic64_t *v)	\
+{									\
+	long result;							\
+	unsigned long tmp;						\
+									\
+	asm volatile("// atomic64_" #op "_return\n"			\
+"1:	ldxr	%0, %2\n"						\
+"	" #asm_op "	%0, %0, %3\n"					\
+"	stlxr	%w1, %0, %2\n"						\
+"	cbnz	%w1, 1b"						\
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
+	: "Ir" (i)							\
+	: "memory");							\
+									\
+	smp_mb();							\
+	return result;							\
 }
 
-static inline long atomic64_add_return(long i, atomic64_t *v)
-{
-	long result;
-	unsigned long tmp;
+#define ATOMIC64_OPS(op, asm_op)					\
+	ATOMIC64_OP(op, asm_op)						\
+	ATOMIC64_OP_RETURN(op, asm_op)
 
-	asm volatile("// atomic64_add_return\n"
-"1:	ldxr	%0, %2\n"
-"	add	%0, %0, %3\n"
-"	stlxr	%w1, %0, %2\n"
-"	cbnz	%w1, 1b"
-	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	: "Ir" (i)
-	: "memory");
+ATOMIC64_OPS(add, add)
+ATOMIC64_OPS(sub, sub)
 
-	smp_mb();
-	return result;
-}
-
-static inline void atomic64_sub(u64 i, atomic64_t *v)
-{
-	long result;
-	unsigned long tmp;
-
-	asm volatile("// atomic64_sub\n"
-"1:	ldxr	%0, %2\n"
-"	sub	%0, %0, %3\n"
-"	stxr	%w1, %0, %2\n"
-"	cbnz	%w1, 1b"
-	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	: "Ir" (i));
-}
-
-static inline long atomic64_sub_return(long i, atomic64_t *v)
-{
-	long result;
-	unsigned long tmp;
-
-	asm volatile("// atomic64_sub_return\n"
-"1:	ldxr	%0, %2\n"
-"	sub	%0, %0, %3\n"
-"	stlxr	%w1, %0, %2\n"
-"	cbnz	%w1, 1b"
-	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	: "Ir" (i)
-	: "memory");
-
-	smp_mb();
-	return result;
-}
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
 
 static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
 {
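
Both hunks preserve the barrier semantics of the code they replace: the void atomic_##op() bodies use ldxr/stxr with no "memory" clobber and no barrier, so they are unordered, while the value-returning variants use stlxr (a store-release) followed by an explicit smp_mb(), giving them full-barrier semantics. A minimal caller sketch of that split (obj_get/obj_put are hypothetical names invented for illustration, not part of this patch):

/* Hypothetical refcount built on the ops generated above. */
static inline void obj_get(atomic_t *refcount)
{
	atomic_add(1, refcount);	/* unordered: fine for taking a reference */
}

static inline int obj_put(atomic_t *refcount)
{
	/* fully ordered: stlxr + smp_mb() inside atomic_sub_return() */
	return atomic_sub_return(1, refcount) == 0;
}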