@@ -27,62 +27,94 @@
 #define atomic_set(v,i)		(((v)->counter) = (i))
 #define atomic64_set(v,i)	(((v)->counter) = (i))
 
-static __inline__ int
-ia64_atomic_add (int i, atomic_t *v)
-{
-	__s32 old, new;
-	CMPXCHG_BUGCHECK_DECL
-
-	do {
-		CMPXCHG_BUGCHECK(v);
-		old = atomic_read(v);
-		new = old + i;
-	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
-	return new;
+#define ATOMIC_OP(op, c_op)						\
+static __inline__ int							\
+ia64_atomic_##op (int i, atomic_t *v)					\
+{									\
+	__s32 old, new;							\
+	CMPXCHG_BUGCHECK_DECL						\
+									\
+	do {								\
+		CMPXCHG_BUGCHECK(v);					\
+		old = atomic_read(v);					\
+		new = old c_op i;					\
+	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
+	return new;							\
 }
 
-static __inline__ long
-ia64_atomic64_add (__s64 i, atomic64_t *v)
-{
-	__s64 old, new;
-	CMPXCHG_BUGCHECK_DECL
-
-	do {
-		CMPXCHG_BUGCHECK(v);
-		old = atomic64_read(v);
-		new = old + i;
-	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
-	return new;
-}
+ATOMIC_OP(add, +)
+ATOMIC_OP(sub, -)
 
-static __inline__ int
-ia64_atomic_sub (int i, atomic_t *v)
-{
-	__s32 old, new;
-	CMPXCHG_BUGCHECK_DECL
-
-	do {
-		CMPXCHG_BUGCHECK(v);
-		old = atomic_read(v);
-		new = old - i;
-	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
-	return new;
-}
+#undef ATOMIC_OP
 
-static __inline__ long
-ia64_atomic64_sub (__s64 i, atomic64_t *v)
-{
-	__s64 old, new;
-	CMPXCHG_BUGCHECK_DECL
-
-	do {
-		CMPXCHG_BUGCHECK(v);
-		old = atomic64_read(v);
-		new = old - i;
-	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
-	return new;
+#define atomic_add_return(i,v)						\
+({									\
+	int __ia64_aar_i = (i);						\
+	(__builtin_constant_p(i)					\
+	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
+	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
+	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
+	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))	\
+		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
+		: ia64_atomic_add(__ia64_aar_i, v);			\
+})
+
+#define atomic_sub_return(i,v)						\
+({									\
+	int __ia64_asr_i = (i);						\
+	(__builtin_constant_p(i)					\
+	 && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4)		\
+	     || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16)		\
+	     || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4)		\
+	     || (__ia64_asr_i == -8) || (__ia64_asr_i == -16)))	\
+		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
+		: ia64_atomic_sub(__ia64_asr_i, v);			\
+})
+
+#define ATOMIC64_OP(op, c_op)						\
+static __inline__ long							\
+ia64_atomic64_##op (__s64 i, atomic64_t *v)				\
+{									\
+	__s64 old, new;							\
+	CMPXCHG_BUGCHECK_DECL						\
+									\
+	do {								\
+		CMPXCHG_BUGCHECK(v);					\
+		old = atomic64_read(v);					\
+		new = old c_op i;					\
+	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
+	return new;							\
 }
 
+ATOMIC64_OP(add, +)
+ATOMIC64_OP(sub, -)
+
+#undef ATOMIC64_OP
+
+#define atomic64_add_return(i,v)					\
+({									\
+	long __ia64_aar_i = (i);					\
+	(__builtin_constant_p(i)					\
+	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
+	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
+	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
+	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))	\
+		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
+		: ia64_atomic64_add(__ia64_aar_i, v);			\
+})
+
+#define atomic64_sub_return(i,v)					\
+({									\
+	long __ia64_asr_i = (i);					\
+	(__builtin_constant_p(i)					\
+	 && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4)		\
+	     || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16)		\
+	     || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4)		\
+	     || (__ia64_asr_i == -8) || (__ia64_asr_i == -16)))	\
+		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
+		: ia64_atomic64_sub(__ia64_asr_i, v);			\
+})
+
 #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
@@ -123,30 +155,6 @@ static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
 
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
-#define atomic_add_return(i,v)						\
-({									\
-	int __ia64_aar_i = (i);						\
-	(__builtin_constant_p(i)					\
-	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
-	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
-	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
-	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))	\
-		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
-		: ia64_atomic_add(__ia64_aar_i, v);			\
-})
-
-#define atomic64_add_return(i,v)					\
-({									\
-	long __ia64_aar_i = (i);					\
-	(__builtin_constant_p(i)					\
-	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
-	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
-	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
-	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))	\
-		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
-		: ia64_atomic64_add(__ia64_aar_i, v);			\
-})
-
 /*
  * Atomically add I to V and return TRUE if the resulting value is
  * negative.
@@ -163,30 +171,6 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
 	return atomic64_add_return(i, v) < 0;
 }
 
-#define atomic_sub_return(i,v)						\
-({									\
-	int __ia64_asr_i = (i);						\
-	(__builtin_constant_p(i)					\
-	 && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4)		\
-	     || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16)		\
-	     || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4)		\
-	     || (__ia64_asr_i == -8) || (__ia64_asr_i == -16)))	\
-		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
-		: ia64_atomic_sub(__ia64_asr_i, v);			\
-})
-
-#define atomic64_sub_return(i,v)					\
-({									\
-	long __ia64_asr_i = (i);					\
-	(__builtin_constant_p(i)					\
-	 && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4)		\
-	     || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16)		\
-	     || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4)		\
-	     || (__ia64_asr_i == -8) || (__ia64_asr_i == -16)))	\
-		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
-		: ia64_atomic64_sub(__ia64_asr_i, v);			\
-})
-
 #define atomic_dec_return(v)		atomic_sub_return(1, (v))
 #define atomic_inc_return(v)		atomic_add_return(1, (v))
 #define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
@@ -199,13 +183,13 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
 #define atomic64_dec_and_test(v)	(atomic64_sub_return(1, (v)) == 0)
 #define atomic64_inc_and_test(v)	(atomic64_add_return(1, (v)) == 0)
 
-#define atomic_add(i,v)			atomic_add_return((i), (v))
-#define atomic_sub(i,v)			atomic_sub_return((i), (v))
+#define atomic_add(i,v)			(void)atomic_add_return((i), (v))
+#define atomic_sub(i,v)			(void)atomic_sub_return((i), (v))
 #define atomic_inc(v)			atomic_add(1, (v))
 #define atomic_dec(v)			atomic_sub(1, (v))
 
-#define atomic64_add(i,v)		atomic64_add_return((i), (v))
-#define atomic64_sub(i,v)		atomic64_sub_return((i), (v))
+#define atomic64_add(i,v)		(void)atomic64_add_return((i), (v))
+#define atomic64_sub(i,v)		(void)atomic64_sub_return((i), (v))
 #define atomic64_inc(v)			atomic64_add(1, (v))
 #define atomic64_dec(v)			atomic64_sub(1, (v))
 
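For context (not part of the patch): each generated ia64_atomic_##op() is a plain compare-exchange retry loop, and the *_return wrappers take the ia64_fetch_and_add() fast path only for compile-time constants of +/-1, 4, 8, or 16, because those are the only immediates the ia64 fetchadd instruction encodes; any other increment falls back to the loop. Below is a minimal stand-alone sketch of the same retry-loop technique, using C11 atomics in place of the kernel's ia64_cmpxchg(); the DEMO_OP macro and the demo_atomic_* names are invented for illustration.

/*
 * User-space sketch of the ATOMIC_OP() pattern folded by this patch.
 * Build with: cc -std=c11 demo.c
 */
#include <stdatomic.h>
#include <stdio.h>

#define DEMO_OP(op, c_op)						\
static inline int							\
demo_atomic_##op(int i, _Atomic int *v)					\
{									\
	int old, new;							\
									\
	do {								\
		old = atomic_load(v);					\
		new = old c_op i;					\
		/* retry if another thread changed *v meanwhile */	\
	} while (!atomic_compare_exchange_weak(v, &old, new));		\
	return new;							\
}

DEMO_OP(add, +)		/* generates demo_atomic_add() */
DEMO_OP(sub, -)		/* generates demo_atomic_sub() */

#undef DEMO_OP

int main(void)
{
	_Atomic int counter = 0;

	printf("%d\n", demo_atomic_add(5, &counter));	/* prints 5 */
	printf("%d\n", demo_atomic_sub(2, &counter));	/* prints 3 */
	return 0;
}

Unlike this sketch, the kernel's ia64_cmpxchg(acq, ...) returns the value it observed rather than a success flag, and CMPXCHG_BUGCHECK() adds a debug-only check that the loop is not retrying indefinitely; the structure is otherwise the same.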