@@ -29,145 +29,92 @@
  * branch back to restart the operation.
  */
-static __inline__ void atomic_add(int i, atomic_t * v)
-{
- unsigned long temp;
- __asm__ __volatile__(
- "1: ldl_l %0,%1\n"
- " addl %0,%2,%0\n"
- " stl_c %0,%1\n"
- " beq %0,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- :"=&r" (temp), "=m" (v->counter)
- :"Ir" (i), "m" (v->counter));
-}
-
-static __inline__ void atomic64_add(long i, atomic64_t * v)
-{
- unsigned long temp;
- __asm__ __volatile__(
- "1: ldq_l %0,%1\n"
- " addq %0,%2,%0\n"
- " stq_c %0,%1\n"
- " beq %0,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- :"=&r" (temp), "=m" (v->counter)
- :"Ir" (i), "m" (v->counter));
-}
-
-static __inline__ void atomic_sub(int i, atomic_t * v)
-{
- unsigned long temp;
- __asm__ __volatile__(
- "1: ldl_l %0,%1\n"
- " subl %0,%2,%0\n"
- " stl_c %0,%1\n"
- " beq %0,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- :"=&r" (temp), "=m" (v->counter)
- :"Ir" (i), "m" (v->counter));
+#define ATOMIC_OP(op) \
+static __inline__ void atomic_##op(int i, atomic_t * v) \
+{ \
+ unsigned long temp; \
+ __asm__ __volatile__( \
+ "1: ldl_l %0,%1\n" \
+ " " #op "l %0,%2,%0\n" \
+ " stl_c %0,%1\n" \
+ " beq %0,2f\n" \
+ ".subsection 2\n" \
+ "2: br 1b\n" \
+ ".previous" \
+ :"=&r" (temp), "=m" (v->counter) \
+ :"Ir" (i), "m" (v->counter)); \
+} \
+
+#define ATOMIC_OP_RETURN(op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ long temp, result; \
+ smp_mb(); \
+ __asm__ __volatile__( \
+ "1: ldl_l %0,%1\n" \
+ " " #op "l %0,%3,%2\n" \
+ " " #op "l %0,%3,%0\n" \
+ " stl_c %0,%1\n" \
+ " beq %0,2f\n" \
+ ".subsection 2\n" \
+ "2: br 1b\n" \
+ ".previous" \
+ :"=&r" (temp), "=m" (v->counter), "=&r" (result) \
+ :"Ir" (i), "m" (v->counter) : "memory"); \
+ smp_mb(); \
+ return result; \
 }
 
-static __inline__ void atomic64_sub(long i, atomic64_t * v)
-{
- unsigned long temp;
- __asm__ __volatile__(
- "1: ldq_l %0,%1\n"
- " subq %0,%2,%0\n"
- " stq_c %0,%1\n"
- " beq %0,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- :"=&r" (temp), "=m" (v->counter)
- :"Ir" (i), "m" (v->counter));
-}
-
-
-/*
- * Same as above, but return the result value
- */
-static inline int atomic_add_return(int i, atomic_t *v)
-{
- long temp, result;
- smp_mb();
- __asm__ __volatile__(
- "1: ldl_l %0,%1\n"
- " addl %0,%3,%2\n"
- " addl %0,%3,%0\n"
- " stl_c %0,%1\n"
- " beq %0,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- :"=&r" (temp), "=m" (v->counter), "=&r" (result)
- :"Ir" (i), "m" (v->counter) : "memory");
- smp_mb();
- return result;
+#define ATOMIC64_OP(op) \
+static __inline__ void atomic64_##op(long i, atomic64_t * v) \
+{ \
+ unsigned long temp; \
+ __asm__ __volatile__( \
+ "1: ldq_l %0,%1\n" \
+ " " #op "q %0,%2,%0\n" \
+ " stq_c %0,%1\n" \
+ " beq %0,2f\n" \
+ ".subsection 2\n" \
+ "2: br 1b\n" \
+ ".previous" \
+ :"=&r" (temp), "=m" (v->counter) \
+ :"Ir" (i), "m" (v->counter)); \
+} \
+
+#define ATOMIC64_OP_RETURN(op) \
+static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
+{ \
+ long temp, result; \
+ smp_mb(); \
+ __asm__ __volatile__( \
+ "1: ldq_l %0,%1\n" \
+ " " #op "q %0,%3,%2\n" \
+ " " #op "q %0,%3,%0\n" \
+ " stq_c %0,%1\n" \
+ " beq %0,2f\n" \
+ ".subsection 2\n" \
+ "2: br 1b\n" \
+ ".previous" \
+ :"=&r" (temp), "=m" (v->counter), "=&r" (result) \
+ :"Ir" (i), "m" (v->counter) : "memory"); \
+ smp_mb(); \
+ return result; \
 }
 
-static __inline__ long atomic64_add_return(long i, atomic64_t * v)
-{
- long temp, result;
- smp_mb();
- __asm__ __volatile__(
- "1: ldq_l %0,%1\n"
- " addq %0,%3,%2\n"
- " addq %0,%3,%0\n"
- " stq_c %0,%1\n"
- " beq %0,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- :"=&r" (temp), "=m" (v->counter), "=&r" (result)
- :"Ir" (i), "m" (v->counter) : "memory");
- smp_mb();
- return result;
-}
+#define ATOMIC_OPS(opg) \
+ ATOMIC_OP(opg) \
+ ATOMIC_OP_RETURN(opg) \
+ ATOMIC64_OP(opg) \
+ ATOMIC64_OP_RETURN(opg)
 
-static __inline__ long atomic_sub_return(int i, atomic_t * v)
-{
- long temp, result;
- smp_mb();
- __asm__ __volatile__(
- "1: ldl_l %0,%1\n"
- " subl %0,%3,%2\n"
- " subl %0,%3,%0\n"
- " stl_c %0,%1\n"
- " beq %0,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- :"=&r" (temp), "=m" (v->counter), "=&r" (result)
- :"Ir" (i), "m" (v->counter) : "memory");
- smp_mb();
- return result;
-}
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
 
-static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
-{
- long temp, result;
- smp_mb();
- __asm__ __volatile__(
- "1: ldq_l %0,%1\n"
- " subq %0,%3,%2\n"
- " subq %0,%3,%0\n"
- " stq_c %0,%1\n"
- " beq %0,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- :"=&r" (temp), "=m" (v->counter), "=&r" (result)
- :"Ir" (i), "m" (v->counter) : "memory");
- smp_mb();
- return result;
-}
+#undef ATOMIC_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
 #define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
 
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))