@@ -37,84 +37,47 @@
  * store exclusive to ensure that these are atomic. We may loop
  * to ensure that the update happens.
  */
-static inline void atomic_add(int i, atomic_t *v)
-{
-	unsigned long tmp;
-	int result;
-
-	prefetchw(&v->counter);
-	__asm__ __volatile__("@ atomic_add\n"
-"1: ldrex %0, [%3]\n"
-" add %0, %0, %4\n"
-" strex %1, %0, [%3]\n"
-" teq %1, #0\n"
-" bne 1b"
-	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-	: "r" (&v->counter), "Ir" (i)
-	: "cc");
-}
-
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-	unsigned long tmp;
-	int result;
-
-	smp_mb();
-	prefetchw(&v->counter);
-
-	__asm__ __volatile__("@ atomic_add_return\n"
-"1: ldrex %0, [%3]\n"
-" add %0, %0, %4\n"
-" strex %1, %0, [%3]\n"
-" teq %1, #0\n"
-" bne 1b"
-	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-	: "r" (&v->counter), "Ir" (i)
-	: "cc");
-
-	smp_mb();
-
-	return result;
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
-	unsigned long tmp;
-	int result;
-
-	prefetchw(&v->counter);
-	__asm__ __volatile__("@ atomic_sub\n"
-"1: ldrex %0, [%3]\n"
-" sub %0, %0, %4\n"
-" strex %1, %0, [%3]\n"
-" teq %1, #0\n"
-" bne 1b"
-	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-	: "r" (&v->counter), "Ir" (i)
-	: "cc");
-}
-
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-	unsigned long tmp;
-	int result;
-
-	smp_mb();
-	prefetchw(&v->counter);
-
-	__asm__ __volatile__("@ atomic_sub_return\n"
-"1: ldrex %0, [%3]\n"
-" sub %0, %0, %4\n"
-" strex %1, %0, [%3]\n"
-" teq %1, #0\n"
-" bne 1b"
-	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-	: "r" (&v->counter), "Ir" (i)
-	: "cc");
-
-	smp_mb();
-
-	return result;
+#define ATOMIC_OP(op, c_op, asm_op)	\
+static inline void atomic_##op(int i, atomic_t *v)	\
+{	\
+	unsigned long tmp;	\
+	int result;	\
+	\
+	prefetchw(&v->counter);	\
+	__asm__ __volatile__("@ atomic_" #op "\n"	\
+"1: ldrex %0, [%3]\n"	\
+" " #asm_op " %0, %0, %4\n"	\
+" strex %1, %0, [%3]\n"	\
+" teq %1, #0\n"	\
+" bne 1b"	\
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)	\
+	: "r" (&v->counter), "Ir" (i)	\
+	: "cc");	\
+}	\
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op)	\
+static inline int atomic_##op##_return(int i, atomic_t *v)	\
+{	\
+	unsigned long tmp;	\
+	int result;	\
+	\
+	smp_mb();	\
+	prefetchw(&v->counter);	\
+	\
+	__asm__ __volatile__("@ atomic_" #op "_return\n"	\
+"1: ldrex %0, [%3]\n"	\
+" " #asm_op " %0, %0, %4\n"	\
+" strex %1, %0, [%3]\n"	\
+" teq %1, #0\n"	\
+" bne 1b"	\
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)	\
+	: "r" (&v->counter), "Ir" (i)	\
+	: "cc");	\
+	\
+	smp_mb();	\
+	\
+	return result;	\
 }
 
 static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
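For reference, expanding ATOMIC_OP(add, +=, add) from the hunk above gives back, after string-literal concatenation and modulo whitespace, the hand-written atomic_add() that the hunk deletes; the c_op argument is not used by this ldrex/strex variant, it only feeds the IRQ-disabling UP variant further down. An illustrative expansion, not part of the patch:

static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic_add\n"
"1: ldrex %0, [%3]\n"
" add %0, %0, %4\n"
" strex %1, %0, [%3]\n"
" teq %1, #0\n"
" bne 1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}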
@@ -174,33 +137,29 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 #error SMP not supported on pre-ARMv6 CPUs
 #endif
 
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-	unsigned long flags;
-	int val;
-
-	raw_local_irq_save(flags);
-	val = v->counter;
-	v->counter = val += i;
-	raw_local_irq_restore(flags);
-
-	return val;
-}
-#define atomic_add(i, v)	(void) atomic_add_return(i, v)
-
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-	unsigned long flags;
-	int val;
-
-	raw_local_irq_save(flags);
-	val = v->counter;
-	v->counter = val -= i;
-	raw_local_irq_restore(flags);
-
-	return val;
+#define ATOMIC_OP(op, c_op, asm_op)	\
+static inline void atomic_##op(int i, atomic_t *v)	\
+{	\
+	unsigned long flags;	\
+	\
+	raw_local_irq_save(flags);	\
+	v->counter c_op i;	\
+	raw_local_irq_restore(flags);	\
+}	\
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op)	\
+static inline int atomic_##op##_return(int i, atomic_t *v)	\
+{	\
+	unsigned long flags;	\
+	int val;	\
+	\
+	raw_local_irq_save(flags);	\
+	v->counter c_op i;	\
+	val = v->counter;	\
+	raw_local_irq_restore(flags);	\
+	\
+	return val;	\
 }
-#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)
 
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
@@ -228,6 +187,17 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 #endif /* __LINUX_ARM_ARCH__ */
 
+#define ATOMIC_OPS(op, c_op, asm_op)	\
+	ATOMIC_OP(op, c_op, asm_op)	\
+	ATOMIC_OP_RETURN(op, c_op, asm_op)
+
+ATOMIC_OPS(add, +=, add)
+ATOMIC_OPS(sub, -=, sub)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 #define atomic_inc(v)	atomic_add(1, v)
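The generator pattern in the hunks above is independent of the ldrex/strex details: ATOMIC_OPS() stamps out one void op and one op-and-return function per operation from a single body. A minimal user-space sketch of the same idea, assuming a GCC or Clang toolchain and using the __sync builtins instead of inline assembly (the my_* names are invented for this illustration and are not kernel API):

#include <stdio.h>

typedef struct { int counter; } my_atomic_t;

/* "op" is pasted into both the function name and the builtin name. */
#define MY_ATOMIC_OP(op)	\
static inline void my_atomic_##op(int i, my_atomic_t *v)	\
{	\
	__sync_fetch_and_##op(&v->counter, i);	\
}

/* The *_return form returns the new value, like atomic_##op##_return(). */
#define MY_ATOMIC_OP_RETURN(op)	\
static inline int my_atomic_##op##_return(int i, my_atomic_t *v)	\
{	\
	return __sync_##op##_and_fetch(&v->counter, i);	\
}

#define MY_ATOMIC_OPS(op)	\
	MY_ATOMIC_OP(op)	\
	MY_ATOMIC_OP_RETURN(op)

MY_ATOMIC_OPS(add)
MY_ATOMIC_OPS(sub)

#undef MY_ATOMIC_OPS
#undef MY_ATOMIC_OP_RETURN
#undef MY_ATOMIC_OP

int main(void)
{
	my_atomic_t v = { .counter = 5 };

	my_atomic_add(3, &v);				/* v.counter == 8 */
	printf("%d\n", my_atomic_sub_return(2, &v));	/* prints 6 */
	return 0;
}

The __sync builtins imply a full barrier, so this analogue is stronger than the kernel's plain atomic_add()/atomic_sub(), which give no ordering guarantee; only the *_return variants in the patch bracket the access with smp_mb().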
@@ -300,89 +270,60 @@ static inline void atomic64_set(atomic64_t *v, long long i)
 }
 #endif
 
-static inline void atomic64_add(long long i, atomic64_t *v)
-{
-	long long result;
-	unsigned long tmp;
-
-	prefetchw(&v->counter);
-	__asm__ __volatile__("@ atomic64_add\n"
-"1: ldrexd %0, %H0, [%3]\n"
-" adds %Q0, %Q0, %Q4\n"
-" adc %R0, %R0, %R4\n"
-" strexd %1, %0, %H0, [%3]\n"
-" teq %1, #0\n"
-" bne 1b"
-	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-	: "r" (&v->counter), "r" (i)
-	: "cc");
-}
-
-static inline long long atomic64_add_return(long long i, atomic64_t *v)
-{
-	long long result;
-	unsigned long tmp;
-
-	smp_mb();
-	prefetchw(&v->counter);
-
-	__asm__ __volatile__("@ atomic64_add_return\n"
-"1: ldrexd %0, %H0, [%3]\n"
-" adds %Q0, %Q0, %Q4\n"
-" adc %R0, %R0, %R4\n"
-" strexd %1, %0, %H0, [%3]\n"
-" teq %1, #0\n"
-" bne 1b"
-	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-	: "r" (&v->counter), "r" (i)
-	: "cc");
-
-	smp_mb();
-
-	return result;
-}
-
-static inline void atomic64_sub(long long i, atomic64_t *v)
-{
-	long long result;
-	unsigned long tmp;
-
-	prefetchw(&v->counter);
-	__asm__ __volatile__("@ atomic64_sub\n"
-"1: ldrexd %0, %H0, [%3]\n"
-" subs %Q0, %Q0, %Q4\n"
-" sbc %R0, %R0, %R4\n"
-" strexd %1, %0, %H0, [%3]\n"
-" teq %1, #0\n"
-" bne 1b"
-	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-	: "r" (&v->counter), "r" (i)
-	: "cc");
+#define ATOMIC64_OP(op, op1, op2)	\
+static inline void atomic64_##op(long long i, atomic64_t *v)	\
+{	\
+	long long result;	\
+	unsigned long tmp;	\
+	\
+	prefetchw(&v->counter);	\
+	__asm__ __volatile__("@ atomic64_" #op "\n"	\
+"1: ldrexd %0, %H0, [%3]\n"	\
+" " #op1 " %Q0, %Q0, %Q4\n"	\
+" " #op2 " %R0, %R0, %R4\n"	\
+" strexd %1, %0, %H0, [%3]\n"	\
+" teq %1, #0\n"	\
+" bne 1b"	\
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)	\
+	: "r" (&v->counter), "r" (i)	\
+	: "cc");	\
+}	\
+
+#define ATOMIC64_OP_RETURN(op, op1, op2)	\
+static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
+{	\
+	long long result;	\
+	unsigned long tmp;	\
+	\
+	smp_mb();	\
+	prefetchw(&v->counter);	\
+	\
+	__asm__ __volatile__("@ atomic64_" #op "_return\n"	\
+"1: ldrexd %0, %H0, [%3]\n"	\
+" " #op1 " %Q0, %Q0, %Q4\n"	\
+" " #op2 " %R0, %R0, %R4\n"	\
+" strexd %1, %0, %H0, [%3]\n"	\
+" teq %1, #0\n"	\
+" bne 1b"	\
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)	\
+	: "r" (&v->counter), "r" (i)	\
+	: "cc");	\
+	\
+	smp_mb();	\
+	\
+	return result;	\
 }
 
-static inline long long atomic64_sub_return(long long i, atomic64_t *v)
-{
-	long long result;
-	unsigned long tmp;
-
-	smp_mb();
-	prefetchw(&v->counter);
-
-	__asm__ __volatile__("@ atomic64_sub_return\n"
-"1: ldrexd %0, %H0, [%3]\n"
-" subs %Q0, %Q0, %Q4\n"
-" sbc %R0, %R0, %R4\n"
-" strexd %1, %0, %H0, [%3]\n"
-" teq %1, #0\n"
-" bne 1b"
-	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-	: "r" (&v->counter), "r" (i)
-	: "cc");
+#define ATOMIC64_OPS(op, op1, op2)	\
+	ATOMIC64_OP(op, op1, op2)	\
+	ATOMIC64_OP_RETURN(op, op1, op2)
 
-	smp_mb();
+ATOMIC64_OPS(add, adds, adc)
+ATOMIC64_OPS(sub, subs, sbc)
 
-	return result;
-}
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
 
 static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
 					 long long new)
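In the 64-bit hunk the generator takes two mnemonics because a long long lives in a register pair on 32-bit ARM: %Q0 and %R0 name the low and high halves of the 64-bit operand, op1 (adds/subs) sets the carry or borrow flag from the low word, and op2 (adc/sbc) folds it into the high word inside the same ldrexd/strexd retry loop. A plain-C illustration of that carry chain, with atomicity left out and names invented for the sketch (assuming a hosted C99 compiler):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* lo plays the role of %Q0, hi the role of %R0. */
struct u64_halves { uint32_t lo, hi; };

static struct u64_halves add64(struct u64_halves a, struct u64_halves b)
{
	struct u64_halves r;

	r.lo = a.lo + b.lo;			/* adds: wraps; carry iff r.lo < a.lo */
	r.hi = a.hi + b.hi + (r.lo < a.lo);	/* adc: add the carry into the high word */
	return r;
}

int main(void)
{
	struct u64_halves a = { 0xffffffffu, 0 };	/* 0x00000000ffffffff */
	struct u64_halves b = { 1, 0 };
	struct u64_halves r = add64(a, b);

	printf("0x%08" PRIx32 "%08" PRIx32 "\n", r.hi, r.lo);	/* 0x0000000100000000 */
	return 0;
}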