@@ -141,6 +141,33 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 	return oldval;
 }
 
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int oldval, newval;
+	unsigned long tmp;
+
+	smp_mb();
+	prefetchw(&v->counter);
+
+	__asm__ __volatile__ ("@ atomic_add_unless\n"
+"1:	ldrex	%0, [%4]\n"
+"	teq	%0, %5\n"
+"	beq	2f\n"
+"	add	%1, %0, %6\n"
+"	strex	%2, %1, [%4]\n"
+"	teq	%2, #0\n"
+"	bne	1b\n"
+"2:"
+	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
+	: "r" (&v->counter), "r" (u), "r" (a)
+	: "cc");
+
+	if (oldval != u)
+		smp_mb();
+
+	return oldval;
+}
+
 #else /* ARM_ARCH_6 */
 
 #ifdef CONFIG_SMP
@@ -189,10 +216,6 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 	return ret;
 }
 
-#endif /* __LINUX_ARM_ARCH__ */
-
-#define atomic_xchg(v, new)	(xchg(&((v)->counter), new))
-
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
@@ -203,6 +226,10 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 	return c;
 }
 
+#endif /* __LINUX_ARM_ARCH__ */
+
+#define atomic_xchg(v, new)	(xchg(&((v)->counter), new))
+
 #define atomic_inc(v)		atomic_add(1, v)
 #define atomic_dec(v)		atomic_sub(1, v)
 
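For reference only (not part of the patch): a minimal usage sketch of the semantics this hunk implements. __atomic_add_unless() returns the value the counter held before the operation and only performs the add when that value differs from u; the generic atomic_add_unless()/atomic_inc_not_zero() wrappers in include/linux/atomic.h are built on top of it. The struct my_obj and my_obj_tryget names below are hypothetical, chosen for illustration.

/*
 * Usage sketch, assuming the generic wrappers:
 *   atomic_add_unless(v, a, u)  == (__atomic_add_unless(v, a, u) != u)
 *   atomic_inc_not_zero(v)      ==  atomic_add_unless(v, 1, 0)
 * Typical consumer: take a reference only if the object is not already dead.
 */
struct my_obj {
	atomic_t refcount;
};

static bool my_obj_tryget(struct my_obj *obj)
{
	/* Increments refcount unless it is 0; returns true on success. */
	return atomic_inc_not_zero(&obj->refcount);
}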