@@ -77,6 +77,12 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
 ATOMIC_OPS(add, +=, add)
 ATOMIC_OPS(sub, -=, sub)
 
+#define CONFIG_ARCH_HAS_ATOMIC_OR
+
+ATOMIC_OP(and, &=, and)
+ATOMIC_OP(or, |=, or)
+ATOMIC_OP(xor, ^=, eor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
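
ATOMIC_OP(op, c_op, asm_op) generates a void atomic_<op>() per operation:
asm_op names the m68k instruction (xor is spelled "eor" on the 68k), and
c_op is the C operator used by fallback variants elsewhere in the header.
CONFIG_ARCH_HAS_ATOMIC_OR signals to generic code that this architecture
now provides atomic_or() itself. Assuming ATOMIC_OP emits the same
single-instruction asm as the mask helpers reworked below, a sketch of
what ATOMIC_OP(and, &=, and) expands to would be roughly:

	static inline void atomic_and(int i, atomic_t *v)
	{
		/* asm_op "and" stringized into the 68k long-word "andl" */
		__asm__ __volatile__("andl %1,%0" : "+m" (*v) : ASM_DI (i));
	}
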
@@ -170,14 +176,14 @@ static inline int atomic_add_negative(int i, atomic_t *v)
 	return c != 0;
 }
 
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
+static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
-	__asm__ __volatile__("andl %1,%0" : "+m" (*v) : ASM_DI (~(mask)));
+	atomic_and(~mask, v);
 }
 
-static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
+static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
 {
-	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : ASM_DI (mask));
+	atomic_or(mask, v);
 }
 
 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
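
With both helpers now __deprecated and taking an atomic_t * rather than a
raw unsigned long *, callers are expected to move to the new ops directly.
A minimal sketch of the migration, using made-up names (MASK_BUSY,
MASK_READY, dev->flags); note that the mask must be inverted for
atomic_and():

	/* before (hypothetical caller): */
	atomic_clear_mask(MASK_BUSY, &dev->flags);
	atomic_set_mask(MASK_READY, &dev->flags);

	/* after: */
	atomic_and(~MASK_BUSY, &dev->flags);
	atomic_or(MASK_READY, &dev->flags);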