@@ -158,53 +158,9 @@ extern void __add_wrong_size(void)
* value of "*ptr".
*
* xadd() is locked when multiple CPUs are online
- * xadd_sync() is always locked
- * xadd_local() is never locked
*/
#define __xadd(ptr, inc, lock) __xchg_op((ptr), (inc), xadd, lock)
#define xadd(ptr, inc) __xadd((ptr), (inc), LOCK_PREFIX)
-#define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
-#define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
-
-#define __add(ptr, inc, lock) \
- ({ \
- __typeof__ (*(ptr)) __ret = (inc); \
- switch (sizeof(*(ptr))) { \
- case __X86_CASE_B: \
- asm volatile (lock "addb %b1, %0\n" \
- : "+m" (*(ptr)) : "qi" (inc) \
- : "memory", "cc"); \
- break; \
- case __X86_CASE_W: \
- asm volatile (lock "addw %w1, %0\n" \
- : "+m" (*(ptr)) : "ri" (inc) \
- : "memory", "cc"); \
- break; \
- case __X86_CASE_L: \
- asm volatile (lock "addl %1, %0\n" \
- : "+m" (*(ptr)) : "ri" (inc) \
- : "memory", "cc"); \
- break; \
- case __X86_CASE_Q: \
- asm volatile (lock "addq %1, %0\n" \
- : "+m" (*(ptr)) : "ri" (inc) \
- : "memory", "cc"); \
- break; \
- default: \
- __add_wrong_size(); \
- } \
- __ret; \
- })
-
-/*
- * add_*() adds "inc" to "*ptr"
- *
- * __add() takes a lock prefix
- * add_smp() is locked when multiple CPUs are online
- * add_sync() is always locked
- */
-#define add_smp(ptr, inc) __add((ptr), (inc), LOCK_PREFIX)
-#define add_sync(ptr, inc) __add((ptr), (inc), "lock; ")
#define __cmpxchg_double(pfx, p1, p2, o1, o2, n1, n2) \
({ \