@@ -25,154 +25,151 @@
 #include <asm/barrier.h>
 #include <asm/lse.h>
 
-static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
-{
-	unsigned long ret, tmp;
-
-	switch (size) {
-	case 1:
-		asm volatile(ARM64_LSE_ATOMIC_INSN(
-		/* LL/SC */
-		"	prfm	pstl1strm, %2\n"
-		"1:	ldxrb	%w0, %2\n"
-		"	stlxrb	%w1, %w3, %2\n"
-		"	cbnz	%w1, 1b\n"
-		"	dmb ish",
-		/* LSE atomics */
-		"	nop\n"
-		"	nop\n"
-		"	swpalb	%w3, %w0, %2\n"
-		"	nop\n"
-		"	nop")
-		: "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
-		: "r" (x)
-		: "memory");
-		break;
-	case 2:
-		asm volatile(ARM64_LSE_ATOMIC_INSN(
-		/* LL/SC */
-		"	prfm	pstl1strm, %2\n"
-		"1:	ldxrh	%w0, %2\n"
-		"	stlxrh	%w1, %w3, %2\n"
-		"	cbnz	%w1, 1b\n"
-		"	dmb ish",
-		/* LSE atomics */
-		"	nop\n"
-		"	nop\n"
-		"	swpalh	%w3, %w0, %2\n"
-		"	nop\n"
-		"	nop")
-		: "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
-		: "r" (x)
-		: "memory");
-		break;
-	case 4:
-		asm volatile(ARM64_LSE_ATOMIC_INSN(
-		/* LL/SC */
-		"	prfm	pstl1strm, %2\n"
-		"1:	ldxr	%w0, %2\n"
-		"	stlxr	%w1, %w3, %2\n"
-		"	cbnz	%w1, 1b\n"
-		"	dmb ish",
-		/* LSE atomics */
-		"	nop\n"
-		"	nop\n"
-		"	swpal	%w3, %w0, %2\n"
-		"	nop\n"
-		"	nop")
-		: "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
-		: "r" (x)
-		: "memory");
-		break;
-	case 8:
-		asm volatile(ARM64_LSE_ATOMIC_INSN(
-		/* LL/SC */
-		"	prfm	pstl1strm, %2\n"
-		"1:	ldxr	%0, %2\n"
-		"	stlxr	%w1, %3, %2\n"
-		"	cbnz	%w1, 1b\n"
-		"	dmb ish",
-		/* LSE atomics */
-		"	nop\n"
-		"	nop\n"
-		"	swpal	%3, %0, %2\n"
-		"	nop\n"
-		"	nop")
-		: "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
-		: "r" (x)
-		: "memory");
-		break;
-	default:
-		BUILD_BUG();
-	}
-
-	return ret;
+/*
+ * We need separate acquire parameters for ll/sc and lse, since the full
+ * barrier case is generated as release+dmb for the former and
+ * acquire+release for the latter.
+ */
+#define __XCHG_CASE(w, sz, name, mb, nop_lse, acq, acq_lse, rel, cl)	\
+static inline unsigned long __xchg_case_##name(unsigned long x,	\
+					       volatile void *ptr)	\
+{									\
+	unsigned long ret, tmp;						\
+									\
+	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
+	/* LL/SC */							\
+	"	prfm	pstl1strm, %2\n"				\
+	"1:	ld" #acq "xr" #sz "\t%" #w "0, %2\n"			\
+	"	st" #rel "xr" #sz "\t%w1, %" #w "3, %2\n"		\
+	"	cbnz	%w1, 1b\n"					\
+	"	" #mb,							\
+	/* LSE atomics */						\
+	"	nop\n"							\
+	"	nop\n"							\
+	"	swp" #acq_lse #rel #sz "\t%" #w "3, %" #w "0, %2\n"	\
+	"	nop\n"							\
+	"	" #nop_lse)						\
+	: "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)			\
+	: "r" (x)							\
+	: cl);								\
+									\
+	return ret;							\
 }
 
-#define xchg(ptr,x) \
-({ \
-	__typeof__(*(ptr)) __ret; \
-	__ret = (__typeof__(*(ptr))) \
-		__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \
-	__ret; \
+__XCHG_CASE(w, b,     1,        ,    ,  ,  ,  ,         )
+__XCHG_CASE(w, h,     2,        ,    ,  ,  ,  ,         )
+__XCHG_CASE(w,  ,     4,        ,    ,  ,  ,  ,         )
+__XCHG_CASE( ,  ,     8,        ,    ,  ,  ,  ,         )
+__XCHG_CASE(w, b, acq_1,        ,    , a, a,  , "memory")
+__XCHG_CASE(w, h, acq_2,        ,    , a, a,  , "memory")
+__XCHG_CASE(w,  , acq_4,        ,    , a, a,  , "memory")
+__XCHG_CASE( ,  , acq_8,        ,    , a, a,  , "memory")
+__XCHG_CASE(w, b, rel_1,        ,    ,  ,  , l, "memory")
+__XCHG_CASE(w, h, rel_2,        ,    ,  ,  , l, "memory")
+__XCHG_CASE(w,  , rel_4,        ,    ,  ,  , l, "memory")
+__XCHG_CASE( ,  , rel_8,        ,    ,  ,  , l, "memory")
+__XCHG_CASE(w, b,  mb_1, dmb ish, nop,  , a, l, "memory")
+__XCHG_CASE(w, h,  mb_2, dmb ish, nop,  , a, l, "memory")
+__XCHG_CASE(w,  ,  mb_4, dmb ish, nop,  , a, l, "memory")
+__XCHG_CASE( ,  ,  mb_8, dmb ish, nop,  , a, l, "memory")
+
+#undef __XCHG_CASE
+
+#define __XCHG_GEN(sfx)						\
+static inline unsigned long __xchg##sfx(unsigned long x,	\
+					volatile void *ptr,	\
+					int size)		\
+{								\
+	switch (size) {						\
+	case 1:							\
+		return __xchg_case##sfx##_1(x, ptr);		\
+	case 2:							\
+		return __xchg_case##sfx##_2(x, ptr);		\
+	case 4:							\
+		return __xchg_case##sfx##_4(x, ptr);		\
+	case 8:							\
+		return __xchg_case##sfx##_8(x, ptr);		\
+	default:						\
+		BUILD_BUG();					\
+	}							\
+								\
+	unreachable();						\
+}
+
+__XCHG_GEN()
+__XCHG_GEN(_acq)
+__XCHG_GEN(_rel)
+__XCHG_GEN(_mb)
+
+#undef __XCHG_GEN
+
+#define __xchg_wrapper(sfx, ptr, x)					\
+({									\
+	__typeof__(*(ptr)) __ret;					\
+	__ret = (__typeof__(*(ptr)))					\
+		__xchg##sfx((unsigned long)(x), (ptr), sizeof(*(ptr))); \
+	__ret;								\
 })
 
-static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
-				      unsigned long new, int size)
-{
-	switch (size) {
-	case 1:
-		return __cmpxchg_case_1(ptr, (u8)old, new);
-	case 2:
-		return __cmpxchg_case_2(ptr, (u16)old, new);
-	case 4:
-		return __cmpxchg_case_4(ptr, old, new);
-	case 8:
-		return __cmpxchg_case_8(ptr, old, new);
-	default:
-		BUILD_BUG();
-	}
-
-	unreachable();
+/* xchg */
+#define xchg_relaxed(...)	__xchg_wrapper(    , __VA_ARGS__)
+#define xchg_acquire(...)	__xchg_wrapper(_acq, __VA_ARGS__)
+#define xchg_release(...)	__xchg_wrapper(_rel, __VA_ARGS__)
+#define xchg(...)		__xchg_wrapper( _mb, __VA_ARGS__)
+
+#define __CMPXCHG_GEN(sfx)						\
+static inline unsigned long __cmpxchg##sfx(volatile void *ptr,		\
+					   unsigned long old,		\
+					   unsigned long new,		\
+					   int size)			\
+{									\
+	switch (size) {							\
+	case 1:								\
+		return __cmpxchg_case##sfx##_1(ptr, (u8)old, new);	\
+	case 2:								\
+		return __cmpxchg_case##sfx##_2(ptr, (u16)old, new);	\
+	case 4:								\
+		return __cmpxchg_case##sfx##_4(ptr, old, new);		\
+	case 8:								\
+		return __cmpxchg_case##sfx##_8(ptr, old, new);		\
+	default:							\
+		BUILD_BUG();						\
+	}								\
+									\
+	unreachable();							\
 }
 
-static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
-					 unsigned long new, int size)
-{
-	switch (size) {
-	case 1:
-		return __cmpxchg_case_mb_1(ptr, (u8)old, new);
-	case 2:
-		return __cmpxchg_case_mb_2(ptr, (u16)old, new);
-	case 4:
-		return __cmpxchg_case_mb_4(ptr, old, new);
-	case 8:
-		return __cmpxchg_case_mb_8(ptr, old, new);
-	default:
-		BUILD_BUG();
-	}
-
-	unreachable();
-}
+__CMPXCHG_GEN()
+__CMPXCHG_GEN(_acq)
+__CMPXCHG_GEN(_rel)
+__CMPXCHG_GEN(_mb)
 
-#define cmpxchg(ptr, o, n) \
-({ \
-	__typeof__(*(ptr)) __ret; \
-	__ret = (__typeof__(*(ptr))) \
-		__cmpxchg_mb((ptr), (unsigned long)(o), (unsigned long)(n), \
-			     sizeof(*(ptr))); \
-	__ret; \
-})
+#undef __CMPXCHG_GEN
 
-#define cmpxchg_local(ptr, o, n) \
-({ \
-	__typeof__(*(ptr)) __ret; \
-	__ret = (__typeof__(*(ptr))) \
-		__cmpxchg((ptr), (unsigned long)(o), \
-			  (unsigned long)(n), sizeof(*(ptr))); \
-	__ret; \
+#define __cmpxchg_wrapper(sfx, ptr, o, n)				\
+({									\
+	__typeof__(*(ptr)) __ret;					\
+	__ret = (__typeof__(*(ptr)))					\
+		__cmpxchg##sfx((ptr), (unsigned long)(o),		\
+				(unsigned long)(n), sizeof(*(ptr)));	\
+	__ret;								\
 })
 
+/* cmpxchg */
+#define cmpxchg_relaxed(...)	__cmpxchg_wrapper(    , __VA_ARGS__)
+#define cmpxchg_acquire(...)	__cmpxchg_wrapper(_acq, __VA_ARGS__)
+#define cmpxchg_release(...)	__cmpxchg_wrapper(_rel, __VA_ARGS__)
+#define cmpxchg(...)		__cmpxchg_wrapper( _mb, __VA_ARGS__)
+#define cmpxchg_local		cmpxchg_relaxed
+
+/* cmpxchg64 */
+#define cmpxchg64_relaxed	cmpxchg_relaxed
+#define cmpxchg64_acquire	cmpxchg_acquire
+#define cmpxchg64_release	cmpxchg_release
+#define cmpxchg64		cmpxchg
+#define cmpxchg64_local		cmpxchg_local
+
+/* cmpxchg_double */
 #define system_has_cmpxchg_double()	1
 
 #define __cmpxchg_double_check(ptr1, ptr2) \
@@ -202,6 +199,7 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
 	__ret; \
 })
 
+/* this_cpu_cmpxchg */
 #define _protect_cmpxchg_local(pcp, o, n) \
 ({ \
 	typeof(*raw_cpu_ptr(&(pcp))) __ret; \
@@ -227,9 +225,4 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
 	__ret; \
 })
 
-#define cmpxchg64(ptr,o,n)		cmpxchg((ptr),(o),(n))
-#define cmpxchg64_local(ptr,o,n)	cmpxchg_local((ptr),(o),(n))
-
-#define cmpxchg64_relaxed(ptr,o,n)	cmpxchg_local((ptr),(o),(n))
-
#endif /* __ASM_CMPXCHG_H */
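
For reference, here is roughly what one of the generated helpers looks like once the stringification is expanded by hand. This is an illustrative reconstruction of __XCHG_CASE(w, b, acq_1, , , a, a, , "memory") from the macro above, not compiler output, so treat the exact whitespace as approximate:

/* Illustrative expansion only; not part of the patch. */
static inline unsigned long __xchg_case_acq_1(unsigned long x,
					      volatile void *ptr)
{
	unsigned long ret, tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC: load-acquire exclusive, plain store-exclusive, no trailing dmb */
	"	prfm	pstl1strm, %2\n"
	"1:	ldaxrb	%w0, %2\n"
	"	stxrb	%w1, %w3, %2\n"
	"	cbnz	%w1, 1b\n"
	"	",
	/* LSE: a single acquire swap, padded with nops to match the LL/SC block */
	"	nop\n"
	"	nop\n"
	"	swpab	%w3, %w0, %2\n"
	"	nop\n"
	"	")
	: "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
	: "r" (x)
	: "memory");

	return ret;
}

The acquire variant keeps the load-acquire (ldaxrb) on the LL/SC path and the acquire swap (swpab) on the LSE path, with no trailing barrier, which is exactly the asymmetry the comment at the top of __XCHG_CASE describes for the full-barrier case.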
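
A minimal caller-side sketch of the four orderings the new wrappers expose. The functions and the lock/owner/counter names below are hypothetical and exist only to show which variant fits which situation; only the xchg*/cmpxchg* macros come from this header:

/* Hypothetical callers, not part of the patch. */
static inline int example_trylock(unsigned long *lock)
{
	/* acquire: the critical section cannot be reordered before the swap */
	return xchg_acquire(lock, 1UL) == 0;
}

static inline void example_unlock(unsigned long *lock)
{
	/* release: prior accesses cannot be reordered after the store */
	xchg_release(lock, 0UL);
}

static inline int example_claim(unsigned long *owner, unsigned long me)
{
	/* fully ordered: release + dmb on LL/SC, acquire + release on LSE */
	return cmpxchg(owner, 0UL, me) == 0;
}

static inline unsigned long example_drain(unsigned long *counter)
{
	/* relaxed: atomic, but no ordering against surrounding accesses */
	return xchg_relaxed(counter, 0UL);
}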