atomic_ll_sc.h

/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_ATOMIC_LL_SC_H
#define __ASM_ATOMIC_LL_SC_H

#ifndef __ARM64_IN_ATOMIC_IMPL
#error "please don't include this file directly"
#endif

/*
 * AArch64 UP and SMP safe atomic ops. We use load exclusive and
 * store exclusive to ensure that these are atomic. We may loop
 * to ensure that the update happens.
 *
 * NOTE: these functions do *not* follow the PCS and must explicitly
 * save any clobbered registers other than x0 (regardless of return
 * value). This is achieved through -fcall-saved-* compiler flags for
 * this file, which unfortunately don't work on a per-function basis
 * (the optimize attribute silently ignores these options).
 */

#define ATOMIC_OP(op, asm_op) \
__LL_SC_INLINE void \
__LL_SC_PREFIX(atomic_##op(int i, atomic_t *v)) \
{ \
        unsigned long tmp; \
        int result; \
        \
        asm volatile("// atomic_" #op "\n" \
" prfm pstl1strm, %2\n" \
"1: ldxr %w0, %2\n" \
" " #asm_op " %w0, %w0, %w3\n" \
" stxr %w1, %w0, %2\n" \
" cbnz %w1, 1b" \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
        : "Ir" (i)); \
} \
__LL_SC_EXPORT(atomic_##op);
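
/*
 * For illustration: ignoring the __LL_SC_* wrappers, ATOMIC_OP(add, add)
 * expands to roughly the following LL/SC retry loop. Operand %0 is result,
 * %1 is the store-exclusive status, %2 is v->counter and %3 is i.
 *
 *	void atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned long tmp;
 *		int result;
 *
 *		asm volatile("// atomic_add\n"
 *		" prfm pstl1strm, %2\n"      // prefetch the line for store
 *		"1: ldxr %w0, %2\n"          // load-exclusive v->counter
 *		" add %w0, %w0, %w3\n"       // result += i
 *		" stxr %w1, %w0, %2\n"       // store-exclusive, %w1 = status
 *		" cbnz %w1, 1b"              // retry if the exclusive store failed
 *		: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 *		: "Ir" (i));
 *	}
 */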

#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \
__LL_SC_INLINE int \
__LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v)) \
{ \
        unsigned long tmp; \
        int result; \
        \
        asm volatile("// atomic_" #op "_return" #name "\n" \
" prfm pstl1strm, %2\n" \
"1: ld" #acq "xr %w0, %2\n" \
" " #asm_op " %w0, %w0, %w3\n" \
" st" #rel "xr %w1, %w0, %2\n" \
" cbnz %w1, 1b\n" \
" " #mb \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
        : "Ir" (i) \
        : cl); \
        \
        return result; \
} \
__LL_SC_EXPORT(atomic_##op##_return##name);

#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \
__LL_SC_INLINE int \
__LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v)) \
{ \
        unsigned long tmp; \
        int val, result; \
        \
        asm volatile("// atomic_fetch_" #op #name "\n" \
" prfm pstl1strm, %3\n" \
"1: ld" #acq "xr %w0, %3\n" \
" " #asm_op " %w1, %w0, %w4\n" \
" st" #rel "xr %w2, %w1, %3\n" \
" cbnz %w2, 1b\n" \
" " #mb \
        : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
        : "Ir" (i) \
        : cl); \
        \
        return result; \
} \
__LL_SC_EXPORT(atomic_fetch_##op##name);

#define ATOMIC_OPS(...) \
        ATOMIC_OP(__VA_ARGS__) \
        ATOMIC_OP_RETURN( , dmb ish, , l, "memory", __VA_ARGS__) \
        ATOMIC_OP_RETURN(_relaxed, , , , , __VA_ARGS__) \
        ATOMIC_OP_RETURN(_acquire, , a, , "memory", __VA_ARGS__) \
        ATOMIC_OP_RETURN(_release, , , l, "memory", __VA_ARGS__) \
        ATOMIC_FETCH_OP ( , dmb ish, , l, "memory", __VA_ARGS__) \
        ATOMIC_FETCH_OP (_relaxed, , , , , __VA_ARGS__) \
        ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__) \
        ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__)
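
/*
 * Each ATOMIC_OPS(op, asm_op) invocation below therefore emits nine
 * functions: e.g. ATOMIC_OPS(add, add) provides atomic_add() plus
 * atomic_add_return{,_relaxed,_acquire,_release}() and
 * atomic_fetch_add{,_relaxed,_acquire,_release}(), which differ only in
 * the acquire/release flavour of the exclusives and in whether a trailing
 * "dmb ish" provides full ordering.
 */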

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, sub)

#undef ATOMIC_OPS
#define ATOMIC_OPS(...) \
        ATOMIC_OP(__VA_ARGS__) \
        ATOMIC_FETCH_OP ( , dmb ish, , l, "memory", __VA_ARGS__) \
        ATOMIC_FETCH_OP (_relaxed, , , , , __VA_ARGS__) \
        ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__) \
        ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__)

ATOMIC_OPS(and, and)
ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, orr)
ATOMIC_OPS(xor, eor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define ATOMIC64_OP(op, asm_op) \
__LL_SC_INLINE void \
__LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v)) \
{ \
        long result; \
        unsigned long tmp; \
        \
        asm volatile("// atomic64_" #op "\n" \
" prfm pstl1strm, %2\n" \
"1: ldxr %0, %2\n" \
" " #asm_op " %0, %0, %3\n" \
" stxr %w1, %0, %2\n" \
" cbnz %w1, 1b" \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
        : "Ir" (i)); \
} \
__LL_SC_EXPORT(atomic64_##op);

#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \
__LL_SC_INLINE long \
__LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v)) \
{ \
        long result; \
        unsigned long tmp; \
        \
        asm volatile("// atomic64_" #op "_return" #name "\n" \
" prfm pstl1strm, %2\n" \
"1: ld" #acq "xr %0, %2\n" \
" " #asm_op " %0, %0, %3\n" \
" st" #rel "xr %w1, %0, %2\n" \
" cbnz %w1, 1b\n" \
" " #mb \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
        : "Ir" (i) \
        : cl); \
        \
        return result; \
} \
__LL_SC_EXPORT(atomic64_##op##_return##name);

#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \
__LL_SC_INLINE long \
__LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v)) \
{ \
        long result, val; \
        unsigned long tmp; \
        \
        asm volatile("// atomic64_fetch_" #op #name "\n" \
" prfm pstl1strm, %3\n" \
"1: ld" #acq "xr %0, %3\n" \
" " #asm_op " %1, %0, %4\n" \
" st" #rel "xr %w2, %1, %3\n" \
" cbnz %w2, 1b\n" \
" " #mb \
        : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
        : "Ir" (i) \
        : cl); \
        \
        return result; \
} \
__LL_SC_EXPORT(atomic64_fetch_##op##name);

#define ATOMIC64_OPS(...) \
        ATOMIC64_OP(__VA_ARGS__) \
        ATOMIC64_OP_RETURN(, dmb ish, , l, "memory", __VA_ARGS__) \
        ATOMIC64_OP_RETURN(_relaxed, , , , , __VA_ARGS__) \
        ATOMIC64_OP_RETURN(_acquire, , a, , "memory", __VA_ARGS__) \
        ATOMIC64_OP_RETURN(_release, , , l, "memory", __VA_ARGS__) \
        ATOMIC64_FETCH_OP (, dmb ish, , l, "memory", __VA_ARGS__) \
        ATOMIC64_FETCH_OP (_relaxed, , , , , __VA_ARGS__) \
        ATOMIC64_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__) \
        ATOMIC64_FETCH_OP (_release, , , l, "memory", __VA_ARGS__)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, sub)

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(...) \
        ATOMIC64_OP(__VA_ARGS__) \
        ATOMIC64_FETCH_OP (, dmb ish, , l, "memory", __VA_ARGS__) \
        ATOMIC64_FETCH_OP (_relaxed, , , , , __VA_ARGS__) \
        ATOMIC64_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__) \
        ATOMIC64_FETCH_OP (_release, , , l, "memory", __VA_ARGS__)

ATOMIC64_OPS(and, and)
ATOMIC64_OPS(andnot, bic)
ATOMIC64_OPS(or, orr)
ATOMIC64_OPS(xor, eor)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
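
/*
 * atomic64_dec_if_positive() decrements v->counter only when the result
 * stays non-negative: on success the new value is stored (release-exclusive
 * plus a trailing dmb ish) and returned; otherwise the store is skipped and
 * the negative would-be result is returned, so callers can test the sign.
 */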

__LL_SC_INLINE long
__LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
{
        long result;
        unsigned long tmp;

        asm volatile("// atomic64_dec_if_positive\n"
" prfm pstl1strm, %2\n"
"1: ldxr %0, %2\n"
" subs %0, %0, #1\n"
" b.lt 2f\n"
" stlxr %w1, %0, %2\n"
" cbnz %w1, 1b\n"
" dmb ish\n"
"2:"
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
        :
        : "cc", "memory");

        return result;
}
__LL_SC_EXPORT(atomic64_dec_if_positive);

#define __CMPXCHG_CASE(w, sz, name, mb, acq, rel, cl) \
__LL_SC_INLINE unsigned long \
__LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr, \
                                     unsigned long old, \
                                     unsigned long new)) \
{ \
        unsigned long tmp, oldval; \
        \
        asm volatile( \
" prfm pstl1strm, %[v]\n" \
"1: ld" #acq "xr" #sz "\t%" #w "[oldval], %[v]\n" \
" eor %" #w "[tmp], %" #w "[oldval], %" #w "[old]\n" \
" cbnz %" #w "[tmp], 2f\n" \
" st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n" \
" cbnz %w[tmp], 1b\n" \
" " #mb "\n" \
" mov %" #w "[oldval], %" #w "[old]\n" \
"2:" \
        : [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \
          [v] "+Q" (*(unsigned long *)ptr) \
        : [old] "Lr" (old), [new] "r" (new) \
        : cl); \
        \
        return oldval; \
} \
__LL_SC_EXPORT(__cmpxchg_case_##name);
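
/*
 * The __CMPXCHG_CASE parameters select the variant: 'w' vs '' picks W- or
 * X-register operands, 'sz' is the b/h/"" size suffix on the exclusives,
 * 'name' becomes the function suffix, and 'mb'/'acq'/'rel'/'cl' supply the
 * trailing barrier, acquire/release flavour and clobber list. For example,
 * __cmpxchg_case_mb_4() below is the fully-ordered 32-bit case.
 */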

__CMPXCHG_CASE(w, b,     1,        ,  ,  ,         )
__CMPXCHG_CASE(w, h,     2,        ,  ,  ,         )
__CMPXCHG_CASE(w,  ,     4,        ,  ,  ,         )
__CMPXCHG_CASE( ,  ,     8,        ,  ,  ,         )
__CMPXCHG_CASE(w, b, acq_1,        , a,  , "memory")
__CMPXCHG_CASE(w, h, acq_2,        , a,  , "memory")
__CMPXCHG_CASE(w,  , acq_4,        , a,  , "memory")
__CMPXCHG_CASE( ,  , acq_8,        , a,  , "memory")
__CMPXCHG_CASE(w, b, rel_1,        ,  , l, "memory")
__CMPXCHG_CASE(w, h, rel_2,        ,  , l, "memory")
__CMPXCHG_CASE(w,  , rel_4,        ,  , l, "memory")
__CMPXCHG_CASE( ,  , rel_8,        ,  , l, "memory")
__CMPXCHG_CASE(w, b,  mb_1, dmb ish,  , l, "memory")
__CMPXCHG_CASE(w, h,  mb_2, dmb ish,  , l, "memory")
__CMPXCHG_CASE(w,  ,  mb_4, dmb ish,  , l, "memory")
__CMPXCHG_CASE( ,  ,  mb_8, dmb ish,  , l, "memory")

#undef __CMPXCHG_CASE

#define __CMPXCHG_DBL(name, mb, rel, cl) \
__LL_SC_INLINE long \
__LL_SC_PREFIX(__cmpxchg_double##name(unsigned long old1, \
                                      unsigned long old2, \
                                      unsigned long new1, \
                                      unsigned long new2, \
                                      volatile void *ptr)) \
{ \
        unsigned long tmp, ret; \
        \
        asm volatile("// __cmpxchg_double" #name "\n" \
" prfm pstl1strm, %2\n" \
"1: ldxp %0, %1, %2\n" \
" eor %0, %0, %3\n" \
" eor %1, %1, %4\n" \
" orr %1, %0, %1\n" \
" cbnz %1, 2f\n" \
" st" #rel "xp %w0, %5, %6, %2\n" \
" cbnz %w0, 1b\n" \
" " #mb "\n" \
"2:" \
        : "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr) \
        : "r" (old1), "r" (old2), "r" (new1), "r" (new2) \
        : cl); \
        \
        return ret; \
} \
__LL_SC_EXPORT(__cmpxchg_double##name);
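
/*
 * The __cmpxchg_double variants compare the pair at ptr against
 * {old1, old2} and, only if both words match, store {new1, new2} with one
 * store-exclusive pair. They return 0 on success and a non-zero value (the
 * OR of the XOR differences) when the comparison fails.
 */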

__CMPXCHG_DBL(   ,        ,  ,         )
__CMPXCHG_DBL(_mb, dmb ish, l, "memory")

#undef __CMPXCHG_DBL

#endif /* __ASM_ATOMIC_LL_SC_H */