atomic_ll_sc.h

/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_ATOMIC_LL_SC_H
#define __ASM_ATOMIC_LL_SC_H

#ifndef __ARM64_IN_ATOMIC_IMPL
#error "please don't include this file directly"
#endif

/*
 * AArch64 UP and SMP safe atomic ops. We use load exclusive and
 * store exclusive to ensure that these are atomic. We may loop
 * to ensure that the update happens.
 *
 * NOTE: these functions do *not* follow the PCS and must explicitly
 * save any clobbered registers other than x0 (regardless of return
 * value). This is achieved through -fcall-saved-* compiler flags for
 * this file, which unfortunately don't work on a per-function basis
 * (the optimize attribute silently ignores these options).
 */
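
/*
 * The pattern used throughout this file, sketched as pseudo-C for
 * orientation (load_exclusive/store_exclusive are illustrative names,
 * not real kernel helpers; the real code is the inline asm below):
 *
 *	do {
 *		old = load_exclusive(&v->counter);	// LDXR
 *		new = old op i;				// add, sub, and, ...
 *	} while (store_exclusive(&v->counter, new));	// STXR status, 0 = success
 */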

#define ATOMIC_OP(op, asm_op) \
__LL_SC_INLINE void \
__LL_SC_PREFIX(atomic_##op(int i, atomic_t *v)) \
{ \
	unsigned long tmp; \
	int result; \
\
	asm volatile("// atomic_" #op "\n" \
"	prfm pstl1strm, %2\n" \
"1:	ldxr %w0, %2\n" \
"	" #asm_op " %w0, %w0, %w3\n" \
"	stxr %w1, %w0, %2\n" \
"	cbnz %w1, 1b" \
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
	: "Ir" (i)); \
} \
__LL_SC_EXPORT(atomic_##op);
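
/*
 * Reading the template: PRFM PSTL1STRM prefetches the line for a store,
 * LDXR loads v->counter and marks it for exclusive access, the
 * substituted asm_op computes the new value, STXR attempts the store and
 * writes a status flag into %w1, and CBNZ retries while that flag is
 * nonzero. There is no barrier and no "memory" clobber here, so these
 * value-less ops provide no ordering guarantees.
 */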

#define ATOMIC_OP_RETURN(op, asm_op) \
__LL_SC_INLINE int \
__LL_SC_PREFIX(atomic_##op##_return(int i, atomic_t *v)) \
{ \
	unsigned long tmp; \
	int result; \
\
	asm volatile("// atomic_" #op "_return\n" \
"	prfm pstl1strm, %2\n" \
"1:	ldxr %w0, %2\n" \
"	" #asm_op " %w0, %w0, %w3\n" \
"	stlxr %w1, %w0, %2\n" \
"	cbnz %w1, 1b" \
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
	: "Ir" (i) \
	: "memory"); \
\
	smp_mb(); \
	return result; \
} \
__LL_SC_EXPORT(atomic_##op##_return);
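
/*
 * The _return variants differ in two ways: the store is STLXR (a
 * store-release exclusive) and the asm is followed by smp_mb(), which
 * together make the operation, in effect, fully ordered. The returned
 * value is the counter after the operation, e.g. atomic_add_return(i, v)
 * yields v->counter with i already added.
 */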

#define ATOMIC_OPS(op, asm_op) \
	ATOMIC_OP(op, asm_op) \
	ATOMIC_OP_RETURN(op, asm_op)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, sub)

ATOMIC_OP(and, and)
ATOMIC_OP(andnot, bic)
ATOMIC_OP(or, orr)
ATOMIC_OP(xor, eor)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
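
/*
 * The instantiations above generate atomic_add, atomic_sub, atomic_and,
 * atomic_andnot, atomic_or and atomic_xor, plus the value-returning
 * atomic_add_return and atomic_sub_return, mapping each op name to the
 * corresponding A64 instruction (andnot -> bic, xor -> eor, or -> orr).
 */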

#define ATOMIC64_OP(op, asm_op) \
__LL_SC_INLINE void \
__LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v)) \
{ \
	long result; \
	unsigned long tmp; \
\
	asm volatile("// atomic64_" #op "\n" \
"	prfm pstl1strm, %2\n" \
"1:	ldxr %0, %2\n" \
"	" #asm_op " %0, %0, %3\n" \
"	stxr %w1, %0, %2\n" \
"	cbnz %w1, 1b" \
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
	: "Ir" (i)); \
} \
__LL_SC_EXPORT(atomic64_##op);

#define ATOMIC64_OP_RETURN(op, asm_op) \
__LL_SC_INLINE long \
__LL_SC_PREFIX(atomic64_##op##_return(long i, atomic64_t *v)) \
{ \
	long result; \
	unsigned long tmp; \
\
	asm volatile("// atomic64_" #op "_return\n" \
"	prfm pstl1strm, %2\n" \
"1:	ldxr %0, %2\n" \
"	" #asm_op " %0, %0, %3\n" \
"	stlxr %w1, %0, %2\n" \
"	cbnz %w1, 1b" \
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
	: "Ir" (i) \
	: "memory"); \
\
	smp_mb(); \
	return result; \
} \
__LL_SC_EXPORT(atomic64_##op##_return);

#define ATOMIC64_OPS(op, asm_op) \
	ATOMIC64_OP(op, asm_op) \
	ATOMIC64_OP_RETURN(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, sub)

ATOMIC64_OP(and, and)
ATOMIC64_OP(andnot, bic)
ATOMIC64_OP(or, orr)
ATOMIC64_OP(xor, eor)

#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
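
/*
 * The atomic64_* set mirrors the 32-bit one but operates on a long-sized
 * atomic64_t: the loaded value and the operation use the full X registers
 * (%0 rather than %w0), while the STXR/STLXR status flag remains a W
 * register.
 */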

__LL_SC_INLINE long
__LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_dec_if_positive\n"
"	prfm pstl1strm, %2\n"
"1:	ldxr %0, %2\n"
"	subs %0, %0, #1\n"
"	b.lt 2f\n"
"	stlxr %w1, %0, %2\n"
"	cbnz %w1, 1b\n"
"	dmb ish\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	:
	: "cc", "memory");

	return result;
}
__LL_SC_EXPORT(atomic64_dec_if_positive);
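
/*
 * atomic64_dec_if_positive only stores the decrement when the result is
 * still non-negative (SUBS sets the flags, B.LT skips the store), but the
 * decremented value is returned either way, so a negative return means
 * the counter was left untouched. A hypothetical caller (names are
 * illustrative, not from this file) might do:
 *
 *	if (atomic64_dec_if_positive(&budget) < 0)
 *		return -EBUSY;
 *
 * The dmb ish runs only on the successful store path.
 */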

#define __CMPXCHG_CASE(w, sz, name, mb, rel, cl) \
__LL_SC_INLINE unsigned long \
__LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr, \
				     unsigned long old, \
				     unsigned long new)) \
{ \
	unsigned long tmp, oldval; \
\
	asm volatile( \
	"	prfm pstl1strm, %[v]\n" \
	"1:	ldxr" #sz "\t%" #w "[oldval], %[v]\n" \
	"	eor %" #w "[tmp], %" #w "[oldval], %" #w "[old]\n" \
	"	cbnz %" #w "[tmp], 2f\n" \
	"	st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n" \
	"	cbnz %w[tmp], 1b\n" \
	"	" #mb "\n" \
	"	mov %" #w "[oldval], %" #w "[old]\n" \
	"2:" \
	: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \
	  [v] "+Q" (*(unsigned long *)ptr) \
	: [old] "Lr" (old), [new] "r" (new) \
	: cl); \
\
	return oldval; \
} \
__LL_SC_EXPORT(__cmpxchg_case_##name);

__CMPXCHG_CASE(w, b, 1, , , )
__CMPXCHG_CASE(w, h, 2, , , )
__CMPXCHG_CASE(w, , 4, , , )
__CMPXCHG_CASE( , , 8, , , )
__CMPXCHG_CASE(w, b, mb_1, dmb ish, l, "memory")
__CMPXCHG_CASE(w, h, mb_2, dmb ish, l, "memory")
__CMPXCHG_CASE(w, , mb_4, dmb ish, l, "memory")
__CMPXCHG_CASE( , , mb_8, dmb ish, l, "memory")

#undef __CMPXCHG_CASE
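
/*
 * The eight cases above produce __cmpxchg_case_{1,2,4,8} and
 * __cmpxchg_case_mb_{1,2,4,8}: the number is the operand size in bytes
 * (the sub-word cases use LDXRB/LDXRH), and the mb_ variants use a
 * store-release (stlxr*) plus dmb ish on success together with a
 * "memory" clobber. A failed comparison exits at label 2 without any
 * barrier. These are normally reached through the size-dispatching
 * cmpxchg() wrappers rather than called directly.
 */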

#define __CMPXCHG_DBL(name, mb, rel, cl) \
__LL_SC_INLINE int \
__LL_SC_PREFIX(__cmpxchg_double##name(unsigned long old1, \
				      unsigned long old2, \
				      unsigned long new1, \
				      unsigned long new2, \
				      volatile void *ptr)) \
{ \
	unsigned long tmp, ret; \
\
	asm volatile("// __cmpxchg_double" #name "\n" \
"	prfm pstl1strm, %2\n" \
"1:	ldxp %0, %1, %2\n" \
"	eor %0, %0, %3\n" \
"	eor %1, %1, %4\n" \
"	orr %1, %0, %1\n" \
"	cbnz %1, 2f\n" \
"	st" #rel "xp %w0, %5, %6, %2\n" \
"	cbnz %w0, 1b\n" \
"	" #mb "\n" \
"2:" \
	: "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr) \
	: "r" (old1), "r" (old2), "r" (new1), "r" (new2) \
	: cl); \
\
	return ret; \
} \
__LL_SC_EXPORT(__cmpxchg_double##name);

__CMPXCHG_DBL( , , , )
__CMPXCHG_DBL(_mb, dmb ish, l, "memory")

#undef __CMPXCHG_DBL
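
/*
 * __cmpxchg_double compares the two adjacent longs at ptr against
 * old1/old2 with LDXP and, only if both match, stores new1/new2 with
 * ST(L)XP. As written here it returns 0 when the exchange happened and
 * nonzero when either word differed; the _mb variant adds a dmb ish on
 * the success path and a "memory" clobber. Callers are expected to pass
 * a naturally aligned, adjacent pair of words.
 */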

#endif	/* __ASM_ATOMIC_LL_SC_H */