atomic_lse.h
/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_ATOMIC_LSE_H
#define __ASM_ATOMIC_LSE_H

#ifndef __ARM64_IN_ATOMIC_IMPL
#error "please don't include this file directly"
#endif

#define __LL_SC_ATOMIC(op)	__LL_SC_CALL(atomic_##op)
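
/*
 * Each routine below is patched at boot via ARM64_LSE_ATOMIC_INSN():
 * the first alternative is a "bl" to the out-of-line LL/SC implementation
 * (hence the "x30" clobber for the link register), the second is the
 * in-line LSE sequence used when the CPU supports the ARMv8.1 atomics.
 * The explicit register variables pin the operands to the registers the
 * out-of-line LL/SC functions expect for these routines (w0/x0 for the
 * value, x1 for the atomic_t pointer), so the same register setup serves
 * both alternatives.
 */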

static inline void atomic_andnot(int i, atomic_t *v)
{
        register int w0 asm ("w0") = i;
        register atomic_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(andnot),
        " stclr %w[i], %[v]\n")
        : [i] "+r" (w0), [v] "+Q" (v->counter)
        : "r" (x1)
        : "x30");
}

static inline void atomic_or(int i, atomic_t *v)
{
        register int w0 asm ("w0") = i;
        register atomic_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(or),
        " stset %w[i], %[v]\n")
        : [i] "+r" (w0), [v] "+Q" (v->counter)
        : "r" (x1)
        : "x30");
}

static inline void atomic_xor(int i, atomic_t *v)
{
        register int w0 asm ("w0") = i;
        register atomic_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(xor),
        " steor %w[i], %[v]\n")
        : [i] "+r" (w0), [v] "+Q" (v->counter)
        : "r" (x1)
        : "x30");
}

static inline void atomic_add(int i, atomic_t *v)
{
        register int w0 asm ("w0") = i;
        register atomic_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(add),
        " stadd %w[i], %[v]\n")
        : [i] "+r" (w0), [v] "+Q" (v->counter)
        : "r" (x1)
        : "x30");
}
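
/*
 * The *_return variants must return the updated value and act as a full
 * barrier.  LDADDAL adds the operand to the counter with acquire+release
 * semantics and writes the *old* value to w30; the trailing ADD turns
 * that into the new value.  The leading "nop" pads the LL/SC alternative
 * so both sequences occupy the same number of instructions, and the
 * "memory" clobber makes the ordering visible to the compiler.
 */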

static inline int atomic_add_return(int i, atomic_t *v)
{
        register int w0 asm ("w0") = i;
        register atomic_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        " nop\n"
        __LL_SC_ATOMIC(add_return),
        /* LSE atomics */
        " ldaddal %w[i], w30, %[v]\n"
        " add %w[i], %w[i], w30")
        : [i] "+r" (w0), [v] "+Q" (v->counter)
        : "r" (x1)
        : "x30", "memory");

        return w0;
}
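
/*
 * LSE has no atomic AND or SUB instructions, so atomic_and() clears the
 * complement of the mask (MVN + STCLR) and atomic_sub() adds the negated
 * operand (NEG + STADD).  The extra instruction on the LSE side is again
 * balanced by a "nop" in the LL/SC alternative.
 */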

static inline void atomic_and(int i, atomic_t *v)
{
        register int w0 asm ("w0") = i;
        register atomic_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        " nop\n"
        __LL_SC_ATOMIC(and),
        /* LSE atomics */
        " mvn %w[i], %w[i]\n"
        " stclr %w[i], %[v]")
        : [i] "+r" (w0), [v] "+Q" (v->counter)
        : "r" (x1)
        : "x30");
}

static inline void atomic_sub(int i, atomic_t *v)
{
        register int w0 asm ("w0") = i;
        register atomic_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        " nop\n"
        __LL_SC_ATOMIC(sub),
        /* LSE atomics */
        " neg %w[i], %w[i]\n"
        " stadd %w[i], %[v]")
        : [i] "+r" (w0), [v] "+Q" (v->counter)
        : "r" (x1)
        : "x30");
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
        register int w0 asm ("w0") = i;
        register atomic_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        " nop\n"
        __LL_SC_ATOMIC(sub_return)
        " nop",
        /* LSE atomics */
        " neg %w[i], %w[i]\n"
        " ldaddal %w[i], w30, %[v]\n"
        " add %w[i], %w[i], w30")
        : [i] "+r" (w0), [v] "+Q" (v->counter)
        : "r" (x1)
        : "x30", "memory");

        return w0;
}

#undef __LL_SC_ATOMIC

#define __LL_SC_ATOMIC64(op)	__LL_SC_CALL(atomic64_##op)
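
/*
 * The atomic64_* operations below mirror the 32-bit versions above,
 * operating on 64-bit counters with the operand pinned to x0 and the
 * atomic64_t pointer in x1.
 */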

static inline void atomic64_andnot(long i, atomic64_t *v)
{
        register long x0 asm ("x0") = i;
        register atomic64_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(andnot),
        " stclr %[i], %[v]\n")
        : [i] "+r" (x0), [v] "+Q" (v->counter)
        : "r" (x1)
        : "x30");
}

static inline void atomic64_or(long i, atomic64_t *v)
{
        register long x0 asm ("x0") = i;
        register atomic64_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(or),
        " stset %[i], %[v]\n")
        : [i] "+r" (x0), [v] "+Q" (v->counter)
        : "r" (x1)
        : "x30");
}

static inline void atomic64_xor(long i, atomic64_t *v)
{
        register long x0 asm ("x0") = i;
        register atomic64_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(xor),
        " steor %[i], %[v]\n")
        : [i] "+r" (x0), [v] "+Q" (v->counter)
        : "r" (x1)
        : "x30");
}

static inline void atomic64_add(long i, atomic64_t *v)
{
        register long x0 asm ("x0") = i;
        register atomic64_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(add),
        " stadd %[i], %[v]\n")
        : [i] "+r" (x0), [v] "+Q" (v->counter)
        : "r" (x1)
        : "x30");
}

static inline long atomic64_add_return(long i, atomic64_t *v)
{
        register long x0 asm ("x0") = i;
        register atomic64_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        " nop\n"
        __LL_SC_ATOMIC64(add_return),
        /* LSE atomics */
        " ldaddal %[i], x30, %[v]\n"
        " add %[i], %[i], x30")
        : [i] "+r" (x0), [v] "+Q" (v->counter)
        : "r" (x1)
        : "x30", "memory");

        return x0;
}

static inline void atomic64_and(long i, atomic64_t *v)
{
        register long x0 asm ("x0") = i;
        register atomic64_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        " nop\n"
        __LL_SC_ATOMIC64(and),
        /* LSE atomics */
        " mvn %[i], %[i]\n"
        " stclr %[i], %[v]")
        : [i] "+r" (x0), [v] "+Q" (v->counter)
        : "r" (x1)
        : "x30");
}

static inline void atomic64_sub(long i, atomic64_t *v)
{
        register long x0 asm ("x0") = i;
        register atomic64_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        " nop\n"
        __LL_SC_ATOMIC64(sub),
        /* LSE atomics */
        " neg %[i], %[i]\n"
        " stadd %[i], %[v]")
        : [i] "+r" (x0), [v] "+Q" (v->counter)
        : "r" (x1)
        : "x30");
}

static inline long atomic64_sub_return(long i, atomic64_t *v)
{
        register long x0 asm ("x0") = i;
        register atomic64_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        " nop\n"
        __LL_SC_ATOMIC64(sub_return)
        " nop",
        /* LSE atomics */
        " neg %[i], %[i]\n"
        " ldaddal %[i], x30, %[v]\n"
        " add %[i], %[i], x30")
        : [i] "+r" (x0), [v] "+Q" (v->counter)
        : "r" (x1)
        : "x30", "memory");

        return x0;
}
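
/*
 * atomic64_dec_if_positive(): decrement the counter only if the result
 * stays non-negative, implemented on the LSE side as a CAS loop.  The
 * loop loads the counter into x30, computes the candidate value, bails
 * out without storing if it would go negative, and then uses CASAL to
 * publish it.  CASAL writes the value it actually found in memory back
 * into x30, so the two SUBs check whether that value still matched
 * (i.e. the CAS succeeded); if not, the loop retries.  The return value
 * is the new counter value on success, or a negative result when the
 * counter was not positive and nothing was stored.
 */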

static inline long atomic64_dec_if_positive(atomic64_t *v)
{
        register long x0 asm ("x0") = (long)v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        " nop\n"
        __LL_SC_ATOMIC64(dec_if_positive)
        " nop\n"
        " nop\n"
        " nop\n"
        " nop\n"
        " nop",
        /* LSE atomics */
        "1: ldr x30, %[v]\n"
        " subs %[ret], x30, #1\n"
        " b.lt 2f\n"
        " casal x30, %[ret], %[v]\n"
        " sub x30, x30, #1\n"
        " sub x30, x30, %[ret]\n"
        " cbnz x30, 1b\n"
        "2:")
        : [ret] "+&r" (x0), [v] "+Q" (v->counter)
        :
        : "x30", "cc", "memory");

        return x0;
}
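
/*
 * Illustrative use (example only, not part of this header; "obj->refs"
 * is a hypothetical atomic64_t field):
 *
 *	if (atomic64_dec_if_positive(&obj->refs) < 0)
 *		pr_debug("refs already zero, nothing decremented\n");
 */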

#undef __LL_SC_ATOMIC64

#define __LL_SC_CMPXCHG(op)	__LL_SC_CALL(__cmpxchg_case_##op)
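
/*
 * __CMPXCHG_CASE() generates the size-specific __cmpxchg_case_<name>()
 * helpers.  "w"/"x" selects the register width, "b"/"h" the CAS size
 * suffix, and the mb_ variants use the acquire+release "al" form plus a
 * "memory" clobber.  The expected value is staged in register 30 because
 * CAS overwrites its first operand with the value read from memory, and
 * x30 is already clobbered for the out-of-line LL/SC call, so reusing it
 * avoids tying up another register.
 */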

#define __CMPXCHG_CASE(w, sz, name, mb, cl...)                          \
static inline unsigned long __cmpxchg_case_##name(volatile void *ptr,  \
                                                  unsigned long old,   \
                                                  unsigned long new)   \
{                                                                       \
        register unsigned long x0 asm ("x0") = (unsigned long)ptr;     \
        register unsigned long x1 asm ("x1") = old;                    \
        register unsigned long x2 asm ("x2") = new;                    \
                                                                        \
        asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
        /* LL/SC */                                                     \
        " nop\n"                                                        \
        __LL_SC_CMPXCHG(name)                                           \
        " nop",                                                         \
        /* LSE atomics */                                               \
        " mov " #w "30, %" #w "[old]\n"                                 \
        " cas" #mb #sz "\t" #w "30, %" #w "[new], %[v]\n"               \
        " mov %" #w "[ret], " #w "30")                                  \
        : [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr)             \
        : [old] "r" (x1), [new] "r" (x2)                                \
        : "x30" , ##cl);                                                \
                                                                        \
        return x0;                                                      \
}

__CMPXCHG_CASE(w, b,    1,   )
__CMPXCHG_CASE(w, h,    2,   )
__CMPXCHG_CASE(w,  ,    4,   )
__CMPXCHG_CASE(x,  ,    8,   )
__CMPXCHG_CASE(w, b, mb_1, al, "memory")
__CMPXCHG_CASE(w, h, mb_2, al, "memory")
__CMPXCHG_CASE(w,  , mb_4, al, "memory")
__CMPXCHG_CASE(x,  , mb_8, al, "memory")
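
/*
 * The expansions above provide __cmpxchg_case_{1,2,4,8}() with no
 * ordering guarantees and __cmpxchg_case_mb_{1,2,4,8}() using the
 * acquire+release CASAL* forms; e.g. the LSE path of
 * __cmpxchg_case_mb_4() becomes "casal w30, %w[new], %[v]".
 */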

#undef __LL_SC_CMPXCHG
#undef __CMPXCHG_CASE
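
/*
 * __CMPXCHG_DBL() generates the double-word compare-and-exchange helpers
 * built on CASP.  CASP requires its operands in consecutive even/odd
 * register pairs, hence old1/old2 are pinned to x0/x1 and new1/new2 to
 * x2/x3.  On return x0/x1 hold the values found in memory; the EOR/ORR
 * sequence folds them into a single status in x0: zero when both words
 * matched (and the new values were stored), non-zero otherwise.
 */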

#define __LL_SC_CMPXCHG_DBL(op)	__LL_SC_CALL(__cmpxchg_double##op)

#define __CMPXCHG_DBL(name, mb, cl...)                                  \
static inline int __cmpxchg_double##name(unsigned long old1,           \
                                         unsigned long old2,           \
                                         unsigned long new1,           \
                                         unsigned long new2,           \
                                         volatile void *ptr)           \
{                                                                       \
        unsigned long oldval1 = old1;                                   \
        unsigned long oldval2 = old2;                                   \
        register unsigned long x0 asm ("x0") = old1;                    \
        register unsigned long x1 asm ("x1") = old2;                    \
        register unsigned long x2 asm ("x2") = new1;                    \
        register unsigned long x3 asm ("x3") = new2;                    \
        register unsigned long x4 asm ("x4") = (unsigned long)ptr;      \
                                                                        \
        asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
        /* LL/SC */                                                     \
        " nop\n"                                                        \
        " nop\n"                                                        \
        " nop\n"                                                        \
        __LL_SC_CMPXCHG_DBL(name),                                      \
        /* LSE atomics */                                               \
        " casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"      \
        " eor %[old1], %[old1], %[oldval1]\n"                           \
        " eor %[old2], %[old2], %[oldval2]\n"                           \
        " orr %[old1], %[old1], %[old2]")                               \
        : [old1] "+r" (x0), [old2] "+r" (x1),                           \
          [v] "+Q" (*(unsigned long *)ptr)                              \
        : [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4),             \
          [oldval1] "r" (oldval1), [oldval2] "r" (oldval2)              \
        : "x30" , ##cl);                                                \
                                                                        \
        return x0;                                                      \
}

__CMPXCHG_DBL(   ,   )
__CMPXCHG_DBL(_mb, al, "memory")

#undef __LL_SC_CMPXCHG_DBL
#undef __CMPXCHG_DBL
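
/*
 * Illustrative use of the resulting cmpxchg_double() wrapper (example
 * only, not part of this header; "s->ptr" and "s->cnt" are hypothetical
 * adjacent 64-bit fields, with the pair suitably aligned):
 *
 *	ok = cmpxchg_double(&s->ptr, &s->cnt, old_ptr, old_cnt,
 *			    new_ptr, new_cnt);
 */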

#endif	/* __ASM_ATOMIC_LSE_H */