atomic_lse.h

/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_ATOMIC_LSE_H
#define __ASM_ATOMIC_LSE_H

#ifndef __ARM64_IN_ATOMIC_IMPL
#error "please don't include this file directly"
#endif

#define __LL_SC_ATOMIC(op) __LL_SC_CALL(atomic_##op)

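/*
 * ATOMIC_OP() generates the void atomic_<op>() operations. With LSE atomics
 * the whole operation is a single store-form instruction (stadd, stclr,
 * steor, stset); when LSE is not available, ARM64_LSE_ATOMIC_INSN() selects
 * a call to the out-of-line LL/SC implementation instead. The explicit
 * register variables exist because that out-of-line routine takes its
 * arguments like a normal function call: i in w0/x0 and v in x1.
 *
 * As a rough expansion sketch (for illustration only, not code that is
 * compiled here), ATOMIC_OP(add, stadd) produces approximately:
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		register int w0 asm ("w0") = i;
 *		register atomic_t *x1 asm ("x1") = v;
 *
 *		asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(add),
 *		" stadd %w[i], %[v]\n")
 *		: [i] "+r" (w0), [v] "+Q" (v->counter)
 *		: "r" (x1)
 *		: __LL_SC_CLOBBERS);
 *	}
 */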
#define ATOMIC_OP(op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
        register int w0 asm ("w0") = i; \
        register atomic_t *x1 asm ("x1") = v; \
\
        asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(op), \
        " " #asm_op " %w[i], %[v]\n") \
        : [i] "+r" (w0), [v] "+Q" (v->counter) \
        : "r" (x1) \
        : __LL_SC_CLOBBERS); \
}

ATOMIC_OP(andnot, stclr)
ATOMIC_OP(or, stset)
ATOMIC_OP(xor, steor)
ATOMIC_OP(add, stadd)

#undef ATOMIC_OP

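/*
 * ATOMIC_FETCH_OP() generates atomic_fetch_<op>{_relaxed,_acquire,_release,}()
 * which return the value the counter held before the operation. The LSE
 * load-form instructions (ldadd, ldclr, ldeor, ldset) deposit that old value
 * directly in %w[i]. The #mb suffix selects the ordering variant ("", "a",
 * "l" or "al"), and the ordered variants additionally clobber "memory" so
 * that the compiler does not reorder accesses around them.
 */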
#define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...) \
static inline int atomic_fetch_##op##name(int i, atomic_t *v) \
{ \
        register int w0 asm ("w0") = i; \
        register atomic_t *x1 asm ("x1") = v; \
\
        asm volatile(ARM64_LSE_ATOMIC_INSN( \
        /* LL/SC */ \
        __LL_SC_ATOMIC(fetch_##op##name), \
        /* LSE atomics */ \
        " " #asm_op #mb " %w[i], %w[i], %[v]") \
        : [i] "+r" (w0), [v] "+Q" (v->counter) \
        : "r" (x1) \
        : __LL_SC_CLOBBERS, ##cl); \
\
        return w0; \
}

#define ATOMIC_FETCH_OPS(op, asm_op) \
        ATOMIC_FETCH_OP(_relaxed, , op, asm_op) \
        ATOMIC_FETCH_OP(_acquire, a, op, asm_op, "memory") \
        ATOMIC_FETCH_OP(_release, l, op, asm_op, "memory") \
        ATOMIC_FETCH_OP( , al, op, asm_op, "memory")

ATOMIC_FETCH_OPS(andnot, ldclr)
ATOMIC_FETCH_OPS(or, ldset)
ATOMIC_FETCH_OPS(xor, ldeor)
ATOMIC_FETCH_OPS(add, ldadd)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_FETCH_OPS

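/*
 * atomic_add_return*() must return the *new* value, so the LSE side fetches
 * the old value with ldadd into w30 and then adds %w[i] to it. Register 30
 * (the link register) is usable as a temporary because the LL/SC alternative
 * is an out-of-line call that clobbers it anyway; it is covered by
 * __LL_SC_CLOBBERS. The leading nop pads the LL/SC call so both alternatives
 * occupy the same number of instructions.
 */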
#define ATOMIC_OP_ADD_RETURN(name, mb, cl...) \
static inline int atomic_add_return##name(int i, atomic_t *v) \
{ \
        register int w0 asm ("w0") = i; \
        register atomic_t *x1 asm ("x1") = v; \
\
        asm volatile(ARM64_LSE_ATOMIC_INSN( \
        /* LL/SC */ \
        " nop\n" \
        __LL_SC_ATOMIC(add_return##name), \
        /* LSE atomics */ \
        " ldadd" #mb " %w[i], w30, %[v]\n" \
        " add %w[i], %w[i], w30") \
        : [i] "+r" (w0), [v] "+Q" (v->counter) \
        : "r" (x1) \
        : __LL_SC_CLOBBERS, ##cl); \
\
        return w0; \
}

ATOMIC_OP_ADD_RETURN(_relaxed, )
ATOMIC_OP_ADD_RETURN(_acquire, a, "memory")
ATOMIC_OP_ADD_RETURN(_release, l, "memory")
ATOMIC_OP_ADD_RETURN( , al, "memory")

#undef ATOMIC_OP_ADD_RETURN

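/*
 * There is no LSE "and" instruction, only "clear bits" (stclr/ldclr), so
 * atomic_and() and atomic_fetch_and*() first invert the operand with mvn and
 * then clear those bits. The extra mvn is why these variants carry a leading
 * nop on the LL/SC side: it keeps the two alternatives the same size.
 */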
static inline void atomic_and(int i, atomic_t *v)
{
        register int w0 asm ("w0") = i;
        register atomic_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        " nop\n"
        __LL_SC_ATOMIC(and),
        /* LSE atomics */
        " mvn %w[i], %w[i]\n"
        " stclr %w[i], %[v]")
        : [i] "+r" (w0), [v] "+Q" (v->counter)
        : "r" (x1)
        : __LL_SC_CLOBBERS);
}

#define ATOMIC_FETCH_OP_AND(name, mb, cl...) \
static inline int atomic_fetch_and##name(int i, atomic_t *v) \
{ \
        register int w0 asm ("w0") = i; \
        register atomic_t *x1 asm ("x1") = v; \
\
        asm volatile(ARM64_LSE_ATOMIC_INSN( \
        /* LL/SC */ \
        " nop\n" \
        __LL_SC_ATOMIC(fetch_and##name), \
        /* LSE atomics */ \
        " mvn %w[i], %w[i]\n" \
        " ldclr" #mb " %w[i], %w[i], %[v]") \
        : [i] "+r" (w0), [v] "+Q" (v->counter) \
        : "r" (x1) \
        : __LL_SC_CLOBBERS, ##cl); \
\
        return w0; \
}

ATOMIC_FETCH_OP_AND(_relaxed, )
ATOMIC_FETCH_OP_AND(_acquire, a, "memory")
ATOMIC_FETCH_OP_AND(_release, l, "memory")
ATOMIC_FETCH_OP_AND( , al, "memory")

#undef ATOMIC_FETCH_OP_AND

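/*
 * Likewise there is no LSE subtract: atomic_sub*() and atomic_fetch_sub*()
 * negate the operand and use the add forms (stadd/ldadd) instead.
 */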
static inline void atomic_sub(int i, atomic_t *v)
{
        register int w0 asm ("w0") = i;
        register atomic_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        " nop\n"
        __LL_SC_ATOMIC(sub),
        /* LSE atomics */
        " neg %w[i], %w[i]\n"
        " stadd %w[i], %[v]")
        : [i] "+r" (w0), [v] "+Q" (v->counter)
        : "r" (x1)
        : __LL_SC_CLOBBERS);
}

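/*
 * sub_return needs three LSE instructions (neg, ldadd, add), so here the
 * LL/SC call is padded with a nop both before and after it.
 */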
#define ATOMIC_OP_SUB_RETURN(name, mb, cl...) \
static inline int atomic_sub_return##name(int i, atomic_t *v) \
{ \
        register int w0 asm ("w0") = i; \
        register atomic_t *x1 asm ("x1") = v; \
\
        asm volatile(ARM64_LSE_ATOMIC_INSN( \
        /* LL/SC */ \
        " nop\n" \
        __LL_SC_ATOMIC(sub_return##name) \
        " nop", \
        /* LSE atomics */ \
        " neg %w[i], %w[i]\n" \
        " ldadd" #mb " %w[i], w30, %[v]\n" \
        " add %w[i], %w[i], w30") \
        : [i] "+r" (w0), [v] "+Q" (v->counter) \
        : "r" (x1) \
        : __LL_SC_CLOBBERS, ##cl); \
\
        return w0; \
}

ATOMIC_OP_SUB_RETURN(_relaxed, )
ATOMIC_OP_SUB_RETURN(_acquire, a, "memory")
ATOMIC_OP_SUB_RETURN(_release, l, "memory")
ATOMIC_OP_SUB_RETURN( , al, "memory")

#undef ATOMIC_OP_SUB_RETURN

#define ATOMIC_FETCH_OP_SUB(name, mb, cl...) \
static inline int atomic_fetch_sub##name(int i, atomic_t *v) \
{ \
        register int w0 asm ("w0") = i; \
        register atomic_t *x1 asm ("x1") = v; \
\
        asm volatile(ARM64_LSE_ATOMIC_INSN( \
        /* LL/SC */ \
        " nop\n" \
        __LL_SC_ATOMIC(fetch_sub##name), \
        /* LSE atomics */ \
        " neg %w[i], %w[i]\n" \
        " ldadd" #mb " %w[i], %w[i], %[v]") \
        : [i] "+r" (w0), [v] "+Q" (v->counter) \
        : "r" (x1) \
        : __LL_SC_CLOBBERS, ##cl); \
\
        return w0; \
}

ATOMIC_FETCH_OP_SUB(_relaxed, )
ATOMIC_FETCH_OP_SUB(_acquire, a, "memory")
ATOMIC_FETCH_OP_SUB(_release, l, "memory")
ATOMIC_FETCH_OP_SUB( , al, "memory")

#undef ATOMIC_FETCH_OP_SUB
#undef __LL_SC_ATOMIC

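/*
 * The atomic64_* operations below mirror the 32-bit versions one for one,
 * operating on full 64-bit x registers (%[i] rather than %w[i]) and falling
 * back to the out-of-line atomic64_* LL/SC routines.
 */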
#define __LL_SC_ATOMIC64(op) __LL_SC_CALL(atomic64_##op)

#define ATOMIC64_OP(op, asm_op) \
static inline void atomic64_##op(long i, atomic64_t *v) \
{ \
        register long x0 asm ("x0") = i; \
        register atomic64_t *x1 asm ("x1") = v; \
\
        asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(op), \
        " " #asm_op " %[i], %[v]\n") \
        : [i] "+r" (x0), [v] "+Q" (v->counter) \
        : "r" (x1) \
        : __LL_SC_CLOBBERS); \
}

ATOMIC64_OP(andnot, stclr)
ATOMIC64_OP(or, stset)
ATOMIC64_OP(xor, steor)
ATOMIC64_OP(add, stadd)

#undef ATOMIC64_OP

#define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...) \
static inline long atomic64_fetch_##op##name(long i, atomic64_t *v) \
{ \
        register long x0 asm ("x0") = i; \
        register atomic64_t *x1 asm ("x1") = v; \
\
        asm volatile(ARM64_LSE_ATOMIC_INSN( \
        /* LL/SC */ \
        __LL_SC_ATOMIC64(fetch_##op##name), \
        /* LSE atomics */ \
        " " #asm_op #mb " %[i], %[i], %[v]") \
        : [i] "+r" (x0), [v] "+Q" (v->counter) \
        : "r" (x1) \
        : __LL_SC_CLOBBERS, ##cl); \
\
        return x0; \
}

#define ATOMIC64_FETCH_OPS(op, asm_op) \
        ATOMIC64_FETCH_OP(_relaxed, , op, asm_op) \
        ATOMIC64_FETCH_OP(_acquire, a, op, asm_op, "memory") \
        ATOMIC64_FETCH_OP(_release, l, op, asm_op, "memory") \
        ATOMIC64_FETCH_OP( , al, op, asm_op, "memory")

ATOMIC64_FETCH_OPS(andnot, ldclr)
ATOMIC64_FETCH_OPS(or, ldset)
ATOMIC64_FETCH_OPS(xor, ldeor)
ATOMIC64_FETCH_OPS(add, ldadd)

#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_FETCH_OPS

#define ATOMIC64_OP_ADD_RETURN(name, mb, cl...) \
static inline long atomic64_add_return##name(long i, atomic64_t *v) \
{ \
        register long x0 asm ("x0") = i; \
        register atomic64_t *x1 asm ("x1") = v; \
\
        asm volatile(ARM64_LSE_ATOMIC_INSN( \
        /* LL/SC */ \
        " nop\n" \
        __LL_SC_ATOMIC64(add_return##name), \
        /* LSE atomics */ \
        " ldadd" #mb " %[i], x30, %[v]\n" \
        " add %[i], %[i], x30") \
        : [i] "+r" (x0), [v] "+Q" (v->counter) \
        : "r" (x1) \
        : __LL_SC_CLOBBERS, ##cl); \
\
        return x0; \
}

ATOMIC64_OP_ADD_RETURN(_relaxed, )
ATOMIC64_OP_ADD_RETURN(_acquire, a, "memory")
ATOMIC64_OP_ADD_RETURN(_release, l, "memory")
ATOMIC64_OP_ADD_RETURN( , al, "memory")

#undef ATOMIC64_OP_ADD_RETURN

static inline void atomic64_and(long i, atomic64_t *v)
{
        register long x0 asm ("x0") = i;
        register atomic64_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        " nop\n"
        __LL_SC_ATOMIC64(and),
        /* LSE atomics */
        " mvn %[i], %[i]\n"
        " stclr %[i], %[v]")
        : [i] "+r" (x0), [v] "+Q" (v->counter)
        : "r" (x1)
        : __LL_SC_CLOBBERS);
}

#define ATOMIC64_FETCH_OP_AND(name, mb, cl...) \
static inline long atomic64_fetch_and##name(long i, atomic64_t *v) \
{ \
        register long x0 asm ("x0") = i; \
        register atomic64_t *x1 asm ("x1") = v; \
\
        asm volatile(ARM64_LSE_ATOMIC_INSN( \
        /* LL/SC */ \
        " nop\n" \
        __LL_SC_ATOMIC64(fetch_and##name), \
        /* LSE atomics */ \
        " mvn %[i], %[i]\n" \
        " ldclr" #mb " %[i], %[i], %[v]") \
        : [i] "+r" (x0), [v] "+Q" (v->counter) \
        : "r" (x1) \
        : __LL_SC_CLOBBERS, ##cl); \
\
        return x0; \
}

ATOMIC64_FETCH_OP_AND(_relaxed, )
ATOMIC64_FETCH_OP_AND(_acquire, a, "memory")
ATOMIC64_FETCH_OP_AND(_release, l, "memory")
ATOMIC64_FETCH_OP_AND( , al, "memory")

#undef ATOMIC64_FETCH_OP_AND

static inline void atomic64_sub(long i, atomic64_t *v)
{
        register long x0 asm ("x0") = i;
        register atomic64_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        " nop\n"
        __LL_SC_ATOMIC64(sub),
        /* LSE atomics */
        " neg %[i], %[i]\n"
        " stadd %[i], %[v]")
        : [i] "+r" (x0), [v] "+Q" (v->counter)
        : "r" (x1)
        : __LL_SC_CLOBBERS);
}

#define ATOMIC64_OP_SUB_RETURN(name, mb, cl...) \
static inline long atomic64_sub_return##name(long i, atomic64_t *v) \
{ \
        register long x0 asm ("x0") = i; \
        register atomic64_t *x1 asm ("x1") = v; \
\
        asm volatile(ARM64_LSE_ATOMIC_INSN( \
        /* LL/SC */ \
        " nop\n" \
        __LL_SC_ATOMIC64(sub_return##name) \
        " nop", \
        /* LSE atomics */ \
        " neg %[i], %[i]\n" \
        " ldadd" #mb " %[i], x30, %[v]\n" \
        " add %[i], %[i], x30") \
        : [i] "+r" (x0), [v] "+Q" (v->counter) \
        : "r" (x1) \
        : __LL_SC_CLOBBERS, ##cl); \
\
        return x0; \
}

ATOMIC64_OP_SUB_RETURN(_relaxed, )
ATOMIC64_OP_SUB_RETURN(_acquire, a, "memory")
ATOMIC64_OP_SUB_RETURN(_release, l, "memory")
ATOMIC64_OP_SUB_RETURN( , al, "memory")

#undef ATOMIC64_OP_SUB_RETURN

#define ATOMIC64_FETCH_OP_SUB(name, mb, cl...) \
static inline long atomic64_fetch_sub##name(long i, atomic64_t *v) \
{ \
        register long x0 asm ("x0") = i; \
        register atomic64_t *x1 asm ("x1") = v; \
\
        asm volatile(ARM64_LSE_ATOMIC_INSN( \
        /* LL/SC */ \
        " nop\n" \
        __LL_SC_ATOMIC64(fetch_sub##name), \
        /* LSE atomics */ \
        " neg %[i], %[i]\n" \
        " ldadd" #mb " %[i], %[i], %[v]") \
        : [i] "+r" (x0), [v] "+Q" (v->counter) \
        : "r" (x1) \
        : __LL_SC_CLOBBERS, ##cl); \
\
        return x0; \
}

ATOMIC64_FETCH_OP_SUB(_relaxed, )
ATOMIC64_FETCH_OP_SUB(_acquire, a, "memory")
ATOMIC64_FETCH_OP_SUB(_release, l, "memory")
ATOMIC64_FETCH_OP_SUB( , al, "memory")

#undef ATOMIC64_FETCH_OP_SUB

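/*
 * atomic64_dec_if_positive() decrements v only if the result would stay
 * non-negative and returns the decremented value; if the decrement would go
 * negative it returns that negative result without touching v. The LSE loop
 * loads the counter, computes the decrement, branches to label 2 when the
 * result is negative, and publishes it with casal. casal writes the value it
 * observed in memory back into x30, so comparing that (via the two subs)
 * against the expected old value detects a lost race and retries from
 * label 1. The run of nops pads the LL/SC call to the length of this
 * sequence.
 */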
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
        register long x0 asm ("x0") = (long)v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        " nop\n"
        __LL_SC_ATOMIC64(dec_if_positive)
        " nop\n"
        " nop\n"
        " nop\n"
        " nop\n"
        " nop",
        /* LSE atomics */
        "1: ldr x30, %[v]\n"
        " subs %[ret], x30, #1\n"
        " b.lt 2f\n"
        " casal x30, %[ret], %[v]\n"
        " sub x30, x30, #1\n"
        " sub x30, x30, %[ret]\n"
        " cbnz x30, 1b\n"
        "2:")
        : [ret] "+&r" (x0), [v] "+Q" (v->counter)
        :
        : __LL_SC_CLOBBERS, "cc", "memory");

        return x0;
}

#undef __LL_SC_ATOMIC64

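/*
 * __CMPXCHG_CASE() generates the size- and ordering-specific helpers
 * (__cmpxchg_case_1 ... __cmpxchg_case_mb_8) on top of the cas{a,l,al}{b,h,}
 * instructions; these back the generic cmpxchg() wrappers elsewhere in the
 * arch code. "old" is staged through register 30 because cas overwrites its
 * first register with the value observed in memory while x0 must end up
 * holding the return value; register 30 is free to use since the out-of-line
 * LL/SC call clobbers it in any case. #w selects the w/x register names and
 * #sz the byte/halfword instruction forms.
 */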
#define __LL_SC_CMPXCHG(op) __LL_SC_CALL(__cmpxchg_case_##op)

#define __CMPXCHG_CASE(w, sz, name, mb, cl...) \
static inline unsigned long __cmpxchg_case_##name(volatile void *ptr, \
                                                  unsigned long old, \
                                                  unsigned long new) \
{ \
        register unsigned long x0 asm ("x0") = (unsigned long)ptr; \
        register unsigned long x1 asm ("x1") = old; \
        register unsigned long x2 asm ("x2") = new; \
\
        asm volatile(ARM64_LSE_ATOMIC_INSN( \
        /* LL/SC */ \
        " nop\n" \
        __LL_SC_CMPXCHG(name) \
        " nop", \
        /* LSE atomics */ \
        " mov " #w "30, %" #w "[old]\n" \
        " cas" #mb #sz "\t" #w "30, %" #w "[new], %[v]\n" \
        " mov %" #w "[ret], " #w "30") \
        : [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr) \
        : [old] "r" (x1), [new] "r" (x2) \
        : __LL_SC_CLOBBERS, ##cl); \
\
        return x0; \
}

__CMPXCHG_CASE(w, b, 1, )
__CMPXCHG_CASE(w, h, 2, )
__CMPXCHG_CASE(w, , 4, )
__CMPXCHG_CASE(x, , 8, )
__CMPXCHG_CASE(w, b, acq_1, a, "memory")
__CMPXCHG_CASE(w, h, acq_2, a, "memory")
__CMPXCHG_CASE(w, , acq_4, a, "memory")
__CMPXCHG_CASE(x, , acq_8, a, "memory")
__CMPXCHG_CASE(w, b, rel_1, l, "memory")
__CMPXCHG_CASE(w, h, rel_2, l, "memory")
__CMPXCHG_CASE(w, , rel_4, l, "memory")
__CMPXCHG_CASE(x, , rel_8, l, "memory")
__CMPXCHG_CASE(w, b, mb_1, al, "memory")
__CMPXCHG_CASE(w, h, mb_2, al, "memory")
__CMPXCHG_CASE(w, , mb_4, al, "memory")
__CMPXCHG_CASE(x, , mb_8, al, "memory")

#undef __LL_SC_CMPXCHG
#undef __CMPXCHG_CASE

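/*
 * __cmpxchg_double*() uses casp to compare and swap a 128-bit pair. casp
 * requires its operands in consecutive even/odd register pairs, hence the
 * fixed x0/x1 and x2/x3 bindings. Success is detected by xor-ing the values
 * casp read back with the expected old values and or-ing the results, so the
 * return value is 0 on success and non-zero on failure. Three nops pad the
 * LL/SC call up to the four-instruction LSE sequence.
 */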
#define __LL_SC_CMPXCHG_DBL(op) __LL_SC_CALL(__cmpxchg_double##op)

#define __CMPXCHG_DBL(name, mb, cl...) \
static inline long __cmpxchg_double##name(unsigned long old1, \
                                          unsigned long old2, \
                                          unsigned long new1, \
                                          unsigned long new2, \
                                          volatile void *ptr) \
{ \
        unsigned long oldval1 = old1; \
        unsigned long oldval2 = old2; \
        register unsigned long x0 asm ("x0") = old1; \
        register unsigned long x1 asm ("x1") = old2; \
        register unsigned long x2 asm ("x2") = new1; \
        register unsigned long x3 asm ("x3") = new2; \
        register unsigned long x4 asm ("x4") = (unsigned long)ptr; \
\
        asm volatile(ARM64_LSE_ATOMIC_INSN( \
        /* LL/SC */ \
        " nop\n" \
        " nop\n" \
        " nop\n" \
        __LL_SC_CMPXCHG_DBL(name), \
        /* LSE atomics */ \
        " casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n" \
        " eor %[old1], %[old1], %[oldval1]\n" \
        " eor %[old2], %[old2], %[oldval2]\n" \
        " orr %[old1], %[old1], %[old2]") \
        : [old1] "+r" (x0), [old2] "+r" (x1), \
          [v] "+Q" (*(unsigned long *)ptr) \
        : [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \
          [oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \
        : __LL_SC_CLOBBERS, ##cl); \
\
        return x0; \
}

__CMPXCHG_DBL( , )
__CMPXCHG_DBL(_mb, al, "memory")

#undef __LL_SC_CMPXCHG_DBL
#undef __CMPXCHG_DBL

#endif /* __ASM_ATOMIC_LSE_H */