/*
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#ifndef _ASM_RISCV_ATOMIC_H
#define _ASM_RISCV_ATOMIC_H

#ifdef CONFIG_GENERIC_ATOMIC64
# include <asm-generic/atomic64.h>
#else
# if (__riscv_xlen < 64)
#  error "64-bit atomics require XLEN to be at least 64"
# endif
#endif

#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i) { (i) }

#define __atomic_op_acquire(op, args...) \
({ \
        typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
        __asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory"); \
        __ret; \
})

#define __atomic_op_release(op, args...) \
({ \
        __asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory"); \
        op##_relaxed(args); \
})

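/*
 * Note (illustrative, not from the upstream file): the generic atomic
 * machinery uses these two helpers to synthesize the _acquire/_release
 * variants of any op for which only the _relaxed and fully-ordered forms are
 * defined below.  RISCV_ACQUIRE_BARRIER and RISCV_RELEASE_BARRIER come from
 * the RISC-V fence definitions pulled in through the headers above and
 * expand, roughly, to "fence r, rw" and "fence rw, w".  As a sketch:
 *
 *        atomic_fetch_add_acquire(i, v)
 *                ~= ({ int __ret = atomic_fetch_add_relaxed(i, v);
 *                      __asm__ __volatile__("fence r, rw" ::: "memory");
 *                      __ret; })
 */
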
static __always_inline int atomic_read(const atomic_t *v)
{
        return READ_ONCE(v->counter);
}

static __always_inline void atomic_set(atomic_t *v, int i)
{
        WRITE_ONCE(v->counter, i);
}

#ifndef CONFIG_GENERIC_ATOMIC64
#define ATOMIC64_INIT(i) { (i) }

static __always_inline long atomic64_read(const atomic64_t *v)
{
        return READ_ONCE(v->counter);
}

static __always_inline void atomic64_set(atomic64_t *v, long i)
{
        WRITE_ONCE(v->counter, i);
}
#endif

/*
 * First, the atomic ops that have no ordering constraints and therefore don't
 * have the AQ or RL bits set. These don't return anything, so there's only
 * one version to worry about.
 */
#define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix) \
static __always_inline \
void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
{ \
        __asm__ __volatile__ ( \
                " amo" #asm_op "." #asm_type " zero, %1, %0" \
                : "+A" (v->counter) \
                : "r" (I) \
                : "memory"); \
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I) \
        ATOMIC_OP (op, asm_op, I, w, int, )
#else
#define ATOMIC_OPS(op, asm_op, I) \
        ATOMIC_OP (op, asm_op, I, w, int, ) \
        ATOMIC_OP (op, asm_op, I, d, long, 64)
#endif

ATOMIC_OPS(add, add, i)
ATOMIC_OPS(sub, add, -i)
ATOMIC_OPS(and, and, i)
ATOMIC_OPS( or, or, i)
ATOMIC_OPS(xor, xor, i)

#undef ATOMIC_OP
#undef ATOMIC_OPS

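/*
 * For illustration, ATOMIC_OPS(add, add, i) above generates, roughly:
 *
 *        static __always_inline void atomic_add(int i, atomic_t *v)
 *        {
 *                __asm__ __volatile__ (
 *                        " amoadd.w zero, %1, %0"
 *                        : "+A" (v->counter)
 *                        : "r" (i)
 *                        : "memory");
 *        }
 *
 * plus the amoadd.d-based atomic64_add() on 64-bit builds.  Note that there
 * is no dedicated subtract AMO: ATOMIC_OPS(sub, add, -i) simply feeds the
 * negated operand to amoadd.
 */
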
/*
 * Atomic ops that have ordered, relaxed, acquire, and release variants.
 * There are two flavors of these: the arithmetic ops have both fetch and
 * return versions, while the logical ops only have fetch versions.
 */
#define ATOMIC_FETCH_OP(op, asm_op, I, asm_type, c_type, prefix) \
static __always_inline \
c_type atomic##prefix##_fetch_##op##_relaxed(c_type i, \
                                             atomic##prefix##_t *v) \
{ \
        register c_type ret; \
        __asm__ __volatile__ ( \
                " amo" #asm_op "." #asm_type " %1, %2, %0" \
                : "+A" (v->counter), "=r" (ret) \
                : "r" (I) \
                : "memory"); \
        return ret; \
} \
static __always_inline \
c_type atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v) \
{ \
        register c_type ret; \
        __asm__ __volatile__ ( \
                " amo" #asm_op "." #asm_type ".aqrl %1, %2, %0" \
                : "+A" (v->counter), "=r" (ret) \
                : "r" (I) \
                : "memory"); \
        return ret; \
}

#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_type, c_type, prefix) \
static __always_inline \
c_type atomic##prefix##_##op##_return_relaxed(c_type i, \
                                              atomic##prefix##_t *v) \
{ \
        return atomic##prefix##_fetch_##op##_relaxed(i, v) c_op I; \
} \
static __always_inline \
c_type atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v) \
{ \
        return atomic##prefix##_fetch_##op(i, v) c_op I; \
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I) \
        ATOMIC_FETCH_OP( op, asm_op, I, w, int, ) \
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int, )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I) \
        ATOMIC_FETCH_OP( op, asm_op, I, w, int, ) \
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int, ) \
        ATOMIC_FETCH_OP( op, asm_op, I, d, long, 64) \
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, d, long, 64)
#endif

ATOMIC_OPS(add, add, +, i)
ATOMIC_OPS(sub, add, +, -i)

#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed
#define atomic_add_return atomic_add_return
#define atomic_sub_return atomic_sub_return

#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
#define atomic_fetch_add atomic_fetch_add
#define atomic_fetch_sub atomic_fetch_sub

#ifndef CONFIG_GENERIC_ATOMIC64
#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
#define atomic64_add_return atomic64_add_return
#define atomic64_sub_return atomic64_sub_return

#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
#define atomic64_fetch_add atomic64_fetch_add
#define atomic64_fetch_sub atomic64_fetch_sub
#endif

#undef ATOMIC_OPS

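/*
 * For illustration, the fully-ordered 32-bit ops generated above look
 * roughly like this (the _relaxed variants are identical except that the AMO
 * drops the .aqrl suffix):
 *
 *        static __always_inline int atomic_fetch_add(int i, atomic_t *v)
 *        {
 *                register int ret;
 *                __asm__ __volatile__ (
 *                        " amoadd.w.aqrl %1, %2, %0"
 *                        : "+A" (v->counter), "=r" (ret)
 *                        : "r" (i)
 *                        : "memory");
 *                return ret;
 *        }
 *
 *        static __always_inline int atomic_add_return(int i, atomic_t *v)
 *        {
 *                return atomic_fetch_add(i, v) + i;
 *        }
 */
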
#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I) \
        ATOMIC_FETCH_OP(op, asm_op, I, w, int, )
#else
#define ATOMIC_OPS(op, asm_op, I) \
        ATOMIC_FETCH_OP(op, asm_op, I, w, int, ) \
        ATOMIC_FETCH_OP(op, asm_op, I, d, long, 64)
#endif

ATOMIC_OPS(and, and, i)
ATOMIC_OPS( or, or, i)
ATOMIC_OPS(xor, xor, i)

#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
#define atomic_fetch_and atomic_fetch_and
#define atomic_fetch_or atomic_fetch_or
#define atomic_fetch_xor atomic_fetch_xor

#ifndef CONFIG_GENERIC_ATOMIC64
#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
#define atomic64_fetch_and atomic64_fetch_and
#define atomic64_fetch_or atomic64_fetch_or
#define atomic64_fetch_xor atomic64_fetch_xor
#endif

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN

/*
 * The extra atomic operations that are constructed from one of the core
 * AMO-based operations above (aside from sub, which is easier to fit above).
 * These are required to perform a full barrier, but they're OK this way
 * because atomic_*_return is also required to perform a full barrier.
 */
#define ATOMIC_OP(op, func_op, comp_op, I, c_type, prefix) \
static __always_inline \
bool atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
{ \
        return atomic##prefix##_##func_op##_return(i, v) comp_op I; \
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, func_op, comp_op, I) \
        ATOMIC_OP(op, func_op, comp_op, I, int, )
#else
#define ATOMIC_OPS(op, func_op, comp_op, I) \
        ATOMIC_OP(op, func_op, comp_op, I, int, ) \
        ATOMIC_OP(op, func_op, comp_op, I, long, 64)
#endif

ATOMIC_OPS(add_and_test, add, ==, 0)
ATOMIC_OPS(sub_and_test, sub, ==, 0)
ATOMIC_OPS(add_negative, add, <, 0)

#undef ATOMIC_OP
#undef ATOMIC_OPS

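/*
 * For illustration, ATOMIC_OPS(sub_and_test, sub, ==, 0) above generates,
 * roughly:
 *
 *        static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
 *        {
 *                return atomic_sub_return(i, v) == 0;
 *        }
 *
 * which inherits the full-barrier semantics of atomic_sub_return().
 */
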
#define ATOMIC_OP(op, func_op, I, c_type, prefix) \
static __always_inline \
void atomic##prefix##_##op(atomic##prefix##_t *v) \
{ \
        atomic##prefix##_##func_op(I, v); \
}

#define ATOMIC_FETCH_OP(op, func_op, I, c_type, prefix) \
static __always_inline \
c_type atomic##prefix##_fetch_##op##_relaxed(atomic##prefix##_t *v) \
{ \
        return atomic##prefix##_fetch_##func_op##_relaxed(I, v); \
} \
static __always_inline \
c_type atomic##prefix##_fetch_##op(atomic##prefix##_t *v) \
{ \
        return atomic##prefix##_fetch_##func_op(I, v); \
}

#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, c_type, prefix) \
static __always_inline \
c_type atomic##prefix##_##op##_return_relaxed(atomic##prefix##_t *v) \
{ \
        return atomic##prefix##_fetch_##op##_relaxed(v) c_op I; \
} \
static __always_inline \
c_type atomic##prefix##_##op##_return(atomic##prefix##_t *v) \
{ \
        return atomic##prefix##_fetch_##op(v) c_op I; \
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I) \
        ATOMIC_OP(       op, asm_op, I, int, ) \
        ATOMIC_FETCH_OP( op, asm_op, I, int, ) \
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, int, )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I) \
        ATOMIC_OP(       op, asm_op, I, int, ) \
        ATOMIC_FETCH_OP( op, asm_op, I, int, ) \
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, int, ) \
        ATOMIC_OP(       op, asm_op, I, long, 64) \
        ATOMIC_FETCH_OP( op, asm_op, I, long, 64) \
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, long, 64)
#endif

ATOMIC_OPS(inc, add, +, 1)
ATOMIC_OPS(dec, add, +, -1)

#define atomic_inc_return_relaxed atomic_inc_return_relaxed
#define atomic_dec_return_relaxed atomic_dec_return_relaxed
#define atomic_inc_return atomic_inc_return
#define atomic_dec_return atomic_dec_return

#define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed
#define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed
#define atomic_fetch_inc atomic_fetch_inc
#define atomic_fetch_dec atomic_fetch_dec

#ifndef CONFIG_GENERIC_ATOMIC64
#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
#define atomic64_inc_return atomic64_inc_return
#define atomic64_dec_return atomic64_dec_return

#define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed
#define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed
#define atomic64_fetch_inc atomic64_fetch_inc
#define atomic64_fetch_dec atomic64_fetch_dec
#endif

#undef ATOMIC_OPS
#undef ATOMIC_OP
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN

#define ATOMIC_OP(op, func_op, comp_op, I, prefix) \
static __always_inline \
bool atomic##prefix##_##op(atomic##prefix##_t *v) \
{ \
        return atomic##prefix##_##func_op##_return(v) comp_op I; \
}

ATOMIC_OP(inc_and_test, inc, ==, 0, )
ATOMIC_OP(dec_and_test, dec, ==, 0, )

#ifndef CONFIG_GENERIC_ATOMIC64
ATOMIC_OP(inc_and_test, inc, ==, 0, 64)
ATOMIC_OP(dec_and_test, dec, ==, 0, 64)
#endif

#undef ATOMIC_OP

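/*
 * For illustration, the unary helpers defined above reduce to, roughly:
 *
 *        atomic_inc(v)           ->  atomic_add(1, v)
 *        atomic_fetch_dec(v)     ->  atomic_fetch_add(-1, v)
 *        atomic_inc_and_test(v)  ->  atomic_inc_return(v) == 0
 */
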
/* This is required to provide a full barrier on success. */
static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
        int prev, rc;

        __asm__ __volatile__ (
                "0: lr.w %[p], %[c]\n"
                " beq %[p], %[u], 1f\n"
                " add %[rc], %[p], %[a]\n"
                " sc.w.rl %[rc], %[rc], %[c]\n"
                " bnez %[rc], 0b\n"
                " fence rw, rw\n"
                "1:\n"
                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
                : [a]"r" (a), [u]"r" (u)
                : "memory");
        return prev;
}

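/*
 * Usage sketch (the actual wrappers live in the generic atomic headers):
 * atomic_add_unless() and atomic_inc_not_zero() are built on top of this,
 * roughly as:
 *
 *        atomic_add_unless(v, a, u)  ->  atomic_fetch_add_unless(v, a, u) != u
 *        atomic_inc_not_zero(v)      ->  atomic_add_unless(v, 1, 0)
 *
 * The "fence rw, rw" above sits on the success path only, which is what the
 * "full barrier on success" comment refers to.
 */
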
#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline long __atomic64_add_unless(atomic64_t *v, long a, long u)
{
        long prev, rc;

        __asm__ __volatile__ (
                "0: lr.d %[p], %[c]\n"
                " beq %[p], %[u], 1f\n"
                " add %[rc], %[p], %[a]\n"
                " sc.d.rl %[rc], %[rc], %[c]\n"
                " bnez %[rc], 0b\n"
                " fence rw, rw\n"
                "1:\n"
                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
                : [a]"r" (a), [u]"r" (u)
                : "memory");
        return prev;
}

static __always_inline int atomic64_add_unless(atomic64_t *v, long a, long u)
{
        return __atomic64_add_unless(v, a, u) != u;
}
#endif

/*
 * atomic_{cmp,}xchg is required to have exactly the same ordering semantics as
 * {cmp,}xchg and the operations that return, so they need a full barrier.
 */
#define ATOMIC_OP(c_t, prefix, size) \
static __always_inline \
c_t atomic##prefix##_xchg_relaxed(atomic##prefix##_t *v, c_t n) \
{ \
        return __xchg_relaxed(&(v->counter), n, size); \
} \
static __always_inline \
c_t atomic##prefix##_xchg_acquire(atomic##prefix##_t *v, c_t n) \
{ \
        return __xchg_acquire(&(v->counter), n, size); \
} \
static __always_inline \
c_t atomic##prefix##_xchg_release(atomic##prefix##_t *v, c_t n) \
{ \
        return __xchg_release(&(v->counter), n, size); \
} \
static __always_inline \
c_t atomic##prefix##_xchg(atomic##prefix##_t *v, c_t n) \
{ \
        return __xchg(&(v->counter), n, size); \
} \
static __always_inline \
c_t atomic##prefix##_cmpxchg_relaxed(atomic##prefix##_t *v, \
                                     c_t o, c_t n) \
{ \
        return __cmpxchg_relaxed(&(v->counter), o, n, size); \
} \
static __always_inline \
c_t atomic##prefix##_cmpxchg_acquire(atomic##prefix##_t *v, \
                                     c_t o, c_t n) \
{ \
        return __cmpxchg_acquire(&(v->counter), o, n, size); \
} \
static __always_inline \
c_t atomic##prefix##_cmpxchg_release(atomic##prefix##_t *v, \
                                     c_t o, c_t n) \
{ \
        return __cmpxchg_release(&(v->counter), o, n, size); \
} \
static __always_inline \
c_t atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n) \
{ \
        return __cmpxchg(&(v->counter), o, n, size); \
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS() \
        ATOMIC_OP( int, , 4)
#else
#define ATOMIC_OPS() \
        ATOMIC_OP( int, , 4) \
        ATOMIC_OP(long, 64, 8)
#endif

ATOMIC_OPS()

#undef ATOMIC_OPS
#undef ATOMIC_OP

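/*
 * For illustration, a typical caller-side compare-and-swap retry loop built
 * on the primitives above (sketch only; the loop body is made up):
 *
 *        int old = atomic_read(v);
 *        for (;;) {
 *                int prev = atomic_cmpxchg(v, old, old + 1);
 *                if (prev == old)
 *                        break;          // the swap happened
 *                old = prev;             // lost the race, retry with new value
 *        }
 */
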
static __always_inline int atomic_sub_if_positive(atomic_t *v, int offset)
{
        int prev, rc;

        __asm__ __volatile__ (
                "0: lr.w %[p], %[c]\n"
                " sub %[rc], %[p], %[o]\n"
                " bltz %[rc], 1f\n"
                " sc.w.rl %[rc], %[rc], %[c]\n"
                " bnez %[rc], 0b\n"
                " fence rw, rw\n"
                "1:\n"
                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
                : [o]"r" (offset)
                : "memory");
        return prev - offset;
}

#define atomic_dec_if_positive(v) atomic_sub_if_positive(v, 1)

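/*
 * Usage sketch (illustrative, with a made-up counter name):
 * atomic_dec_if_positive() only performs the decrement when the result would
 * stay >= 0, and returns the would-be new value either way:
 *
 *        if (atomic_dec_if_positive(&resource_count) < 0)
 *                ; // counter was already zero; nothing was decremented
 */
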
#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline long atomic64_sub_if_positive(atomic64_t *v, int offset)
{
        long prev, rc;

        __asm__ __volatile__ (
                "0: lr.d %[p], %[c]\n"
                " sub %[rc], %[p], %[o]\n"
                " bltz %[rc], 1f\n"
                " sc.d.rl %[rc], %[rc], %[c]\n"
                " bnez %[rc], 0b\n"
                " fence rw, rw\n"
                "1:\n"
                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
                : [o]"r" (offset)
                : "memory");
        return prev - offset;
}

#define atomic64_dec_if_positive(v) atomic64_sub_if_positive(v, 1)
#endif

#endif /* _ASM_RISCV_ATOMIC_H */