atomic.h

/*
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef _ASM_RISCV_ATOMIC_H
#define _ASM_RISCV_ATOMIC_H

#ifdef CONFIG_GENERIC_ATOMIC64
# include <asm-generic/atomic64.h>
#else
# if (__riscv_xlen < 64)
#  error "64-bit atomics require XLEN to be at least 64"
# endif
#endif

#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i) { (i) }
static __always_inline int atomic_read(const atomic_t *v)
{
        return READ_ONCE(v->counter);
}

static __always_inline void atomic_set(atomic_t *v, int i)
{
        WRITE_ONCE(v->counter, i);
}

#ifndef CONFIG_GENERIC_ATOMIC64
#define ATOMIC64_INIT(i) { (i) }

static __always_inline long atomic64_read(const atomic64_t *v)
{
        return READ_ONCE(v->counter);
}

static __always_inline void atomic64_set(atomic64_t *v, long i)
{
        WRITE_ONCE(v->counter, i);
}
#endif
/*
 * First, the atomic ops that have no ordering constraints and therefore don't
 * have the AQ or RL bits set.  These don't return anything, so there's only
 * one version to worry about.
 */
#define ATOMIC_OP(op, asm_op, c_op, I, asm_type, c_type, prefix) \
static __always_inline void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
{ \
        __asm__ __volatile__ ( \
                "amo" #asm_op "." #asm_type " zero, %1, %0" \
                : "+A" (v->counter) \
                : "r" (I) \
                : "memory"); \
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I) \
        ATOMIC_OP (op, asm_op, c_op, I, w, int,   )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I) \
        ATOMIC_OP (op, asm_op, c_op, I, w, int,   ) \
        ATOMIC_OP (op, asm_op, c_op, I, d, long, 64)
#endif

ATOMIC_OPS(add, add, +,  i)
ATOMIC_OPS(sub, add, +, -i)
ATOMIC_OPS(and, and, &,  i)
ATOMIC_OPS( or,  or, |,  i)
ATOMIC_OPS(xor, xor, ^,  i)

#undef ATOMIC_OP
#undef ATOMIC_OPS
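
/*
 * Illustrative expansion (commentary added to this copy, not in the original
 * header): on a 64-bit build, ATOMIC_OPS(add, add, +, i) generates roughly
 *
 *	static __always_inline void atomic_add(int i, atomic_t *v)
 *	{
 *		__asm__ __volatile__ (
 *			"amoadd.w zero, %1, %0"
 *			: "+A" (v->counter)
 *			: "r" (i)
 *			: "memory");
 *	}
 *
 * plus the analogous atomic64_add() using amoadd.d.  Note that sub is built
 * from the same amoadd instruction by negating the operand (-i) rather than
 * by a separate AMO.
 */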
/*
 * Atomic ops that have ordered, relaxed, acquire, and release variants.
 * There are two flavors of these: the arithmetic ops have both fetch and
 * return versions, while the logical ops only have fetch versions.
 */
#define ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, asm_type, c_type, prefix) \
static __always_inline c_type atomic##prefix##_fetch_##op##c_or(c_type i, atomic##prefix##_t *v) \
{ \
        register c_type ret; \
        __asm__ __volatile__ ( \
                "amo" #asm_op "." #asm_type #asm_or " %1, %2, %0" \
                : "+A" (v->counter), "=r" (ret) \
                : "r" (I) \
                : "memory"); \
        return ret; \
}

#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, asm_type, c_type, prefix) \
static __always_inline c_type atomic##prefix##_##op##_return##c_or(c_type i, atomic##prefix##_t *v) \
{ \
        return atomic##prefix##_fetch_##op##c_or(i, v) c_op I; \
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or) \
        ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, w, int,   ) \
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, w, int,   )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or) \
        ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, w, int,   ) \
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, w, int,   ) \
        ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, d, long, 64) \
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, d, long, 64)
#endif
ATOMIC_OPS(add, add, +,  i,      , _relaxed)
ATOMIC_OPS(add, add, +,  i, .aq  , _acquire)
ATOMIC_OPS(add, add, +,  i, .rl  , _release)
ATOMIC_OPS(add, add, +,  i, .aqrl,         )

ATOMIC_OPS(sub, add, +, -i,      , _relaxed)
ATOMIC_OPS(sub, add, +, -i, .aq  , _acquire)
ATOMIC_OPS(sub, add, +, -i, .rl  , _release)
ATOMIC_OPS(sub, add, +, -i, .aqrl,         )

#undef ATOMIC_OPS

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or) \
        ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, w, int,   )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or) \
        ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, w, int,   ) \
        ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, d, long, 64)
#endif

ATOMIC_OPS(and, and, &,  i,      , _relaxed)
ATOMIC_OPS(and, and, &,  i, .aq  , _acquire)
ATOMIC_OPS(and, and, &,  i, .rl  , _release)
ATOMIC_OPS(and, and, &,  i, .aqrl,         )

ATOMIC_OPS( or,  or, |,  i,      , _relaxed)
ATOMIC_OPS( or,  or, |,  i, .aq  , _acquire)
ATOMIC_OPS( or,  or, |,  i, .rl  , _release)
ATOMIC_OPS( or,  or, |,  i, .aqrl,         )

ATOMIC_OPS(xor, xor, ^,  i,      , _relaxed)
ATOMIC_OPS(xor, xor, ^,  i, .aq  , _acquire)
ATOMIC_OPS(xor, xor, ^,  i, .rl  , _release)
ATOMIC_OPS(xor, xor, ^,  i, .aqrl,         )

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
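
/*
 * Illustrative expansion (commentary added to this copy): for the relaxed
 * flavor, ATOMIC_OPS(add, add, +, i, , _relaxed) generates roughly
 *
 *	static __always_inline int atomic_fetch_add_relaxed(int i, atomic_t *v)
 *	{
 *		register int ret;
 *		__asm__ __volatile__ (
 *			"amoadd.w %1, %2, %0"
 *			: "+A" (v->counter), "=r" (ret)
 *			: "r" (i)
 *			: "memory");
 *		return ret;
 *	}
 *
 * with atomic_add_return_relaxed() defined as the fetched value plus i.  The
 * .aq/.rl/.aqrl forms are identical except for the ordering bits on the AMO.
 */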
/*
 * The extra atomic operations that are constructed from one of the core
 * AMO-based operations above (aside from sub, which is easier to fit above).
 * These are required to perform a barrier, but they're OK this way because
 * atomic_*_return is also required to perform a barrier.
 */
#define ATOMIC_OP(op, func_op, comp_op, I, c_type, prefix) \
static __always_inline bool atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
{ \
        return atomic##prefix##_##func_op##_return(i, v) comp_op I; \
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, func_op, comp_op, I) \
        ATOMIC_OP (op, func_op, comp_op, I,  int,   )
#else
#define ATOMIC_OPS(op, func_op, comp_op, I) \
        ATOMIC_OP (op, func_op, comp_op, I,  int,   ) \
        ATOMIC_OP (op, func_op, comp_op, I, long, 64)
#endif

ATOMIC_OPS(add_and_test, add, ==, 0)
ATOMIC_OPS(sub_and_test, sub, ==, 0)
ATOMIC_OPS(add_negative, add,  <, 0)

#undef ATOMIC_OP
#undef ATOMIC_OPS
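
/*
 * Example usage (commentary added to this copy; the names are hypothetical):
 * the *_and_test helpers suit reference-count style code, e.g.
 *
 *	if (atomic_sub_and_test(1, &obj->refcnt))
 *		release_obj(obj);	// hypothetical: last reference gone
 *
 * The full barrier comes from the underlying atomic_sub_return().
 */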
#define ATOMIC_OP(op, func_op, c_op, I, c_type, prefix) \
static __always_inline void atomic##prefix##_##op(atomic##prefix##_t *v) \
{ \
        atomic##prefix##_##func_op(I, v); \
}

#define ATOMIC_FETCH_OP(op, func_op, c_op, I, c_type, prefix) \
static __always_inline c_type atomic##prefix##_fetch_##op(atomic##prefix##_t *v) \
{ \
        return atomic##prefix##_fetch_##func_op(I, v); \
}

#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, c_type, prefix) \
static __always_inline c_type atomic##prefix##_##op##_return(atomic##prefix##_t *v) \
{ \
        return atomic##prefix##_fetch_##op(v) c_op I; \
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I) \
        ATOMIC_OP       (op, asm_op, c_op, I,  int,   ) \
        ATOMIC_FETCH_OP (op, asm_op, c_op, I,  int,   ) \
        ATOMIC_OP_RETURN(op, asm_op, c_op, I,  int,   )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I) \
        ATOMIC_OP       (op, asm_op, c_op, I,  int,   ) \
        ATOMIC_FETCH_OP (op, asm_op, c_op, I,  int,   ) \
        ATOMIC_OP_RETURN(op, asm_op, c_op, I,  int,   ) \
        ATOMIC_OP       (op, asm_op, c_op, I, long, 64) \
        ATOMIC_FETCH_OP (op, asm_op, c_op, I, long, 64) \
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, long, 64)
#endif

ATOMIC_OPS(inc, add, +,  1)
ATOMIC_OPS(dec, add, +, -1)

#undef ATOMIC_OPS
#undef ATOMIC_OP
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN

#define ATOMIC_OP(op, func_op, comp_op, I, prefix) \
static __always_inline bool atomic##prefix##_##op(atomic##prefix##_t *v) \
{ \
        return atomic##prefix##_##func_op##_return(v) comp_op I; \
}

ATOMIC_OP(inc_and_test, inc, ==, 0,   )
ATOMIC_OP(dec_and_test, dec, ==, 0,   )

#ifndef CONFIG_GENERIC_ATOMIC64
ATOMIC_OP(inc_and_test, inc, ==, 0, 64)
ATOMIC_OP(dec_and_test, dec, ==, 0, 64)
#endif

#undef ATOMIC_OP
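
/*
 * Illustrative note (commentary added to this copy): the macros above wire
 * atomic_inc(v) to atomic_add(1, v) and atomic_dec(v) to atomic_add(-1, v),
 * with atomic_inc_return(v) equal to atomic_fetch_inc(v) + 1, so for example:
 *
 *	if (atomic_dec_and_test(&cnt))	// true exactly when the count hits 0
 *		do_cleanup();		// hypothetical callee
 */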
/* This is required to provide a barrier on success. */
static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
        int prev, rc;

        __asm__ __volatile__ (
                "0:\n\t"
                "lr.w.aqrl %[p], %[c]\n\t"
                "beq %[p], %[u], 1f\n\t"
                "add %[rc], %[p], %[a]\n\t"
                "sc.w.aqrl %[rc], %[rc], %[c]\n\t"
                "bnez %[rc], 0b\n\t"
                "1:"
                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
                : [a]"r" (a), [u]"r" (u)
                : "memory");
        return prev;
}
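
/*
 * A note on the LR/SC loop above (commentary added to this copy): lr.w.aqrl
 * loads the counter and acquires a reservation on it; sc.w.aqrl writes 0 to
 * %[rc] on success and non-zero if the reservation was lost, in which case
 * bnez retries from 0:.  The beq bails out early, without storing, when the
 * counter already equals 'u'.  The generic atomic_add_unless(v, a, u) in
 * <linux/atomic.h> is then built on this by checking that the returned
 * previous value was not 'u'.
 */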
#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline long __atomic64_add_unless(atomic64_t *v, long a, long u)
{
        long prev, rc;

        __asm__ __volatile__ (
                "0:\n\t"
                "lr.d.aqrl %[p], %[c]\n\t"
                "beq %[p], %[u], 1f\n\t"
                "add %[rc], %[p], %[a]\n\t"
                "sc.d.aqrl %[rc], %[rc], %[c]\n\t"
                "bnez %[rc], 0b\n\t"
                "1:"
                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
                : [a]"r" (a), [u]"r" (u)
                : "memory");
        return prev;
}

static __always_inline int atomic64_add_unless(atomic64_t *v, long a, long u)
{
        return __atomic64_add_unless(v, a, u) != u;
}
#endif
/*
 * The extra atomic operations that are constructed from one of the core
 * LR/SC-based operations above.
 */
static __always_inline int atomic_inc_not_zero(atomic_t *v)
{
        return __atomic_add_unless(v, 1, 0);
}

#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline long atomic64_inc_not_zero(atomic64_t *v)
{
        return atomic64_add_unless(v, 1, 0);
}
#endif
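
/*
 * Example usage (commentary added to this copy; the names are hypothetical):
 * atomic_inc_not_zero() is the usual "take a reference only if the object is
 * still live" primitive:
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;	// hypothetical lookup path: object is dying
 */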
/*
 * atomic_{cmp,}xchg is required to have exactly the same ordering semantics as
 * {cmp,}xchg and the operations that return, so they need a barrier.  We just
 * use the other implementations directly.
 */
#define ATOMIC_OP(c_t, prefix, c_or, size, asm_or) \
static __always_inline c_t atomic##prefix##_cmpxchg##c_or(atomic##prefix##_t *v, c_t o, c_t n) \
{ \
        return __cmpxchg(&(v->counter), o, n, size, asm_or, asm_or); \
} \
static __always_inline c_t atomic##prefix##_xchg##c_or(atomic##prefix##_t *v, c_t n) \
{ \
        return __xchg(n, &(v->counter), size, asm_or); \
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(c_or, asm_or) \
        ATOMIC_OP( int,   , c_or, 4, asm_or)
#else
#define ATOMIC_OPS(c_or, asm_or) \
        ATOMIC_OP( int,   , c_or, 4, asm_or) \
        ATOMIC_OP(long, 64, c_or, 8, asm_or)
#endif

ATOMIC_OPS(        , .aqrl)
ATOMIC_OPS(_acquire, .aq  )
ATOMIC_OPS(_release, .rl  )
ATOMIC_OPS(_relaxed,      )

#undef ATOMIC_OPS
#undef ATOMIC_OP
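
/*
 * Example usage (commentary added to this copy; LIMIT is hypothetical): a
 * typical compare-and-swap loop built on atomic_cmpxchg(), clamping a counter
 * at an upper bound:
 *
 *	int old = atomic_read(v);
 *	for (;;) {
 *		int new = old < LIMIT ? old + 1 : LIMIT;
 *		int prev = atomic_cmpxchg(v, old, new);
 *		if (prev == old)
 *			break;		// CAS won: value updated
 *		old = prev;		// raced: retry with the fresh value
 *	}
 */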
static __always_inline int atomic_sub_if_positive(atomic_t *v, int offset)
{
        int prev, rc;

        __asm__ __volatile__ (
                "0:\n\t"
                "lr.w.aqrl %[p], %[c]\n\t"
                "sub %[rc], %[p], %[o]\n\t"
                "bltz %[rc], 1f\n\t"
                "sc.w.aqrl %[rc], %[rc], %[c]\n\t"
                "bnez %[rc], 0b\n\t"
                "1:"
                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
                : [o]"r" (offset)
                : "memory");
        return prev - offset;
}

#define atomic_dec_if_positive(v)	atomic_sub_if_positive(v, 1)
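
/*
 * Example usage (commentary added to this copy; the names are hypothetical):
 * atomic_dec_if_positive() returns the decremented value, which is negative
 * exactly when the decrement was refused, suiting semaphore-style "grab a
 * slot if one is free" logic:
 *
 *	if (atomic_dec_if_positive(&free_slots) < 0)
 *		wait_for_slot();	// hypothetical slow path
 */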
#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline long atomic64_sub_if_positive(atomic64_t *v, int offset)
{
        long prev, rc;

        __asm__ __volatile__ (
                "0:\n\t"
                "lr.d.aqrl %[p], %[c]\n\t"
                "sub %[rc], %[p], %[o]\n\t"
                "bltz %[rc], 1f\n\t"
                "sc.d.aqrl %[rc], %[rc], %[c]\n\t"
                "bnez %[rc], 0b\n\t"
                "1:"
                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
                : [o]"r" (offset)
                : "memory");
        return prev - offset;
}

#define atomic64_dec_if_positive(v)	atomic64_sub_if_positive(v, 1)
#endif

#endif /* _ASM_RISCV_ATOMIC_H */