/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc...
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */

#define ATOMIC_INIT(i) { (i) }
#define ATOMIC64_INIT(i) { (i) }

#define atomic_read(v) READ_ONCE((v)->counter)
#define atomic64_read(v) READ_ONCE((v)->counter)

#define atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
#define atomic64_set(v,i) WRITE_ONCE((v)->counter, (i))

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */
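
/*
 * Each loop below is a load-locked/store-conditional sequence (ldl_l/stl_c
 * for 32-bit, ldq_l/stq_c for 64-bit).  The store-conditional writes 0 into
 * its register when it loses the reservation, so "beq %0,2f" takes the
 * out-of-line branch and "br 1b" restarts the whole operation.
 */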
#define ATOMIC_OP(op, asm_op) \
static __inline__ void atomic_##op(int i, atomic_t * v) \
{ \
	unsigned long temp; \
	__asm__ __volatile__( \
	"1: ldl_l %0,%1\n" \
	" " #asm_op " %0,%2,%0\n" \
	" stl_c %0,%1\n" \
	" beq %0,2f\n" \
	".subsection 2\n" \
	"2: br 1b\n" \
	".previous" \
	:"=&r" (temp), "=m" (v->counter) \
	:"Ir" (i), "m" (v->counter)); \
}
#define ATOMIC_OP_RETURN(op, asm_op) \
static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \
	long temp, result; \
	__asm__ __volatile__( \
	"1: ldl_l %0,%1\n" \
	" " #asm_op " %0,%3,%2\n" \
	" " #asm_op " %0,%3,%0\n" \
	" stl_c %0,%1\n" \
	" beq %0,2f\n" \
	".subsection 2\n" \
	"2: br 1b\n" \
	".previous" \
	:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
	:"Ir" (i), "m" (v->counter) : "memory"); \
	return result; \
}

#define ATOMIC_FETCH_OP(op, asm_op) \
static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
{ \
	long temp, result; \
	__asm__ __volatile__( \
	"1: ldl_l %2,%1\n" \
	" " #asm_op " %2,%3,%0\n" \
	" stl_c %0,%1\n" \
	" beq %0,2f\n" \
	".subsection 2\n" \
	"2: br 1b\n" \
	".previous" \
	:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
	:"Ir" (i), "m" (v->counter) : "memory"); \
	return result; \
}
#define ATOMIC64_OP(op, asm_op) \
static __inline__ void atomic64_##op(long i, atomic64_t * v) \
{ \
	unsigned long temp; \
	__asm__ __volatile__( \
	"1: ldq_l %0,%1\n" \
	" " #asm_op " %0,%2,%0\n" \
	" stq_c %0,%1\n" \
	" beq %0,2f\n" \
	".subsection 2\n" \
	"2: br 1b\n" \
	".previous" \
	:"=&r" (temp), "=m" (v->counter) \
	:"Ir" (i), "m" (v->counter)); \
}
#define ATOMIC64_OP_RETURN(op, asm_op) \
static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
{ \
	long temp, result; \
	__asm__ __volatile__( \
	"1: ldq_l %0,%1\n" \
	" " #asm_op " %0,%3,%2\n" \
	" " #asm_op " %0,%3,%0\n" \
	" stq_c %0,%1\n" \
	" beq %0,2f\n" \
	".subsection 2\n" \
	"2: br 1b\n" \
	".previous" \
	:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
	:"Ir" (i), "m" (v->counter) : "memory"); \
	return result; \
}

#define ATOMIC64_FETCH_OP(op, asm_op) \
static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
{ \
	long temp, result; \
	__asm__ __volatile__( \
	"1: ldq_l %2,%1\n" \
	" " #asm_op " %2,%3,%0\n" \
	" stq_c %0,%1\n" \
	" beq %0,2f\n" \
	".subsection 2\n" \
	"2: br 1b\n" \
	".previous" \
	:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
	:"Ir" (i), "m" (v->counter) : "memory"); \
	return result; \
}

#define ATOMIC_OPS(op) \
	ATOMIC_OP(op, op##l) \
	ATOMIC_OP_RETURN(op, op##l) \
	ATOMIC_FETCH_OP(op, op##l) \
	ATOMIC64_OP(op, op##q) \
	ATOMIC64_OP_RETURN(op, op##q) \
	ATOMIC64_FETCH_OP(op, op##q)
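
/*
 * ATOMIC_OPS(add) and ATOMIC_OPS(sub) below generate atomic_add(),
 * atomic_add_return_relaxed(), atomic_fetch_add_relaxed() and the
 * atomic64_* counterparts (likewise for sub), backed by the addl/addq
 * and subl/subq instructions respectively.
 */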
ATOMIC_OPS(add)
ATOMIC_OPS(sub)

#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed

#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed

#define atomic_andnot atomic_andnot
#define atomic64_andnot atomic64_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm) \
	ATOMIC_OP(op, asm) \
	ATOMIC_FETCH_OP(op, asm) \
	ATOMIC64_OP(op, asm) \
	ATOMIC64_FETCH_OP(op, asm)
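
/*
 * Bitwise ops: "andnot" is implemented with Alpha's bic (bit clear)
 * instruction and "or" with bis (bit set); and/xor map directly.
 */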
ATOMIC_OPS(and, and)
ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, bis)
ATOMIC_OPS(xor, xor)

#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed

#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, new, old;
	smp_mb();
	__asm__ __volatile__(
	"1: ldl_l %[old],%[mem]\n"
	" cmpeq %[old],%[u],%[c]\n"
	" addl %[old],%[a],%[new]\n"
	" bne %[c],2f\n"
	" stl_c %[new],%[mem]\n"
	" beq %[new],3f\n"
	"2:\n"
	".subsection 2\n"
	"3: br 1b\n"
	".previous"
	: [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"((long)u)
	: "memory");
	smp_mb();
	return old;
}
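
/*
 * Note: the generic atomic_add_unless()/atomic_inc_not_zero() helpers are
 * layered on top of this by comparing the returned old value against @u;
 * only the 64-bit atomic64_inc_not_zero() is spelled out in this file.
 */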
/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns true iff @v was not @u.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, tmp;
	smp_mb();
	__asm__ __volatile__(
	"1: ldq_l %[tmp],%[mem]\n"
	" cmpeq %[tmp],%[u],%[c]\n"
	" addq %[tmp],%[a],%[tmp]\n"
	" bne %[c],2f\n"
	" stq_c %[tmp],%[mem]\n"
	" beq %[tmp],3f\n"
	"2:\n"
	".subsection 2\n"
	"3: br 1b\n"
	".previous"
	: [tmp] "=&r"(tmp), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"(u)
	: "memory");
	smp_mb();
	return !c;
}
/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long old, tmp;
	smp_mb();
	__asm__ __volatile__(
	"1: ldq_l %[old],%[mem]\n"
	" subq %[old],1,%[tmp]\n"
	" ble %[old],2f\n"
	" stq_c %[tmp],%[mem]\n"
	" beq %[tmp],3f\n"
	"2:\n"
	".subsection 2\n"
	"3: br 1b\n"
	".previous"
	: [old] "=&r"(old), [tmp] "=&r"(tmp)
	: [mem] "m"(*v)
	: "memory");
	smp_mb();
	return old - 1;
}
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic64_dec_return(v) atomic64_sub_return(1,(v))

#define atomic_inc_return(v) atomic_add_return(1,(v))
#define atomic64_inc_return(v) atomic64_add_return(1,(v))

#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)

#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)

#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)

#define atomic_inc(v) atomic_add(1,(v))
#define atomic64_inc(v) atomic64_add(1,(v))

#define atomic_dec(v) atomic_sub(1,(v))
#define atomic64_dec(v) atomic64_sub(1,(v))

#endif /* _ALPHA_ATOMIC_H */