/* Atomic operations usable in machine independent code */
#ifndef _LINUX_ATOMIC_H
#define _LINUX_ATOMIC_H
#include <asm/atomic.h>
#include <asm/barrier.h>

/*
 * Relaxed variants of xchg, cmpxchg and some atomic operations.
 *
 * We support four variants:
 *
 * - Fully ordered: The default implementation, no suffix required.
 * - Acquire: Provides ACQUIRE semantics, _acquire suffix.
 * - Release: Provides RELEASE semantics, _release suffix.
 * - Relaxed: No ordering guarantees, _relaxed suffix.
 *
 * For compound atomics performing both a load and a store, ACQUIRE
 * semantics apply only to the load and RELEASE semantics only to the
 * store portion of the operation. Note that a failed cmpxchg_acquire
 * does -not- imply any memory ordering constraints.
 *
 * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
 */

#ifndef atomic_read_acquire
#define atomic_read_acquire(v)		smp_load_acquire(&(v)->counter)
#endif

#ifndef atomic_set_release
#define atomic_set_release(v, i)	smp_store_release(&(v)->counter, (i))
#endif
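
/*
 * Illustrative sketch: a typical publish/consume pairing built on the
 * acquire/release accessors above. The example_* names and the
 * payload/ready fields are hypothetical, shown only to demonstrate the
 * intended ordering guarantees.
 */
#if 0	/* example only, never compiled */
struct example_msg {
	int payload;
	atomic_t ready;
};

static inline void example_publish(struct example_msg *m, int val)
{
	m->payload = val;			/* plain store ...              */
	atomic_set_release(&m->ready, 1);	/* ... ordered before "ready"   */
}

static inline int example_consume(struct example_msg *m, int *val)
{
	if (!atomic_read_acquire(&m->ready))	/* pairs with the release above */
		return 0;
	*val = m->payload;			/* guaranteed to see the payload */
	return 1;
}
#endif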
/*
 * The idea here is to build acquire/release variants by adding explicit
 * barriers on top of the relaxed variant. In the case where the relaxed
 * variant is already fully ordered, no additional barriers are needed.
 */
#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	smp_mb__after_atomic();						\
	__ret;								\
})

#define __atomic_op_release(op, args...)				\
({									\
	smp_mb__before_atomic();					\
	op##_relaxed(args);						\
})

#define __atomic_op_fence(op, args...)					\
({									\
	typeof(op##_relaxed(args)) __ret;				\
	smp_mb__before_atomic();					\
	__ret = op##_relaxed(args);					\
	smp_mb__after_atomic();						\
	__ret;								\
})
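
/*
 * For example, on an architecture that provides only
 * atomic_add_return_relaxed(), the wrappers below make
 * atomic_add_return_acquire(i, v) roughly equivalent to (sketch):
 *
 *	int __ret = atomic_add_return_relaxed(i, v);
 *	smp_mb__after_atomic();
 *	return __ret;
 *
 * and the fully ordered atomic_add_return(i, v) additionally places
 * smp_mb__before_atomic() ahead of the relaxed operation.
 */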
/* atomic_add_return_relaxed */
#ifndef atomic_add_return_relaxed
#define atomic_add_return_relaxed	atomic_add_return
#define atomic_add_return_acquire	atomic_add_return
#define atomic_add_return_release	atomic_add_return

#else /* atomic_add_return_relaxed */

#ifndef atomic_add_return_acquire
#define atomic_add_return_acquire(...)					\
	__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
#endif

#ifndef atomic_add_return_release
#define atomic_add_return_release(...)					\
	__atomic_op_release(atomic_add_return, __VA_ARGS__)
#endif

#ifndef atomic_add_return
#define atomic_add_return(...)						\
	__atomic_op_fence(atomic_add_return, __VA_ARGS__)
#endif
#endif /* atomic_add_return_relaxed */

/* atomic_sub_return_relaxed */
#ifndef atomic_sub_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return
#define atomic_sub_return_acquire	atomic_sub_return
#define atomic_sub_return_release	atomic_sub_return

#else /* atomic_sub_return_relaxed */

#ifndef atomic_sub_return_acquire
#define atomic_sub_return_acquire(...)					\
	__atomic_op_acquire(atomic_sub_return, __VA_ARGS__)
#endif

#ifndef atomic_sub_return_release
#define atomic_sub_return_release(...)					\
	__atomic_op_release(atomic_sub_return, __VA_ARGS__)
#endif

#ifndef atomic_sub_return
#define atomic_sub_return(...)						\
	__atomic_op_fence(atomic_sub_return, __VA_ARGS__)
#endif
#endif /* atomic_sub_return_relaxed */

/* atomic_xchg_relaxed */
#ifndef atomic_xchg_relaxed
#define atomic_xchg_relaxed		atomic_xchg
#define atomic_xchg_acquire		atomic_xchg
#define atomic_xchg_release		atomic_xchg

#else /* atomic_xchg_relaxed */

#ifndef atomic_xchg_acquire
#define atomic_xchg_acquire(...)					\
	__atomic_op_acquire(atomic_xchg, __VA_ARGS__)
#endif

#ifndef atomic_xchg_release
#define atomic_xchg_release(...)					\
	__atomic_op_release(atomic_xchg, __VA_ARGS__)
#endif

#ifndef atomic_xchg
#define atomic_xchg(...)						\
	__atomic_op_fence(atomic_xchg, __VA_ARGS__)
#endif
#endif /* atomic_xchg_relaxed */

/* atomic_cmpxchg_relaxed */
#ifndef atomic_cmpxchg_relaxed
#define atomic_cmpxchg_relaxed		atomic_cmpxchg
#define atomic_cmpxchg_acquire		atomic_cmpxchg
#define atomic_cmpxchg_release		atomic_cmpxchg

#else /* atomic_cmpxchg_relaxed */

#ifndef atomic_cmpxchg_acquire
#define atomic_cmpxchg_acquire(...)					\
	__atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic_cmpxchg_release
#define atomic_cmpxchg_release(...)					\
	__atomic_op_release(atomic_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic_cmpxchg
#define atomic_cmpxchg(...)						\
	__atomic_op_fence(atomic_cmpxchg, __VA_ARGS__)
#endif
#endif /* atomic_cmpxchg_relaxed */

#ifndef atomic64_read_acquire
#define atomic64_read_acquire(v)	smp_load_acquire(&(v)->counter)
#endif

#ifndef atomic64_set_release
#define atomic64_set_release(v, i)	smp_store_release(&(v)->counter, (i))
#endif

/* atomic64_add_return_relaxed */
#ifndef atomic64_add_return_relaxed
#define atomic64_add_return_relaxed	atomic64_add_return
#define atomic64_add_return_acquire	atomic64_add_return
#define atomic64_add_return_release	atomic64_add_return

#else /* atomic64_add_return_relaxed */

#ifndef atomic64_add_return_acquire
#define atomic64_add_return_acquire(...)				\
	__atomic_op_acquire(atomic64_add_return, __VA_ARGS__)
#endif

#ifndef atomic64_add_return_release
#define atomic64_add_return_release(...)				\
	__atomic_op_release(atomic64_add_return, __VA_ARGS__)
#endif

#ifndef atomic64_add_return
#define atomic64_add_return(...)					\
	__atomic_op_fence(atomic64_add_return, __VA_ARGS__)
#endif
#endif /* atomic64_add_return_relaxed */

/* atomic64_sub_return_relaxed */
#ifndef atomic64_sub_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return
#define atomic64_sub_return_acquire	atomic64_sub_return
#define atomic64_sub_return_release	atomic64_sub_return

#else /* atomic64_sub_return_relaxed */

#ifndef atomic64_sub_return_acquire
#define atomic64_sub_return_acquire(...)				\
	__atomic_op_acquire(atomic64_sub_return, __VA_ARGS__)
#endif

#ifndef atomic64_sub_return_release
#define atomic64_sub_return_release(...)				\
	__atomic_op_release(atomic64_sub_return, __VA_ARGS__)
#endif

#ifndef atomic64_sub_return
#define atomic64_sub_return(...)					\
	__atomic_op_fence(atomic64_sub_return, __VA_ARGS__)
#endif
#endif /* atomic64_sub_return_relaxed */

/* atomic64_xchg_relaxed */
#ifndef atomic64_xchg_relaxed
#define atomic64_xchg_relaxed		atomic64_xchg
#define atomic64_xchg_acquire		atomic64_xchg
#define atomic64_xchg_release		atomic64_xchg

#else /* atomic64_xchg_relaxed */

#ifndef atomic64_xchg_acquire
#define atomic64_xchg_acquire(...)					\
	__atomic_op_acquire(atomic64_xchg, __VA_ARGS__)
#endif

#ifndef atomic64_xchg_release
#define atomic64_xchg_release(...)					\
	__atomic_op_release(atomic64_xchg, __VA_ARGS__)
#endif

#ifndef atomic64_xchg
#define atomic64_xchg(...)						\
	__atomic_op_fence(atomic64_xchg, __VA_ARGS__)
#endif
#endif /* atomic64_xchg_relaxed */

/* atomic64_cmpxchg_relaxed */
#ifndef atomic64_cmpxchg_relaxed
#define atomic64_cmpxchg_relaxed	atomic64_cmpxchg
#define atomic64_cmpxchg_acquire	atomic64_cmpxchg
#define atomic64_cmpxchg_release	atomic64_cmpxchg

#else /* atomic64_cmpxchg_relaxed */

#ifndef atomic64_cmpxchg_acquire
#define atomic64_cmpxchg_acquire(...)					\
	__atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic64_cmpxchg_release
#define atomic64_cmpxchg_release(...)					\
	__atomic_op_release(atomic64_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic64_cmpxchg
#define atomic64_cmpxchg(...)						\
	__atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
#endif
#endif /* atomic64_cmpxchg_relaxed */

/* cmpxchg_relaxed */
#ifndef cmpxchg_relaxed
#define cmpxchg_relaxed			cmpxchg
#define cmpxchg_acquire			cmpxchg
#define cmpxchg_release			cmpxchg

#else /* cmpxchg_relaxed */

#ifndef cmpxchg_acquire
#define cmpxchg_acquire(...)						\
	__atomic_op_acquire(cmpxchg, __VA_ARGS__)
#endif

#ifndef cmpxchg_release
#define cmpxchg_release(...)						\
	__atomic_op_release(cmpxchg, __VA_ARGS__)
#endif

#ifndef cmpxchg
#define cmpxchg(...)							\
	__atomic_op_fence(cmpxchg, __VA_ARGS__)
#endif
#endif /* cmpxchg_relaxed */

/* cmpxchg64_relaxed */
#ifndef cmpxchg64_relaxed
#define cmpxchg64_relaxed		cmpxchg64
#define cmpxchg64_acquire		cmpxchg64
#define cmpxchg64_release		cmpxchg64

#else /* cmpxchg64_relaxed */

#ifndef cmpxchg64_acquire
#define cmpxchg64_acquire(...)						\
	__atomic_op_acquire(cmpxchg64, __VA_ARGS__)
#endif

#ifndef cmpxchg64_release
#define cmpxchg64_release(...)						\
	__atomic_op_release(cmpxchg64, __VA_ARGS__)
#endif

#ifndef cmpxchg64
#define cmpxchg64(...)							\
	__atomic_op_fence(cmpxchg64, __VA_ARGS__)
#endif
#endif /* cmpxchg64_relaxed */

/* xchg_relaxed */
#ifndef xchg_relaxed
#define xchg_relaxed			xchg
#define xchg_acquire			xchg
#define xchg_release			xchg

#else /* xchg_relaxed */

#ifndef xchg_acquire
#define xchg_acquire(...)		__atomic_op_acquire(xchg, __VA_ARGS__)
#endif

#ifndef xchg_release
#define xchg_release(...)		__atomic_op_release(xchg, __VA_ARGS__)
#endif

#ifndef xchg
#define xchg(...)			__atomic_op_fence(xchg, __VA_ARGS__)
#endif
#endif /* xchg_relaxed */
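
/*
 * Illustrative sketch of why the _acquire/_release forms matter: a minimal
 * test-and-set style trylock/unlock pair, assuming a lock word stored in an
 * atomic_t. The example_trylock()/example_unlock() helpers are hypothetical
 * and only demonstrate the intended pairing of orderings.
 */
#if 0	/* example only, never compiled */
static inline int example_trylock(atomic_t *lock)
{
	/* ACQUIRE on success: later accesses cannot move above the lock. */
	return atomic_cmpxchg_acquire(lock, 0, 1) == 0;
}

static inline void example_unlock(atomic_t *lock)
{
	/* RELEASE: earlier accesses complete before the lock is dropped. */
	atomic_set_release(lock, 0);
}
#endif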
/**
 * atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	return __atomic_add_unless(v, a, u) != u;
}

/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
#ifndef atomic_inc_not_zero
#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
#endif
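
/*
 * Illustrative sketch: atomic_inc_not_zero() is the usual building block
 * for "take a reference only if the object is still live" lookups. The
 * example_obj type and example_get() helper are hypothetical.
 */
#if 0	/* example only, never compiled */
struct example_obj {
	atomic_t refcount;	/* reaches zero when the object is being freed */
};

static inline struct example_obj *example_get(struct example_obj *obj)
{
	/* Fails (returns NULL) if the last reference is already gone. */
	return atomic_inc_not_zero(&obj->refcount) ? obj : NULL;
}
#endif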
#ifndef atomic_andnot
static inline void atomic_andnot(int i, atomic_t *v)
{
	atomic_and(~i, v);
}
#endif

static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	atomic_andnot(mask, v);
}

static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	atomic_or(mask, v);
}

/**
 * atomic_inc_not_zero_hint - increment if not zero
 * @v: pointer of type atomic_t
 * @hint: probable value of the atomic before the increment
 *
 * This version of atomic_inc_not_zero() gives a hint of the probable
 * value of the atomic. This lets the processor avoid reading the memory
 * before doing the atomic read/modify/write cycle, lowering the
 * number of bus transactions on some arches.
 *
 * Returns: 0 if the increment was not done, 1 otherwise.
 */
#ifndef atomic_inc_not_zero_hint
static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
{
	int val, c = hint;

	/* sanity test, should be removed by compiler if hint is a constant */
	if (!hint)
		return atomic_inc_not_zero(v);

	do {
		val = atomic_cmpxchg(v, c, c + 1);
		if (val == c)
			return 1;
		c = val;
	} while (c);

	return 0;
}
#endif
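
/*
 * Illustrative sketch: callers that know the counter's most likely value
 * pass it as @hint so the first cmpxchg is attempted without a prior read.
 * The hint of 1 below assumes a "typically single-referenced" object; the
 * example_try_get() helper is hypothetical.
 */
#if 0	/* example only, never compiled */
static inline int example_try_get(atomic_t *refcount)
{
	/* Most objects here are expected to hold exactly one reference. */
	return atomic_inc_not_zero_hint(refcount, 1);
}
#endif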
#ifndef atomic_inc_unless_negative
static inline int atomic_inc_unless_negative(atomic_t *p)
{
	int v, v1;

	for (v = 0; v >= 0; v = v1) {
		v1 = atomic_cmpxchg(p, v, v + 1);
		if (likely(v1 == v))
			return 1;
	}
	return 0;
}
#endif

#ifndef atomic_dec_unless_positive
static inline int atomic_dec_unless_positive(atomic_t *p)
{
	int v, v1;

	for (v = 0; v <= 0; v = v1) {
		v1 = atomic_cmpxchg(p, v, v - 1);
		if (likely(v1 == v))
			return 1;
	}
	return 0;
}
#endif

/*
 * atomic_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
#ifndef atomic_dec_if_positive
static inline int atomic_dec_if_positive(atomic_t *v)
{
	int c, old, dec;

	c = atomic_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}
#endif
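
/*
 * Illustrative sketch: atomic_dec_if_positive() suits "consume one unit
 * only if any are left" counters. Because it returns the old value minus
 * one even when nothing was decremented, a negative result means no unit
 * was taken. The example_try_take() helper is hypothetical.
 */
#if 0	/* example only, never compiled */
static inline int example_try_take(atomic_t *available)
{
	/* >= 0 means we successfully took one unit. */
	return atomic_dec_if_positive(available) >= 0;
}
#endif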
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif

#ifndef atomic64_andnot
static inline void atomic64_andnot(long long i, atomic64_t *v)
{
	atomic64_and(~i, v);
}
#endif

#include <asm-generic/atomic-long.h>

#endif /* _LINUX_ATOMIC_H */