
/* Atomic operations usable in machine independent code */
#ifndef _LINUX_ATOMIC_H
#define _LINUX_ATOMIC_H
#include <asm/atomic.h>
#include <asm/barrier.h>

/*
 * Relaxed variants of xchg, cmpxchg and some atomic operations.
 *
 * We support four variants:
 *
 * - Fully ordered: The default implementation, no suffix required.
 * - Acquire: Provides ACQUIRE semantics, _acquire suffix.
 * - Release: Provides RELEASE semantics, _release suffix.
 * - Relaxed: No ordering guarantees, _relaxed suffix.
 *
 * For compound atomics performing both a load and a store, ACQUIRE
 * semantics apply only to the load and RELEASE semantics only to the
 * store portion of the operation. Note that a failed cmpxchg_acquire
 * does -not- imply any memory ordering constraints.
 *
 * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
 */
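
/*
 * Illustrative sketch (not part of this header; "ready", "payload" and
 * "stat" are hypothetical variables) showing how the suffixes are
 * typically chosen:
 *
 *	payload = compute_value();		(plain store)
 *	atomic_xchg_release(&ready, 1);		(payload visible before flag)
 *
 *	if (atomic_xchg_acquire(&ready, 0))	(later loads see the payload)
 *		consume(payload);
 *
 *	atomic_inc_return_relaxed(&stat);	(pure counter, no ordering)
 *
 * The unsuffixed forms, e.g. atomic_xchg(), are fully ordered.
 */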
#ifndef atomic_read_acquire
#define atomic_read_acquire(v)		smp_load_acquire(&(v)->counter)
#endif

#ifndef atomic_set_release
#define atomic_set_release(v, i)	smp_store_release(&(v)->counter, (i))
#endif
/*
 * The idea here is to build acquire/release variants by adding explicit
 * barriers on top of the relaxed variant. In the case where the relaxed
 * variant is already fully ordered, no additional barriers are needed.
 *
 * In addition, if an arch has a special barrier for acquire/release, it
 * can implement its own __atomic_op_* helpers and use the same framework
 * for building the variants.
 */
#ifndef __atomic_op_acquire
#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	smp_mb__after_atomic();						\
	__ret;								\
})
#endif

#ifndef __atomic_op_release
#define __atomic_op_release(op, args...)				\
({									\
	smp_mb__before_atomic();					\
	op##_relaxed(args);						\
})
#endif

#ifndef __atomic_op_fence
#define __atomic_op_fence(op, args...)					\
({									\
	typeof(op##_relaxed(args)) __ret;				\
	smp_mb__before_atomic();					\
	__ret = op##_relaxed(args);					\
	smp_mb__after_atomic();						\
	__ret;								\
})
#endif
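
/*
 * Illustrative expansion (not part of this header): on an architecture
 * that only provides atomic_add_return_relaxed(), the wrappers above turn
 *
 *	atomic_add_return_acquire(i, v);
 *
 * into roughly
 *
 *	({
 *		int __ret = atomic_add_return_relaxed(i, v);
 *		smp_mb__after_atomic();
 *		__ret;
 *	});
 *
 * i.e. the relaxed primitive followed by the arch's post-atomic barrier.
 * The fully ordered (unsuffixed) form gets a barrier on both sides.
 */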
/* atomic_add_return_relaxed */
#ifndef atomic_add_return_relaxed
#define atomic_add_return_relaxed	atomic_add_return
#define atomic_add_return_acquire	atomic_add_return
#define atomic_add_return_release	atomic_add_return

#else /* atomic_add_return_relaxed */

#ifndef atomic_add_return_acquire
#define atomic_add_return_acquire(...)					\
	__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
#endif

#ifndef atomic_add_return_release
#define atomic_add_return_release(...)					\
	__atomic_op_release(atomic_add_return, __VA_ARGS__)
#endif

#ifndef atomic_add_return
#define atomic_add_return(...)						\
	__atomic_op_fence(atomic_add_return, __VA_ARGS__)
#endif
#endif /* atomic_add_return_relaxed */

/* atomic_inc_return_relaxed */
#ifndef atomic_inc_return_relaxed
#define atomic_inc_return_relaxed	atomic_inc_return
#define atomic_inc_return_acquire	atomic_inc_return
#define atomic_inc_return_release	atomic_inc_return

#else /* atomic_inc_return_relaxed */

#ifndef atomic_inc_return_acquire
#define atomic_inc_return_acquire(...)					\
	__atomic_op_acquire(atomic_inc_return, __VA_ARGS__)
#endif

#ifndef atomic_inc_return_release
#define atomic_inc_return_release(...)					\
	__atomic_op_release(atomic_inc_return, __VA_ARGS__)
#endif

#ifndef atomic_inc_return
#define atomic_inc_return(...)						\
	__atomic_op_fence(atomic_inc_return, __VA_ARGS__)
#endif
#endif /* atomic_inc_return_relaxed */

/* atomic_sub_return_relaxed */
#ifndef atomic_sub_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return
#define atomic_sub_return_acquire	atomic_sub_return
#define atomic_sub_return_release	atomic_sub_return

#else /* atomic_sub_return_relaxed */

#ifndef atomic_sub_return_acquire
#define atomic_sub_return_acquire(...)					\
	__atomic_op_acquire(atomic_sub_return, __VA_ARGS__)
#endif

#ifndef atomic_sub_return_release
#define atomic_sub_return_release(...)					\
	__atomic_op_release(atomic_sub_return, __VA_ARGS__)
#endif

#ifndef atomic_sub_return
#define atomic_sub_return(...)						\
	__atomic_op_fence(atomic_sub_return, __VA_ARGS__)
#endif
#endif /* atomic_sub_return_relaxed */

/* atomic_dec_return_relaxed */
#ifndef atomic_dec_return_relaxed
#define atomic_dec_return_relaxed	atomic_dec_return
#define atomic_dec_return_acquire	atomic_dec_return
#define atomic_dec_return_release	atomic_dec_return

#else /* atomic_dec_return_relaxed */

#ifndef atomic_dec_return_acquire
#define atomic_dec_return_acquire(...)					\
	__atomic_op_acquire(atomic_dec_return, __VA_ARGS__)
#endif

#ifndef atomic_dec_return_release
#define atomic_dec_return_release(...)					\
	__atomic_op_release(atomic_dec_return, __VA_ARGS__)
#endif

#ifndef atomic_dec_return
#define atomic_dec_return(...)						\
	__atomic_op_fence(atomic_dec_return, __VA_ARGS__)
#endif
#endif /* atomic_dec_return_relaxed */

/* atomic_xchg_relaxed */
#ifndef atomic_xchg_relaxed
#define atomic_xchg_relaxed		atomic_xchg
#define atomic_xchg_acquire		atomic_xchg
#define atomic_xchg_release		atomic_xchg

#else /* atomic_xchg_relaxed */

#ifndef atomic_xchg_acquire
#define atomic_xchg_acquire(...)					\
	__atomic_op_acquire(atomic_xchg, __VA_ARGS__)
#endif

#ifndef atomic_xchg_release
#define atomic_xchg_release(...)					\
	__atomic_op_release(atomic_xchg, __VA_ARGS__)
#endif

#ifndef atomic_xchg
#define atomic_xchg(...)						\
	__atomic_op_fence(atomic_xchg, __VA_ARGS__)
#endif
#endif /* atomic_xchg_relaxed */

/* atomic_cmpxchg_relaxed */
#ifndef atomic_cmpxchg_relaxed
#define atomic_cmpxchg_relaxed		atomic_cmpxchg
#define atomic_cmpxchg_acquire		atomic_cmpxchg
#define atomic_cmpxchg_release		atomic_cmpxchg

#else /* atomic_cmpxchg_relaxed */

#ifndef atomic_cmpxchg_acquire
#define atomic_cmpxchg_acquire(...)					\
	__atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic_cmpxchg_release
#define atomic_cmpxchg_release(...)					\
	__atomic_op_release(atomic_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic_cmpxchg
#define atomic_cmpxchg(...)						\
	__atomic_op_fence(atomic_cmpxchg, __VA_ARGS__)
#endif
#endif /* atomic_cmpxchg_relaxed */
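
/*
 * Illustrative sketch (not part of this header): a toy test-and-set lock
 * built from the primitives above, showing a typical ACQUIRE/RELEASE
 * pairing. "lock" is a hypothetical atomic_t initialised to 0.
 *
 *	while (atomic_cmpxchg_acquire(&lock, 0, 1) != 0)
 *		cpu_relax();
 *	... critical section, cannot leak above the successful cmpxchg ...
 *	atomic_set_release(&lock, 0);
 *
 * Note that a *failed* cmpxchg_acquire provides no ordering, which is
 * fine here because the loop only exits on success.
 */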
#ifndef atomic64_read_acquire
#define atomic64_read_acquire(v)	smp_load_acquire(&(v)->counter)
#endif

#ifndef atomic64_set_release
#define atomic64_set_release(v, i)	smp_store_release(&(v)->counter, (i))
#endif

/* atomic64_add_return_relaxed */
#ifndef atomic64_add_return_relaxed
#define atomic64_add_return_relaxed	atomic64_add_return
#define atomic64_add_return_acquire	atomic64_add_return
#define atomic64_add_return_release	atomic64_add_return

#else /* atomic64_add_return_relaxed */

#ifndef atomic64_add_return_acquire
#define atomic64_add_return_acquire(...)				\
	__atomic_op_acquire(atomic64_add_return, __VA_ARGS__)
#endif

#ifndef atomic64_add_return_release
#define atomic64_add_return_release(...)				\
	__atomic_op_release(atomic64_add_return, __VA_ARGS__)
#endif

#ifndef atomic64_add_return
#define atomic64_add_return(...)					\
	__atomic_op_fence(atomic64_add_return, __VA_ARGS__)
#endif
#endif /* atomic64_add_return_relaxed */

/* atomic64_inc_return_relaxed */
#ifndef atomic64_inc_return_relaxed
#define atomic64_inc_return_relaxed	atomic64_inc_return
#define atomic64_inc_return_acquire	atomic64_inc_return
#define atomic64_inc_return_release	atomic64_inc_return

#else /* atomic64_inc_return_relaxed */

#ifndef atomic64_inc_return_acquire
#define atomic64_inc_return_acquire(...)				\
	__atomic_op_acquire(atomic64_inc_return, __VA_ARGS__)
#endif

#ifndef atomic64_inc_return_release
#define atomic64_inc_return_release(...)				\
	__atomic_op_release(atomic64_inc_return, __VA_ARGS__)
#endif

#ifndef atomic64_inc_return
#define atomic64_inc_return(...)					\
	__atomic_op_fence(atomic64_inc_return, __VA_ARGS__)
#endif
#endif /* atomic64_inc_return_relaxed */

/* atomic64_sub_return_relaxed */
#ifndef atomic64_sub_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return
#define atomic64_sub_return_acquire	atomic64_sub_return
#define atomic64_sub_return_release	atomic64_sub_return

#else /* atomic64_sub_return_relaxed */

#ifndef atomic64_sub_return_acquire
#define atomic64_sub_return_acquire(...)				\
	__atomic_op_acquire(atomic64_sub_return, __VA_ARGS__)
#endif

#ifndef atomic64_sub_return_release
#define atomic64_sub_return_release(...)				\
	__atomic_op_release(atomic64_sub_return, __VA_ARGS__)
#endif

#ifndef atomic64_sub_return
#define atomic64_sub_return(...)					\
	__atomic_op_fence(atomic64_sub_return, __VA_ARGS__)
#endif
#endif /* atomic64_sub_return_relaxed */

/* atomic64_dec_return_relaxed */
#ifndef atomic64_dec_return_relaxed
#define atomic64_dec_return_relaxed	atomic64_dec_return
#define atomic64_dec_return_acquire	atomic64_dec_return
#define atomic64_dec_return_release	atomic64_dec_return

#else /* atomic64_dec_return_relaxed */

#ifndef atomic64_dec_return_acquire
#define atomic64_dec_return_acquire(...)				\
	__atomic_op_acquire(atomic64_dec_return, __VA_ARGS__)
#endif

#ifndef atomic64_dec_return_release
#define atomic64_dec_return_release(...)				\
	__atomic_op_release(atomic64_dec_return, __VA_ARGS__)
#endif

#ifndef atomic64_dec_return
#define atomic64_dec_return(...)					\
	__atomic_op_fence(atomic64_dec_return, __VA_ARGS__)
#endif
#endif /* atomic64_dec_return_relaxed */

/* atomic64_xchg_relaxed */
#ifndef atomic64_xchg_relaxed
#define atomic64_xchg_relaxed		atomic64_xchg
#define atomic64_xchg_acquire		atomic64_xchg
#define atomic64_xchg_release		atomic64_xchg

#else /* atomic64_xchg_relaxed */

#ifndef atomic64_xchg_acquire
#define atomic64_xchg_acquire(...)					\
	__atomic_op_acquire(atomic64_xchg, __VA_ARGS__)
#endif

#ifndef atomic64_xchg_release
#define atomic64_xchg_release(...)					\
	__atomic_op_release(atomic64_xchg, __VA_ARGS__)
#endif

#ifndef atomic64_xchg
#define atomic64_xchg(...)						\
	__atomic_op_fence(atomic64_xchg, __VA_ARGS__)
#endif
#endif /* atomic64_xchg_relaxed */

/* atomic64_cmpxchg_relaxed */
#ifndef atomic64_cmpxchg_relaxed
#define atomic64_cmpxchg_relaxed	atomic64_cmpxchg
#define atomic64_cmpxchg_acquire	atomic64_cmpxchg
#define atomic64_cmpxchg_release	atomic64_cmpxchg

#else /* atomic64_cmpxchg_relaxed */

#ifndef atomic64_cmpxchg_acquire
#define atomic64_cmpxchg_acquire(...)					\
	__atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic64_cmpxchg_release
#define atomic64_cmpxchg_release(...)					\
	__atomic_op_release(atomic64_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic64_cmpxchg
#define atomic64_cmpxchg(...)						\
	__atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
#endif
#endif /* atomic64_cmpxchg_relaxed */

/* cmpxchg_relaxed */
#ifndef cmpxchg_relaxed
#define cmpxchg_relaxed			cmpxchg
#define cmpxchg_acquire			cmpxchg
#define cmpxchg_release			cmpxchg

#else /* cmpxchg_relaxed */

#ifndef cmpxchg_acquire
#define cmpxchg_acquire(...)						\
	__atomic_op_acquire(cmpxchg, __VA_ARGS__)
#endif

#ifndef cmpxchg_release
#define cmpxchg_release(...)						\
	__atomic_op_release(cmpxchg, __VA_ARGS__)
#endif

#ifndef cmpxchg
#define cmpxchg(...)							\
	__atomic_op_fence(cmpxchg, __VA_ARGS__)
#endif
#endif /* cmpxchg_relaxed */

/* cmpxchg64_relaxed */
#ifndef cmpxchg64_relaxed
#define cmpxchg64_relaxed		cmpxchg64
#define cmpxchg64_acquire		cmpxchg64
#define cmpxchg64_release		cmpxchg64

#else /* cmpxchg64_relaxed */

#ifndef cmpxchg64_acquire
#define cmpxchg64_acquire(...)						\
	__atomic_op_acquire(cmpxchg64, __VA_ARGS__)
#endif

#ifndef cmpxchg64_release
#define cmpxchg64_release(...)						\
	__atomic_op_release(cmpxchg64, __VA_ARGS__)
#endif

#ifndef cmpxchg64
#define cmpxchg64(...)							\
	__atomic_op_fence(cmpxchg64, __VA_ARGS__)
#endif
#endif /* cmpxchg64_relaxed */

/* xchg_relaxed */
#ifndef xchg_relaxed
#define xchg_relaxed			xchg
#define xchg_acquire			xchg
#define xchg_release			xchg

#else /* xchg_relaxed */

#ifndef xchg_acquire
#define xchg_acquire(...)		__atomic_op_acquire(xchg, __VA_ARGS__)
#endif

#ifndef xchg_release
#define xchg_release(...)		__atomic_op_release(xchg, __VA_ARGS__)
#endif

#ifndef xchg
#define xchg(...)			__atomic_op_fence(xchg, __VA_ARGS__)
#endif
#endif /* xchg_relaxed */
/**
 * atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	return __atomic_add_unless(v, a, u) != u;
}
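
/*
 * Illustrative sketch (not part of this header): a "get a reference unless
 * the object is already dying" helper in the style of kref_get_unless_zero().
 * "struct obj" and its "refs" field are hypothetical.
 *
 *	static bool obj_tryget(struct obj *o)
 *	{
 *		return atomic_add_unless(&o->refs, 1, 0);
 *	}
 *
 * The increment only happens if the refcount was non-zero, so an object on
 * its way to being freed cannot be resurrected.
 */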
/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
#ifndef atomic_inc_not_zero
#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
#endif

#ifndef atomic_andnot
static inline void atomic_andnot(int i, atomic_t *v)
{
	atomic_and(~i, v);
}
#endif

static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	atomic_andnot(mask, v);
}

static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	atomic_or(mask, v);
}
/**
 * atomic_inc_not_zero_hint - increment if not zero
 * @v: pointer of type atomic_t
 * @hint: probable value of the atomic before the increment
 *
 * This version of atomic_inc_not_zero() gives a hint of the probable
 * value of the atomic. This lets the processor skip reading the memory
 * location before the atomic read/modify/write cycle, lowering the
 * number of bus transactions on some arches.
 *
 * Returns: 0 if the increment was not done, 1 otherwise.
 */
#ifndef atomic_inc_not_zero_hint
static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
{
	int val, c = hint;

	/* sanity test, should be removed by compiler if hint is a constant */
	if (!hint)
		return atomic_inc_not_zero(v);

	do {
		val = atomic_cmpxchg(v, c, c + 1);
		if (val == c)
			return 1;
		c = val;
	} while (c);

	return 0;
}
#endif
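
/*
 * Illustrative sketch (not part of this header): a caller that knows the
 * counter almost always sits at 1 when the lookup races with it can pass
 * that value as the hint ("obj" and "refs" are hypothetical):
 *
 *	if (!atomic_inc_not_zero_hint(&obj->refs, 1))
 *		goto lookup_failed;
 *
 * The first cmpxchg then speculates on the value 1 instead of doing an
 * extra read of the counter.
 */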
#ifndef atomic_inc_unless_negative
static inline int atomic_inc_unless_negative(atomic_t *p)
{
	int v, v1;

	for (v = 0; v >= 0; v = v1) {
		v1 = atomic_cmpxchg(p, v, v + 1);
		if (likely(v1 == v))
			return 1;
	}

	return 0;
}
#endif

#ifndef atomic_dec_unless_positive
static inline int atomic_dec_unless_positive(atomic_t *p)
{
	int v, v1;

	for (v = 0; v <= 0; v = v1) {
		v1 = atomic_cmpxchg(p, v, v - 1);
		if (likely(v1 == v))
			return 1;
	}

	return 0;
}
#endif
/*
 * atomic_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic_t
 *
 * Returns the old value of @v minus 1, even if @v was not actually
 * decremented; a negative return value therefore means no decrement
 * took place.
 */
#ifndef atomic_dec_if_positive
static inline int atomic_dec_if_positive(atomic_t *v)
{
	int c, old, dec;

	c = atomic_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}

	return dec;
}
#endif
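
/*
 * Illustrative sketch (not part of this header): using
 * atomic_dec_if_positive() as a trylock on a counted resource, in the
 * style of a semaphore fast path. "avail" is a hypothetical atomic_t
 * holding the number of free slots.
 *
 *	if (atomic_dec_if_positive(&avail) < 0)
 *		return -EBUSY;		(nothing was consumed)
 *	... use the slot ...
 *	atomic_inc(&avail);
 *
 * The negative return tells the caller the count was already zero (or
 * below) and no slot was taken.
 */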
/**
 * atomic_fetch_or - perform *p |= mask and return old value of *p
 * @p: pointer to atomic_t
 * @mask: mask to OR on the atomic_t
 */
#ifndef atomic_fetch_or
static inline int atomic_fetch_or(atomic_t *p, int mask)
{
	int old, val = atomic_read(p);

	for (;;) {
		old = atomic_cmpxchg(p, val, val | mask);
		if (old == val)
			break;
		val = old;
	}

	return old;
}
#endif
/**
 * fetch_or - perform *ptr |= mask and return old value of *ptr
 * @ptr: pointer to value
 * @mask: mask to OR on the value
 *
 * cmpxchg based fetch_or, macro so it works for different integer types
 */
#ifndef fetch_or
#define fetch_or(ptr, mask)						\
({	typeof(*(ptr)) __old, __val = *(ptr);				\
	for (;;) {							\
		__old = cmpxchg((ptr), __val, __val | (mask));		\
		if (__old == __val)					\
			break;						\
		__val = __old;						\
	}								\
	__old;								\
})
#endif
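
/*
 * Illustrative sketch (not part of this header): fetch_or() and
 * atomic_fetch_or() suit test-and-set style flag updates, because the old
 * value tells you whether the bit was already set. "pending" is a
 * hypothetical flags word shared with another context, and WORK_PENDING a
 * hypothetical flag bit.
 *
 *	if (!(fetch_or(&pending, WORK_PENDING) & WORK_PENDING))
 *		queue_the_work();	(we were the first to set the bit)
 *
 * The atomic_t variant is used the same way on an atomic_t.
 */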
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif

#ifndef atomic64_andnot
static inline void atomic64_andnot(long long i, atomic64_t *v)
{
	atomic64_and(~i, v);
}
#endif

#include <asm-generic/atomic-long.h>

#endif /* _LINUX_ATOMIC_H */