atomic.h

/*
 * Copyright IBM Corp. 1999, 2009
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Denis Joseph Barrow,
 *	      Arnd Bergmann <arndb@de.ibm.com>,
 *
 * Atomic operations that C can't guarantee us.
 * Useful for resource counting etc.
 * s390 uses 'Compare And Swap' for atomicity in SMP environment.
 *
 */

#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

#define __ATOMIC_NO_BARRIER	"\n"
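
/*
 * __ATOMIC_LOOP() applies an operation to an atomic_t and returns the
 * old value.  With the z196 interlocked-access facility this is a single
 * load-and-{add,and,or} instruction; __barrier expands to "bcr 14,0"
 * (fast serialization) where barrier semantics are required.  Without
 * that facility the operation is a compare-and-swap retry loop.
 */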
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __ATOMIC_OR	"lao"
#define __ATOMIC_AND	"lan"
#define __ATOMIC_ADD	"laa"
#define __ATOMIC_BARRIER "bcr 14,0\n"

#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \
({ \
	int old_val; \
\
	typecheck(atomic_t *, ptr); \
	asm volatile( \
		__barrier \
		op_string " %0,%2,%1\n" \
		__barrier \
		: "=d" (old_val), "+Q" ((ptr)->counter) \
		: "d" (op_val) \
		: "cc", "memory"); \
	old_val; \
})

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#define __ATOMIC_OR	"or"
#define __ATOMIC_AND	"nr"
#define __ATOMIC_ADD	"ar"
#define __ATOMIC_BARRIER "\n"

#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \
({ \
	int old_val, new_val; \
\
	typecheck(atomic_t *, ptr); \
	asm volatile( \
		" l %0,%2\n" \
		"0: lr %1,%0\n" \
		op_string " %1,%3\n" \
		" cs %0,%1,%2\n" \
		" jl 0b" \
		: "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter) \
		: "d" (op_val) \
		: "cc", "memory"); \
	old_val; \
})

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
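
/*
 * atomic_read()/atomic_set() compile to a plain load/store; an aligned
 * 4-byte access is atomic on s390, so no special instruction is needed.
 */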
static inline int atomic_read(const atomic_t *v)
{
	int c;

	asm volatile(
		" l %0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic_set(atomic_t *v, int i)
{
	asm volatile(
		" st %1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}
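
/*
 * atomic_add_return() has to provide full barrier semantics, which the
 * interlocked-access "laa" does not give by itself, hence the "bcr 14,0"
 * before and after it; the compare-and-swap fallback serializes on its
 * own.  Plain atomic_add() does not imply a barrier and uses the
 * no-barrier variant, or a single "asi" (add signed immediate to
 * storage) when the increment is a small compile-time constant.
 */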
static inline int atomic_add_return(int i, atomic_t *v)
{
	return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER) + i;
}

static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
		asm volatile(
			"asi %0,%1\n"
			: "+Q" (v->counter)
			: "i" (i)
			: "cc", "memory");
		return;
	}
#endif
	__ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_NO_BARRIER);
}

#define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
#define atomic_inc(_v)			atomic_add(1, _v)
#define atomic_inc_return(_v)		atomic_add_return(1, _v)
#define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)
#define atomic_sub(_i, _v)		atomic_add(-(int)(_i), _v)
#define atomic_sub_return(_i, _v)	atomic_add_return(-(int)(_i), _v)
#define atomic_sub_and_test(_i, _v)	(atomic_sub_return(_i, _v) == 0)
#define atomic_dec(_v)			atomic_sub(1, _v)
#define atomic_dec_return(_v)		atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)
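
/*
 * Bit-mask helpers: clear_mask ANDs the inverted mask into the counter,
 * set_mask ORs the mask in.  Neither implies a memory barrier.
 */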
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	__ATOMIC_LOOP(v, ~mask, __ATOMIC_AND, __ATOMIC_NO_BARRIER);
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	__ATOMIC_LOOP(v, mask, __ATOMIC_OR, __ATOMIC_NO_BARRIER);
}
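
/*
 * atomic_cmpxchg() maps directly onto "cs" (compare and swap); %0 ends up
 * holding the value the counter contained at the time of the operation,
 * whether or not the swap succeeded.
 */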
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	asm volatile(
		" cs %0,%2,%1"
		: "+d" (old), "+Q" (v->counter)
		: "d" (new)
		: "cc", "memory");
	return old;
}
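
/*
 * __atomic_add_unless(): add "a" to the counter unless it currently
 * holds "u", implemented as a cmpxchg retry loop.  Returns the value
 * observed before the (possibly skipped) addition.
 */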
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}

#undef __ATOMIC_LOOP

#define ATOMIC64_INIT(i)  { (i) }
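
/*
 * 64-bit atomics: 64-bit kernels use the native 64-bit instructions
 * (laag/csg/...); 31-bit kernels emulate atomic64_t further below with
 * "cds" on a register pair.
 */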
#ifdef CONFIG_64BIT

#define __ATOMIC64_NO_BARRIER	"\n"

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __ATOMIC64_OR	"laog"
#define __ATOMIC64_AND	"lang"
#define __ATOMIC64_ADD	"laag"
#define __ATOMIC64_BARRIER "bcr 14,0\n"

#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \
({ \
	long long old_val; \
\
	typecheck(atomic64_t *, ptr); \
	asm volatile( \
		__barrier \
		op_string " %0,%2,%1\n" \
		__barrier \
		: "=d" (old_val), "+Q" ((ptr)->counter) \
		: "d" (op_val) \
		: "cc", "memory"); \
	old_val; \
})

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#define __ATOMIC64_OR	"ogr"
#define __ATOMIC64_AND	"ngr"
#define __ATOMIC64_ADD	"agr"
#define __ATOMIC64_BARRIER "\n"

#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \
({ \
	long long old_val, new_val; \
\
	typecheck(atomic64_t *, ptr); \
	asm volatile( \
		" lg %0,%2\n" \
		"0: lgr %1,%0\n" \
		op_string " %1,%3\n" \
		" csg %0,%1,%2\n" \
		" jl 0b" \
		: "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter) \
		: "d" (op_val) \
		: "cc", "memory"); \
	old_val; \
})

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline long long atomic64_read(const atomic64_t *v)
{
	long long c;

	asm volatile(
		" lg %0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	asm volatile(
		" stg %1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER) + i;
}
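
/*
 * As with atomic_add(), a small constant increment can use a single
 * "agsi" (add immediate to storage, signed 8-bit immediate) on z196+.
 */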
static inline void atomic64_add(long long i, atomic64_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
		asm volatile(
			"agsi %0,%1\n"
			: "+Q" (v->counter)
			: "i" (i)
			: "cc", "memory");
		return;
	}
#endif
	__ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER);
}

static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
{
	__ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND, __ATOMIC64_NO_BARRIER);
}

static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
{
	__ATOMIC64_LOOP(v, mask, __ATOMIC64_OR, __ATOMIC64_NO_BARRIER);
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline long long atomic64_cmpxchg(atomic64_t *v,
					 long long old, long long new)
{
	asm volatile(
		" csg %0,%2,%1"
		: "+d" (old), "+Q" (v->counter)
		: "d" (new)
		: "cc", "memory");
	return old;
}

#undef __ATOMIC64_LOOP

#else /* CONFIG_64BIT */
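
/*
 * 31-bit kernels: there is no 64-bit compare and swap on a single
 * register, so atomic64_t is built on "cds" (compare double and swap),
 * which operates on an even/odd register pair holding the 64-bit value.
 */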
typedef struct {
	long long counter;
} atomic64_t;

static inline long long atomic64_read(const atomic64_t *v)
{
	register_pair rp;

	asm volatile(
		" lm %0,%N0,%1"
		: "=&d" (rp) : "Q" (v->counter) );
	return rp.pair;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	register_pair rp = {.pair = i};

	asm volatile(
		" stm %1,%N1,%0"
		: "=Q" (v->counter) : "d" (rp) );
}

static inline long long atomic64_xchg(atomic64_t *v, long long new)
{
	register_pair rp_new = {.pair = new};
	register_pair rp_old;

	asm volatile(
		" lm %0,%N0,%1\n"
		"0: cds %0,%2,%1\n"
		" jl 0b\n"
		: "=&d" (rp_old), "+Q" (v->counter)
		: "d" (rp_new)
		: "cc");
	return rp_old.pair;
}

static inline long long atomic64_cmpxchg(atomic64_t *v,
					 long long old, long long new)
{
	register_pair rp_old = {.pair = old};
	register_pair rp_new = {.pair = new};

	asm volatile(
		" cds %0,%2,%1"
		: "+&d" (rp_old), "+Q" (v->counter)
		: "d" (rp_new)
		: "cc");
	return rp_old.pair;
}
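
/*
 * The remaining 64-bit operations on 31-bit are generic cmpxchg retry
 * loops built on atomic64_cmpxchg() above.
 */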
static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old + i;
	} while (atomic64_cmpxchg(v, old, new) != old);
	return new;
}

static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old | mask;
	} while (atomic64_cmpxchg(v, old, new) != old);
}

static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old & ~mask;
	} while (atomic64_cmpxchg(v, old, new) != old);
}

static inline void atomic64_add(long long i, atomic64_t *v)
{
	atomic64_add_return(i, v);
}

#endif /* CONFIG_64BIT */
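
/*
 * atomic64_add_unless() and atomic64_dec_if_positive() are cmpxchg retry
 * loops shared by the 64-bit and the 31-bit implementation; they only
 * rely on atomic64_read() and atomic64_cmpxchg().
 */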
static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
{
	long long c, old;

	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic64_cmpxchg(v, c, c + i);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}

static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long c, old, dec;

	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}

#define atomic64_add_negative(_i, _v)	(atomic64_add_return(_i, _v) < 0)
#define atomic64_inc(_v)		atomic64_add(1, _v)
#define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(long long)(_i), _v)
#define atomic64_sub(_i, _v)		atomic64_add(-(long long)(_i), _v)
#define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v)		atomic64_sub(1, _v)
#define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)

#endif /* __ARCH_S390_ATOMIC__ */