atomic.h

/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_ATOMIC_H
#define __ASM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>

#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }
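/*
 * ATOMIC_INIT() is a static initialiser, e.g.:
 *
 *	static atomic_t refs = ATOMIC_INIT(1);
 */
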
#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	(*(volatile int *)&(v)->counter)
#define atomic_set(v,i)	(((v)->counter) = (i))
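/*
 * Note that atomic_read() and atomic_set() are plain volatile accesses: a
 * single aligned ldr/str is single-copy atomic on AArch64, but neither
 * macro implies any memory ordering.
 */
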
/*
 * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */
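/*
 * The pattern below is shared by all of these ops: ldxr loads the counter
 * and marks the address in the exclusive monitor, stxr stores the updated
 * value only if the monitor is still held (writing 0 to its status register
 * on success, non-zero on failure), and cbnz retries the sequence until the
 * store succeeds. The void ops (atomic_add, atomic_sub) carry no barriers
 * and imply no ordering; the *_return and cmpxchg variants are fully
 * ordered.
 */
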
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_add\n"
"1:	ldxr	%w0, %2\n"
"	add	%w0, %w0, %w3\n"
"	stxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i));
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_add_return\n"
"1:	ldxr	%w0, %2\n"
"	add	%w0, %w0, %w3\n"
"	stlxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "memory");

	smp_mb();
	return result;
}

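/*
 * Full ordering for atomic_add_return() (and the other *_return ops below)
 * comes from the combination of the store-release (stlxr) and the trailing
 * smp_mb(), which expands to "dmb ish" on arm64; the "memory" clobber stops
 * the compiler moving accesses across the asm block.
 */
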
static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_sub\n"
"1:	ldxr	%w0, %2\n"
"	sub	%w0, %w0, %w3\n"
"	stxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i));
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_sub_return\n"
"1:	ldxr	%w0, %2\n"
"	sub	%w0, %w0, %w3\n"
"	stlxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "memory");

	smp_mb();
	return result;
}

static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	unsigned long tmp;
	int oldval;

	smp_mb();

	asm volatile("// atomic_cmpxchg\n"
"1:	ldxr	%w1, %2\n"
"	cmp	%w1, %w3\n"
"	b.ne	2f\n"
"	stxr	%w0, %w4, %2\n"
"	cbnz	%w0, 1b\n"
"2:"
	: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
	: "Ir" (old), "r" (new)
	: "cc");

	smp_mb();
	return oldval;
}

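/*
 * atomic_cmpxchg() returns the value that was read: the swap happened iff
 * the return value equals 'old'. The smp_mb() calls on either side run
 * unconditionally, so the operation is fully ordered whether or not the
 * store takes place.
 */
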
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c;
}

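/*
 * __atomic_add_unless() is the usual compare-and-swap loop: re-read the
 * counter whenever the cmpxchg loses a race, and give up once it reads 'u'.
 * It returns the old value, so callers (such as the generic
 * atomic_add_unless() wrapper in <linux/atomic.h>) test the result against
 * 'u' to see whether the add happened.
 */
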
#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)	(atomic_add_return(1, v))
#define atomic_dec_return(v)	(atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
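/*
 * The helpers built on atomic_add_return()/atomic_sub_return() inherit
 * their full-barrier semantics; atomic_inc() and atomic_dec(), like the
 * void add/sub ops they wrap, imply no ordering.
 */
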
/*
 * 64-bit atomic operations.
 */
#define ATOMIC64_INIT(i) { (i) }

#define atomic64_read(v)	(*(volatile long long *)&(v)->counter)
#define atomic64_set(v,i)	(((v)->counter) = (i))
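/*
 * As with the 32-bit versions, these rely on aligned 64-bit loads and
 * stores being single-copy atomic on AArch64; they provide no ordering.
 */
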
static inline void atomic64_add(u64 i, atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_add\n"
"1:	ldxr	%0, %2\n"
"	add	%0, %0, %3\n"
"	stxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i));
}

static inline long atomic64_add_return(long i, atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_add_return\n"
"1:	ldxr	%0, %2\n"
"	add	%0, %0, %3\n"
"	stlxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "memory");

	smp_mb();
	return result;
}

static inline void atomic64_sub(u64 i, atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_sub\n"
"1:	ldxr	%0, %2\n"
"	sub	%0, %0, %3\n"
"	stxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i));
}

static inline long atomic64_sub_return(long i, atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_sub_return\n"
"1:	ldxr	%0, %2\n"
"	sub	%0, %0, %3\n"
"	stlxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "memory");

	smp_mb();
	return result;
}

static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
{
	long oldval;
	unsigned long res;

	smp_mb();

	asm volatile("// atomic64_cmpxchg\n"
"1:	ldxr	%1, %2\n"
"	cmp	%1, %3\n"
"	b.ne	2f\n"
"	stxr	%w0, %4, %2\n"
"	cbnz	%w0, 1b\n"
"2:"
	: "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
	: "Ir" (old), "r" (new)
	: "cc");

	smp_mb();
	return oldval;
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_dec_if_positive\n"
"1:	ldxr	%0, %2\n"
"	subs	%0, %0, #1\n"
"	b.mi	2f\n"
"	stlxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b\n"
"	dmb	ish\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	:
	: "cc", "memory");

	return result;
}

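/*
 * atomic64_dec_if_positive() only stores the decremented value when it is
 * non-negative (b.mi skips the store otherwise), and only executes the
 * "dmb ish" barrier on the success path. The caller sees the new value,
 * which is negative when no decrement took place.
 */
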
static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;

	c = atomic64_read(v);
	while (c != u && (old = atomic64_cmpxchg((v), c, c + a)) != c)
		c = old;

	return c != u;
}

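/*
 * Unlike the 32-bit __atomic_add_unless() above, atomic64_add_unless()
 * returns a boolean: non-zero if the add was performed. This is what
 * atomic64_inc_not_zero() below relies on.
 */
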
#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)

#endif /* __KERNEL__ */
#endif /* __ASM_ATOMIC_H */