atomic.h

/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_ATOMIC_H
#define __ASM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>

#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)  { (i) }

#ifdef __KERNEL__
/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)  ACCESS_ONCE((v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))
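/*
 * Note: atomic_read() is a volatile load via ACCESS_ONCE() and atomic_set()
 * is an ordinary store (see the comment above for why that is safe here).
 * Neither implies any memory barrier of its own.
 */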
/*
 * AArch64 UP and SMP safe atomic ops. We use load exclusive and
 * store exclusive to ensure that these are atomic. We may loop
 * to ensure that the update happens.
 */
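/*
 * ATOMIC_OP() expands to the void atomic_<op>() helpers (atomic_add(),
 * atomic_sub() below). They retry the ldxr/stxr sequence until the exclusive
 * store succeeds; they do not act as memory barriers.
 */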
#define ATOMIC_OP(op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
        unsigned long tmp; \
        int result; \
 \
        asm volatile("// atomic_" #op "\n" \
"1: ldxr %w0, %2\n" \
" " #asm_op " %w0, %w0, %w3\n" \
" stxr %w1, %w0, %2\n" \
" cbnz %w1, 1b" \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
        : "Ir" (i)); \
}
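/*
 * ATOMIC_OP_RETURN() expands to atomic_<op>_return(). The store-release
 * (stlxr) plus the smp_mb() after the loop make these fully ordered, and the
 * "memory" clobber stops the compiler from moving accesses across them.
 */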
#define ATOMIC_OP_RETURN(op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
        unsigned long tmp; \
        int result; \
 \
        asm volatile("// atomic_" #op "_return\n" \
"1: ldxr %w0, %2\n" \
" " #asm_op " %w0, %w0, %w3\n" \
" stlxr %w1, %w0, %2\n" \
" cbnz %w1, 1b" \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
        : "Ir" (i) \
        : "memory"); \
 \
        smp_mb(); \
        return result; \
}
#define ATOMIC_OPS(op, asm_op) \
        ATOMIC_OP(op, asm_op) \
        ATOMIC_OP_RETURN(op, asm_op)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, sub)
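/*
 * The two instantiations above generate atomic_add(), atomic_add_return(),
 * atomic_sub() and atomic_sub_return().
 */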
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
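/*
 * atomic_cmpxchg(): if *ptr == old, atomically store new; always return the
 * value that was previously in *ptr. The smp_mb() on either side of the
 * exclusive-access loop makes the operation fully ordered.
 */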
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
        unsigned long tmp;
        int oldval;

        smp_mb();

        asm volatile("// atomic_cmpxchg\n"
"1: ldxr %w1, %2\n"
" cmp %w1, %w3\n"
" b.ne 2f\n"
" stxr %w0, %w4, %2\n"
" cbnz %w0, 1b\n"
"2:"
        : "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
        : "Ir" (old), "r" (new)
        : "cc");

        smp_mb();
        return oldval;
}
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
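/*
 * __atomic_add_unless(): add a to v unless v's counter equals u. Returns the
 * old counter value, so callers compare the result against u to see whether
 * the add took place.
 */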
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, old;

        c = atomic_read(v);
        while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
                c = old;

        return c;
}
#define atomic_inc(v)             atomic_add(1, v)
#define atomic_dec(v)             atomic_sub(1, v)
#define atomic_inc_and_test(v)    (atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)    (atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)      (atomic_add_return(1, v))
#define atomic_dec_return(v)      (atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
#define atomic_add_negative(i,v)  (atomic_add_return(i, v) < 0)
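/*
 * Usage sketch (not part of this header): a minimal, hypothetical reference
 * counter built on the helpers above. my_obj, my_obj_tryget() and
 * my_obj_put() are illustrative names only.
 *
 *	struct my_obj {
 *		atomic_t refcount;	// initialised with ATOMIC_INIT(1)
 *	};
 *
 *	// Take a reference unless the count has already dropped to zero.
 *	static inline bool my_obj_tryget(struct my_obj *obj)
 *	{
 *		return __atomic_add_unless(&obj->refcount, 1, 0) != 0;
 *	}
 *
 *	// Drop a reference; the caller that sees the count hit zero frees it.
 *	static inline bool my_obj_put(struct my_obj *obj)
 *	{
 *		return atomic_dec_and_test(&obj->refcount);
 *	}
 */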
/*
 * 64-bit atomic operations.
 */
#define ATOMIC64_INIT(i)  { (i) }

#define atomic64_read(v)  ACCESS_ONCE((v)->counter)
#define atomic64_set(v,i) (((v)->counter) = (i))
#define ATOMIC64_OP(op, asm_op) \
static inline void atomic64_##op(long i, atomic64_t *v) \
{ \
        long result; \
        unsigned long tmp; \
 \
        asm volatile("// atomic64_" #op "\n" \
"1: ldxr %0, %2\n" \
" " #asm_op " %0, %0, %3\n" \
" stxr %w1, %0, %2\n" \
" cbnz %w1, 1b" \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
        : "Ir" (i)); \
}
#define ATOMIC64_OP_RETURN(op, asm_op) \
static inline long atomic64_##op##_return(long i, atomic64_t *v) \
{ \
        long result; \
        unsigned long tmp; \
 \
        asm volatile("// atomic64_" #op "_return\n" \
"1: ldxr %0, %2\n" \
" " #asm_op " %0, %0, %3\n" \
" stlxr %w1, %0, %2\n" \
" cbnz %w1, 1b" \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
        : "Ir" (i) \
        : "memory"); \
 \
        smp_mb(); \
        return result; \
}
#define ATOMIC64_OPS(op, asm_op) \
        ATOMIC64_OP(op, asm_op) \
        ATOMIC64_OP_RETURN(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, sub)

#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
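/*
 * atomic64_cmpxchg() mirrors atomic_cmpxchg() above, operating on the 64-bit
 * counter of an atomic64_t and returning the previous value. atomic64_xchg()
 * maps onto the generic xchg() from <asm/cmpxchg.h>.
 */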
static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
{
        long oldval;
        unsigned long res;

        smp_mb();

        asm volatile("// atomic64_cmpxchg\n"
"1: ldxr %1, %2\n"
" cmp %1, %3\n"
" b.ne 2f\n"
" stxr %w0, %4, %2\n"
" cbnz %w0, 1b\n"
"2:"
        : "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
        : "Ir" (old), "r" (new)
        : "cc");

        smp_mb();
        return oldval;
}
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
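/*
 * atomic64_dec_if_positive(): decrement v only if the result would not go
 * negative. Returns the new value on success; otherwise the counter is left
 * unchanged and the (negative) result of the attempted decrement is returned.
 */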
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
        long result;
        unsigned long tmp;

        asm volatile("// atomic64_dec_if_positive\n"
"1: ldxr %0, %2\n"
" subs %0, %0, #1\n"
" b.mi 2f\n"
" stlxr %w1, %0, %2\n"
" cbnz %w1, 1b\n"
" dmb ish\n"
"2:"
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
        :
        : "cc", "memory");

        return result;
}
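/*
 * atomic64_add_unless(): add a to v unless the counter equals u. Unlike
 * __atomic_add_unless() above, it returns a boolean: non-zero if the add was
 * performed, zero otherwise. atomic64_inc_not_zero() below builds on it.
 */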
static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
{
        long c, old;

        c = atomic64_read(v);
        while (c != u && (old = atomic64_cmpxchg((v), c, c + a)) != c)
                c = old;

        return c != u;
}
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)             atomic64_add(1LL, (v))
#define atomic64_inc_return(v)      atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)    (atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)             atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)      atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)    (atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)    atomic64_add_unless((v), 1LL, 0LL)
#endif
#endif