/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.  prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>	/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_t's.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
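
/*
 * Worked example of the hash (illustrative only; assumes 64-byte
 * cachelines, i.e. L1_CACHE_BYTES == 64):
 *
 *	ATOMIC_HASH(0x1000)  ->  (0x1000/64) & 3 == 0  ->  __atomic_hash[0]
 *	ATOMIC_HASH(0x1040)  ->  (0x1040/64) & 3 == 1  ->  __atomic_hash[1]
 *
 * Everything within one cacheline shares a lock; unrelated atomics may
 * still collide on a lock (harmless false sharing), since correctness
 * only requires that *some* lock serializes each address.
 */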
/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)

#else
# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif
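
/*
 * Every read-modify-write below follows the same shape (sketch, not an
 * additional definition in this header):
 *
 *	unsigned long flags;
 *
 *	_atomic_spin_lock_irqsave(v, flags);	// IRQs off, hashed lock held
 *	... modify v->counter ...
 *	_atomic_spin_unlock_irqrestore(v, flags);
 *
 * Disabling IRQs matters: an interrupt handler touching an atomic_t
 * that hashes to the same lock would otherwise deadlock on this CPU.
 */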
/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */

static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;

	_atomic_spin_lock_irqsave(v, flags);
	v->counter = i;
	_atomic_spin_unlock_irqrestore(v, flags);
}

#define atomic_set_release(v, i)	atomic_set((v), (i))

static __inline__ int atomic_read(const atomic_t *v)
{
	return READ_ONCE((v)->counter);
}

/* exported interface */
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
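
/*
 * Typical use (illustrative; "obj" and its refcnt are hypothetical):
 * take a reference only while the object is still live, the classic
 * "inc unless zero" idiom -
 *
 *	if (atomic_fetch_add_unless(&obj->refcnt, 1, 0) == 0)
 *		return NULL;	// already dead, don't resurrect it
 *
 * The generic atomic_inc_not_zero() is built from exactly this
 * operation.
 */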
#define ATOMIC_OP(op, c_op) \
static __inline__ void atomic_##op(int i, atomic_t *v) \
{ \
	unsigned long flags; \
	\
	_atomic_spin_lock_irqsave(v, flags); \
	v->counter c_op i; \
	_atomic_spin_unlock_irqrestore(v, flags); \
}

#define ATOMIC_OP_RETURN(op, c_op) \
static __inline__ int atomic_##op##_return(int i, atomic_t *v) \
{ \
	unsigned long flags; \
	int ret; \
	\
	_atomic_spin_lock_irqsave(v, flags); \
	ret = (v->counter c_op i); \
	_atomic_spin_unlock_irqrestore(v, flags); \
	\
	return ret; \
}

#define ATOMIC_FETCH_OP(op, c_op) \
static __inline__ int atomic_fetch_##op(int i, atomic_t *v) \
{ \
	unsigned long flags; \
	int ret; \
	\
	_atomic_spin_lock_irqsave(v, flags); \
	ret = v->counter; \
	v->counter c_op i; \
	_atomic_spin_unlock_irqrestore(v, flags); \
	\
	return ret; \
}

#define ATOMIC_OPS(op, c_op) \
	ATOMIC_OP(op, c_op) \
	ATOMIC_OP_RETURN(op, c_op) \
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op) \
	ATOMIC_OP(op, c_op) \
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(and, &=)
ATOMIC_OPS(or, |=)
ATOMIC_OPS(xor, ^=)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
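
/*
 * For reference, ATOMIC_OPS(add, +=) above generates three flavours
 * (sketch of the expansion, not additional declarations):
 *
 *	void atomic_add(int i, atomic_t *v);	     // no return value
 *	int  atomic_add_return(int i, atomic_t *v);  // returns the NEW value
 *	int  atomic_fetch_add(int i, atomic_t *v);   // returns the OLD value
 *
 * and likewise for sub; and/or/xor get only the void and fetch_
 * variants, matching the generic atomic API.
 */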
#define atomic_inc(v)	(atomic_add(   1,(v)))
#define atomic_dec(v)	(atomic_add(  -1,(v)))

#define atomic_inc_return(v)	(atomic_add_return(   1,(v)))
#define atomic_dec_return(v)	(atomic_add_return(  -1,(v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v)	(atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i),(v)) == 0)
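
/*
 * Illustrative use of the *_and_test helpers ("obj" and "obj_free"
 * are hypothetical): the last reference dropper does the cleanup -
 *
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		obj_free(obj);
 *
 * Exactly one caller sees the zero result, so the free runs once.
 */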
#define ATOMIC_INIT(i)	{ (i) }

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) { (i) }

#define ATOMIC64_OP(op, c_op) \
static __inline__ void atomic64_##op(s64 i, atomic64_t *v) \
{ \
	unsigned long flags; \
	\
	_atomic_spin_lock_irqsave(v, flags); \
	v->counter c_op i; \
	_atomic_spin_unlock_irqrestore(v, flags); \
}

#define ATOMIC64_OP_RETURN(op, c_op) \
static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v) \
{ \
	unsigned long flags; \
	s64 ret; \
	\
	_atomic_spin_lock_irqsave(v, flags); \
	ret = (v->counter c_op i); \
	_atomic_spin_unlock_irqrestore(v, flags); \
	\
	return ret; \
}

#define ATOMIC64_FETCH_OP(op, c_op) \
static __inline__ s64 atomic64_fetch_##op(s64 i, atomic64_t *v) \
{ \
	unsigned long flags; \
	s64 ret; \
	\
	_atomic_spin_lock_irqsave(v, flags); \
	ret = v->counter; \
	v->counter c_op i; \
	_atomic_spin_unlock_irqrestore(v, flags); \
	\
	return ret; \
}

#define ATOMIC64_OPS(op, c_op) \
	ATOMIC64_OP(op, c_op) \
	ATOMIC64_OP_RETURN(op, c_op) \
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op) \
	ATOMIC64_OP(op, c_op) \
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;

	_atomic_spin_lock_irqsave(v, flags);
	v->counter = i;
	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ s64
atomic64_read(const atomic64_t *v)
{
	return READ_ONCE((v)->counter);
}

#define atomic64_inc(v)		(atomic64_add(   1,(v)))
#define atomic64_dec(v)		(atomic64_add(  -1,(v)))

#define atomic64_inc_return(v)		(atomic64_add_return(   1,(v)))
#define atomic64_dec_return(v)		(atomic64_add_return(  -1,(v)))

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i),(v)) == 0)
/* exported interface */
#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if the add was done, zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;

	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}
/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long c, old, dec;

	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}
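
/*
 * Illustrative use ("sem" is hypothetical): a counting-semaphore style
 * "down" that never pushes the counter below zero -
 *
 *	if (atomic64_dec_if_positive(&sem->count) < 0)
 *		return -EBUSY;	// no slots left; counter unchanged
 */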
#endif /* CONFIG_64BIT */

#endif /* _ASM_PARISC_ATOMIC_H_ */