atomic.h

/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.  prumpf
 */
#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
#  define ATOMIC_HASH_SIZE 4
#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
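
/*
 * Worked example (hypothetical address, assuming L1_CACHE_BYTES == 64):
 * an atomic_t at 0x10040 hashes to slot (0x10040 / 64) & 3, i.e.
 * 0x401 & 3 == 1, so it takes __atomic_hash[1].  Every atomic_t in that
 * cacheline -- and in every fourth cacheline after it -- shares the same
 * lock, trading a little contention for a tiny, fixed-size lock table.
 */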
/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)

#else
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif
/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */

/* It's possible to reduce all atomic operations to either
 * __atomic_add_return, atomic_set and atomic_read (the latter
 * is there only for consistency).
 */

static __inline__ int __atomic_add_return(int i, atomic_t *v)
{
	int ret;
	unsigned long flags;

	_atomic_spin_lock_irqsave(v, flags);
	ret = (v->counter += i);
	_atomic_spin_unlock_irqrestore(v, flags);

	return ret;
}
static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;

	_atomic_spin_lock_irqsave(v, flags);
	v->counter = i;
	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ int atomic_read(const atomic_t *v)
{
	return (*(volatile int *)&(v)->counter);
}

/* exported interface */
#define atomic_cmpxchg(v, o, n)	(cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new)	(xchg(&((v)->counter), new))
/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
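
/*
 * Illustrative sketch (not part of this header): generic code layers
 * atomic_add_unless()/atomic_inc_not_zero() on top of this primitive,
 * roughly as in <linux/atomic.h>:
 *
 *	static inline int atomic_add_unless(atomic_t *v, int a, int u)
 *	{
 *		return __atomic_add_unless(v, a, u) != u;
 *	}
 *
 * so atomic_add_unless(&obj->refcnt, 1, 0) takes a reference only while
 * the refcount is still non-zero -- the classic lookup-side guard
 * against resurrecting an object that is already being freed.
 */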
#define atomic_add(i,v)	((void)(__atomic_add_return(        (i),(v))))
#define atomic_sub(i,v)	((void)(__atomic_add_return(-((int) (i)),(v))))
#define atomic_inc(v)	((void)(__atomic_add_return(          1,(v))))
#define atomic_dec(v)	((void)(__atomic_add_return(         -1,(v))))

#define atomic_add_return(i,v)	(__atomic_add_return( (i),(v)))
#define atomic_sub_return(i,v)	(__atomic_add_return(-(i),(v)))
#define atomic_inc_return(v)	(__atomic_add_return(  1,(v)))
#define atomic_dec_return(v)	(__atomic_add_return( -1,(v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v)	(atomic_inc_return(v) == 0)
#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)
#define atomic_sub_and_test(i,v)	(atomic_sub_return((i),(v)) == 0)
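
/*
 * Typical use of the *_and_test forms (illustrative only; obj and
 * free_obj are hypothetical):
 *
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		free_obj(obj);
 *
 * Because the decrement and the zero test happen atomically, exactly
 * one CPU observes the counter reaching zero, so the object is freed
 * exactly once.
 */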
#define ATOMIC_INIT(i)	{ (i) }
#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i)	{ (i) }

static __inline__ s64
__atomic64_add_return(s64 i, atomic64_t *v)
{
	s64 ret;
	unsigned long flags;

	_atomic_spin_lock_irqsave(v, flags);
	ret = (v->counter += i);
	_atomic_spin_unlock_irqrestore(v, flags);

	return ret;
}
static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;

	_atomic_spin_lock_irqsave(v, flags);
	v->counter = i;
	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ s64
atomic64_read(const atomic64_t *v)
{
	return (*(volatile long *)&(v)->counter);
}

#define atomic64_add(i,v)	((void)(__atomic64_add_return( ((s64)(i)),(v))))
#define atomic64_sub(i,v)	((void)(__atomic64_add_return(-((s64)(i)),(v))))
#define atomic64_inc(v)		((void)(__atomic64_add_return(   1,(v))))
#define atomic64_dec(v)		((void)(__atomic64_add_return(  -1,(v))))

#define atomic64_add_return(i,v)	(__atomic64_add_return( ((s64)(i)),(v)))
#define atomic64_sub_return(i,v)	(__atomic64_add_return(-((s64)(i)),(v)))
#define atomic64_inc_return(v)		(__atomic64_add_return(   1,(v)))
#define atomic64_dec_return(v)		(__atomic64_add_return(  -1,(v)))

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i),(v)) == 0)

/* exported interface */
#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new)	(xchg(&((v)->counter), new))
/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;

	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long c, old, dec;

	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}
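
/*
 * Illustrative use (hypothetical pool counter): a lock-free "try-take"
 * on a counting resource --
 *
 *	if (atomic64_dec_if_positive(&pool->available) < 0)
 *		return -EBUSY;	// nothing taken; counter untouched
 *
 * A negative return means the old value was already <= 0 and the
 * counter was left unmodified.
 */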
#endif /* CONFIG_64BIT */

#endif /* _ASM_PARISC_ATOMIC_H_ */