atomic_32.h

/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * Do not include directly; use <linux/atomic.h>.
 */

#ifndef _ASM_TILE_ATOMIC_32_H
#define _ASM_TILE_ATOMIC_32_H

#include <asm/barrier.h>
#include <arch/chip.h>

#ifndef __ASSEMBLY__

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic_add(int i, atomic_t *v)
{
        _atomic_xchg_add(&v->counter, i);
}

/**
 * atomic_add_return - add integer and return
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
        smp_mb();  /* barrier for proper semantics */
        return _atomic_xchg_add(&v->counter, i) + i;
}

/**
 * __atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns the old value of @v.
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
        smp_mb();  /* barrier for proper semantics */
        return _atomic_xchg_add_unless(&v->counter, a, u);
}
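
/*
 * Illustrative usage sketch only: example_counter_get() is a
 * hypothetical helper added for this write-up, not part of this header
 * or of the kernel API.  It shows the usual pattern for
 * __atomic_add_unless(): take a reference on a counter only if it has
 * not already dropped to zero.
 */
static inline int example_counter_get(atomic_t *refs)
{
        /* The old value is returned, so zero means the count was already 0. */
        return __atomic_add_unless(refs, 1, 0) != 0;
}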

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @n: required value
 *
 * Atomically sets the value of @v to @n.
 *
 * atomic_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
static inline void atomic_set(atomic_t *v, int n)
{
        _atomic_xchg(&v->counter, n);
}
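
/*
 * Illustrative interleaving (an editorial sketch, not text from this
 * file) of why a raw store can be lost: the other atomic ops here are
 * built as a load/modify/store sequence under a hashed lock, so a
 * plain "v->counter = n" issued from another CPU can land between that
 * load and store and then be overwritten:
 *
 *     CPU 0: atomic_add(i, v)          CPU 1: raw "v->counter = n"
 *     ----------------------------     ---------------------------
 *     old = v->counter;
 *                                      v->counter = n;
 *     v->counter = old + i;            (n is silently overwritten)
 *
 * Routing the set through _atomic_xchg() serializes it under the same
 * lock as the read-modify-write ops, so the store cannot disappear.
 */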

/* A 64bit atomic type */

typedef struct {
        long long counter;
} atomic64_t;

#define ATOMIC64_INIT(val) { (val) }

/**
 * atomic64_read - read atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 */
static inline long long atomic64_read(const atomic64_t *v)
{
        /*
         * Requires an atomic op to read both 32-bit parts consistently.
         * Casting away const is safe since the atomic support routines
         * do not write to memory if the value has not been modified.
         */
        return _atomic64_xchg_add((long long *)&v->counter, 0);
}
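
/*
 * Editorial sketch (an assumption about the underlying issue, not text
 * from this file): on a 32-bit core a plain 64-bit load is performed
 * as two separate word loads, roughly
 *
 *     low  = ((volatile unsigned int *)&v->counter)[0];
 *     high = ((volatile unsigned int *)&v->counter)[1];
 *
 * and a concurrent atomic64_set() can complete between the two, so the
 * caller would observe a value that was never actually stored.  Adding
 * zero through _atomic64_xchg_add() performs the read under the same
 * lock as the writers, so both halves are seen consistently.
 */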

/**
 * atomic64_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic64_add(long long i, atomic64_t *v)
{
        _atomic64_xchg_add(&v->counter, i);
}

/**
 * atomic64_add_return - add integer and return
 * @v: pointer of type atomic64_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
        smp_mb();  /* barrier for proper semantics */
        return _atomic64_xchg_add(&v->counter, i) + i;
}

/**
 * atomic64_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline long long atomic64_add_unless(atomic64_t *v, long long a,
                                            long long u)
{
        smp_mb();  /* barrier for proper semantics */
        return _atomic64_xchg_add_unless(&v->counter, a, u) != u;
}

/**
 * atomic64_set - set atomic variable
 * @v: pointer of type atomic64_t
 * @n: required value
 *
 * Atomically sets the value of @v to @n.
 *
 * atomic64_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
static inline void atomic64_set(atomic64_t *v, long long n)
{
        _atomic64_xchg(&v->counter, n);
}

#define atomic64_add_negative(a, v)     (atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)                 atomic64_add(1LL, (v))
#define atomic64_inc_return(v)          atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)        (atomic64_inc_return(v) == 0)
#define atomic64_sub_return(i, v)       atomic64_add_return(-(i), (v))
#define atomic64_sub_and_test(a, v)     (atomic64_sub_return((a), (v)) == 0)
#define atomic64_sub(i, v)              atomic64_add(-(i), (v))
#define atomic64_dec(v)                 atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)          atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)        (atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)        atomic64_add_unless((v), 1LL, 0LL)
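
/*
 * Illustrative usage sketch only: example_get64() and example_put64()
 * are hypothetical helpers added for this write-up, not part of this
 * header or of the kernel API.  They show the usual refcount-style
 * pairing of the macros above.
 */
static inline int example_get64(atomic64_t *refs)
{
        /* Take a reference only if the count has not already hit zero. */
        return atomic64_inc_not_zero(refs);
}

static inline int example_put64(atomic64_t *refs)
{
        /* Drop a reference; a nonzero return means this was the last one. */
        return atomic64_dec_and_test(refs);
}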

#endif /* !__ASSEMBLY__ */

/*
 * Internal definitions only beyond this point.
 */

/*
 * Number of atomic locks in atomic_locks[]. Must be a power of two.
 * There is no reason for more than PAGE_SIZE / 8 entries, since that
 * is the maximum number of pointer bits we can use to index this.
 * And we cannot have more than PAGE_SIZE / 4, since this has to
 * fit on a single page and each entry takes 4 bytes.
 */
#define ATOMIC_HASH_SHIFT (PAGE_SHIFT - 3)
#define ATOMIC_HASH_SIZE (1 << ATOMIC_HASH_SHIFT)

#ifndef __ASSEMBLY__
extern int atomic_locks[];
#endif

/*
 * All the code that may fault while holding an atomic lock must
 * place the pointer to the lock in ATOMIC_LOCK_REG so the fault code
 * can correctly release and reacquire the lock.  Note that we
 * mention the register number in a comment in "lib/atomic_asm.S" to
 * keep assembly coders from using this register by mistake, so if it
 * is changed here, change that comment as well.
 */
#define ATOMIC_LOCK_REG 20
#define ATOMIC_LOCK_REG_NAME r20

#ifndef __ASSEMBLY__
/* Called from setup to initialize a hash table to point to per_cpu locks. */
void __init_atomic_per_cpu(void);

#ifdef CONFIG_SMP
/* Support releasing the atomic lock in do_page_fault_ics(). */
void __atomic_fault_unlock(int *lock_ptr);
#endif

/* Return a pointer to the lock for the given address. */
int *__atomic_hashed_lock(volatile void *v);
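
/*
 * For illustration only (an editorial assumption, not the actual
 * implementation, which lives outside this header): conceptually the
 * lock for an address is chosen by hashing low pointer bits into the
 * ATOMIC_HASH_SIZE-entry table, along the lines of
 *
 *     index = ((unsigned long)v >> 3) & (ATOMIC_HASH_SIZE - 1);
 *     lock  = &atomic_locks[index];
 *
 * so a given address always resolves to the same lock while distinct
 * addresses spread across the table, matching the PAGE_SIZE / 8 sizing
 * argument above.
 */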

/* Private helper routines in lib/atomic_asm_32.S */
struct __get_user {
        unsigned long val;
        int err;
};

extern struct __get_user __atomic_cmpxchg(volatile int *p,
                                          int *lock, int o, int n);
extern struct __get_user __atomic_xchg(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
                                                  int *lock, int o, int n);
extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
                                    long long o, long long n);
extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n);
extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
                                     long long n);
extern long long __atomic64_xchg_add_unless(volatile long long *p,
                                            int *lock, long long o, long long n);

/* Return failure from the atomic wrappers. */
struct __get_user __atomic_bad_address(int __user *addr);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_32_H */