atomic.h

/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>

#ifndef CONFIG_ARC_PLAT_EZNPS

#define atomic_read(v)	READ_ONCE((v)->counter)

#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned int val;						\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val) /* Early clobber to prevent reg reuse */	\
	: [ctr]	"r"	(&v->counter),	/* Not "m": llock only supports reg direct addr mode */	\
	  [i]	"ir"	(i)						\
	: "cc");							\
}
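
/*
 * Illustrative note (not part of the original header): instantiating the
 * macro above as ATOMIC_OP(add, +=, add) generates roughly the function
 * below.  The LLOCK/SCOND pair keeps retrying (bnz 1b) until the
 * store-conditional succeeds, i.e. no other CPU touched the counter
 * between the locked load and the conditional store:
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned int val;
 *
 *		__asm__ __volatile__(
 *		"1:	llock   %[val], [%[ctr]]	\n"
 *		"	add     %[val], %[val], %[i]	\n"
 *		"	scond   %[val], [%[ctr]]	\n"
 *		"	bnz     1b			\n"
 *		: [val] "=&r" (val)
 *		: [ctr] "r" (&v->counter), [i] "ir" (i)
 *		: "cc");
 *	}
 */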

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int val;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return val;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned int val, orig;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[orig], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[orig], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val),						\
	  [orig] "=&r"	(orig)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return orig;							\
}
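
/*
 * Usage sketch (illustrative, not from the original header): the
 * *_return() variants hand back the value *after* the operation, while
 * the fetch_*() variants hand back the value the counter held *before*
 * the operation.  Both are bracketed by smp_mb(), unlike the plain
 * atomic_##op() helpers above which provide no ordering:
 *
 *	atomic_t cnt = ATOMIC_INIT(5);
 *
 *	int newval = atomic_add_return(3, &cnt);	// newval == 8, cnt == 8
 *	int oldval = atomic_fetch_add(3, &cnt);		// oldval == 8, cnt == 11
 */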

#else	/* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

/* violating atomic_xxx API locking protocol in UP for optimization sake */
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#else

static inline void atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
	 * sequence
	 *
	 * Thus atomic_set() despite being 1 insn (and seemingly atomic)
	 * requires the locking.
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	WRITE_ONCE(v->counter, i);
	atomic_ops_unlock(flags);
}

#endif

/*
 * Non hardware assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 */
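
/*
 * For orientation only (the real definitions live in <asm/smp.h>):
 * atomic_ops_lock()/atomic_ops_unlock() reduce to plain IRQ save/restore
 * on UP, and to IRQ save/restore plus a shared arch spinlock on SMP,
 * roughly:
 *
 *	#ifndef CONFIG_SMP
 *	#define atomic_ops_lock(flags)		local_irq_save(flags)
 *	#define atomic_ops_unlock(flags)	local_irq_restore(flags)
 *	#else
 *	 ... same, but additionally taking a global spinlock shared by all
 *	 emulated atomics so that concurrent CPUs serialize ...
 *	#endif
 */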

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	atomic_ops_lock(flags);						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long temp;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	temp = v->counter;						\
	temp c_op i;							\
	v->counter = temp;						\
	atomic_ops_unlock(flags);					\
									\
	return temp;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long orig;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	orig = v->counter;						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
									\
	return orig;							\
}

#endif /* !CONFIG_ARC_HAS_LLSC */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define atomic_andnot atomic_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)
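
/*
 * Illustrative expansion (not part of the original header): the second
 * ATOMIC_OPS() block only emits the void and fetch_ flavours, so e.g.
 * ATOMIC_OPS(andnot, &= ~, bic) yields atomic_andnot() and
 * atomic_fetch_andnot(); the C fallback does "v->counter &= ~i" while the
 * LLSC version uses the BIC (bit-clear) instruction.  Typical use, with
 * hypothetical names, is clearing flag bits:
 *
 *	atomic_andnot(FLAG_BUSY, &obj->state);	// state &= ~FLAG_BUSY
 */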

#else	/* CONFIG_ARC_PLAT_EZNPS */

static inline int atomic_read(const atomic_t *v)
{
	int temp;

	__asm__ __volatile__(
	"	ld.di %0, [%1]"
	: "=r"(temp)
	: "r"(&v->counter)
	: "memory");
	return temp;
}

static inline void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__(
	"	st.di %0,[%1]"
	:
	: "r"(i), "r"(&v->counter)
	: "memory");
}

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	:								\
	: "r"(i), "r"(&v->counter), "i"(asm_op)				\
	: "r2", "r3", "memory");					\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int temp = i;						\
									\
	/* Explicit full memory barrier needed before/after */		\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	"	mov %0, r2"						\
	: "+r"(temp)							\
	: "r"(&v->counter), "i"(asm_op)					\
	: "r2", "r3", "memory");					\
									\
	smp_mb();							\
									\
	temp c_op i;							\
									\
	return temp;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned int temp = i;						\
									\
	/* Explicit full memory barrier needed before/after */		\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	"	mov %0, r2"						\
	: "+r"(temp)							\
	: "r"(&v->counter), "i"(asm_op)					\
	: "r2", "r3", "memory");					\
									\
	smp_mb();							\
									\
	return temp;							\
}
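
/*
 * Informal note (not from the original header): on EZNPS the whole
 * read-modify-write is a single custom "atomic op to memory" instruction.
 * Its raw encoding is passed in as asm_op (the CTOP_INST_*_DI_R2_R2_R3
 * constants below) and emitted verbatim with ".word"; the instruction
 * takes its operands in the fixed registers r2/r3, which is why the
 * wrappers above move %0/%1 into r2/r3 by hand and clobber both.  So,
 * e.g., ATOMIC_OP(add, +=, CTOP_INST_AADD_DI_R2_R2_R3) becomes an
 * atomic_add() that is one such instruction plus the surrounding
 * register shuffling.
 */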

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
#define atomic_sub(i, v) atomic_add(-(i), (v))
#define atomic_sub_return(i, v) atomic_add_return(-(i), (v))

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
#define atomic_andnot(mask, v) atomic_and(~(mask), (v))
ATOMIC_OPS(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)

#endif	/* CONFIG_ARC_PLAT_EZNPS */

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v
 */
#define __atomic_add_unless(v, a, u)					\
({									\
	int c, old;							\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	c = atomic_read(v);						\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
		c = old;						\
									\
	smp_mb();							\
									\
	c;								\
})
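
/*
 * Usage sketch (illustrative only): the generic atomic_add_unless()
 * wrapper in <linux/atomic.h> builds on __atomic_add_unless() and returns
 * true iff the add actually happened; that is what makes
 * atomic_inc_not_zero() below useful for refcount-style "take a reference
 * only while the object is still live" (obj/refcnt are hypothetical):
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;	// counter was zero: object being torn down
 */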

#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)

#define atomic_inc(v)			atomic_add(1, v)
#define atomic_dec(v)			atomic_sub(1, v)

#define atomic_inc_and_test(v)		(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i, v)	(atomic_add_return(i, v) < 0)

#define ATOMIC_INIT(i)			{ (i) }

#include <asm-generic/atomic64.h>

#endif	/* !__ASSEMBLY__ */

#endif	/* _ASM_ARC_ATOMIC_H */