/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i) { (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v) ACCESS_ONCE((v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))
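
/*
 * Illustrative use of the plain accessors (editorial sketch, not part of the
 * original header; 'refs' is a hypothetical variable):
 *
 *	atomic_t refs = ATOMIC_INIT(1);
 *
 *	atomic_set(&refs, 2);		plain store (str); safe per the note above
 *	int n = atomic_read(&refs);	volatile load via ACCESS_ONCE
 */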

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops. We use load exclusive and
 * store exclusive to ensure that these are atomic. We may loop
 * to ensure that the update happens.
 */

#define ATOMIC_OP(op, c_op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
        unsigned long tmp; \
        int result; \
        \
        prefetchw(&v->counter); \
        __asm__ __volatile__("@ atomic_" #op "\n" \
"1: ldrex %0, [%3]\n" \
" " #asm_op " %0, %0, %4\n" \
" strex %1, %0, [%3]\n" \
" teq %1, #0\n" \
" bne 1b" \
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
        : "r" (&v->counter), "Ir" (i) \
        : "cc"); \
}
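
/*
 * For reference, an illustrative expansion (editorial sketch, not part of the
 * original header): ATOMIC_OP(add, +=, add) generates
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *
 * whose retry loop reads
 *
 *	1:	ldrex	%0, [%3]	@ exclusive load of v->counter
 *		add	%0, %0, %4	@ apply the operation
 *		strex	%1, %0, [%3]	@ try to store the new value
 *		teq	%1, #0		@ did the store-exclusive fail?
 *		bne	1b		@ yes: another writer intervened, retry
 */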

#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \
        unsigned long tmp; \
        int result; \
        \
        prefetchw(&v->counter); \
        \
        __asm__ __volatile__("@ atomic_" #op "_return\n" \
"1: ldrex %0, [%3]\n" \
" " #asm_op " %0, %0, %4\n" \
" strex %1, %0, [%3]\n" \
" teq %1, #0\n" \
" bne 1b" \
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
        : "r" (&v->counter), "Ir" (i) \
        : "cc"); \
        \
        return result; \
}

#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed

static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
        int oldval;
        unsigned long res;

        prefetchw(&ptr->counter);

        do {
                __asm__ __volatile__("@ atomic_cmpxchg\n"
                "ldrex %1, [%3]\n"
                "mov %0, #0\n"
                "teq %1, %4\n"
                "strexeq %0, %5, [%3]\n"
                : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
                : "r" (&ptr->counter), "Ir" (old), "r" (new)
                : "cc");
        } while (res);

        return oldval;
}
#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
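
/*
 * Illustrative compare-and-swap loop built on atomic_cmpxchg() (editorial
 * sketch, not part of the original header; 'v' and 'x' are hypothetical):
 * store the maximum of the current value and x into v.
 *
 *	int old = atomic_read(v);
 *
 *	while (old < x) {
 *		int seen = atomic_cmpxchg(v, old, x);
 *		if (seen == old)
 *			break;		the swap succeeded
 *		old = seen;		lost a race; retry with the value we saw
 *	}
 */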

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
        int oldval, newval;
        unsigned long tmp;

        smp_mb();
        prefetchw(&v->counter);

        __asm__ __volatile__ ("@ atomic_add_unless\n"
"1: ldrex %0, [%4]\n"
" teq %0, %5\n"
" beq 2f\n"
" add %1, %0, %6\n"
" strex %2, %1, [%4]\n"
" teq %2, #0\n"
" bne 1b\n"
"2:"
        : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (u), "r" (a)
        : "cc");

        if (oldval != u)
                smp_mb();

        return oldval;
}

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

#define ATOMIC_OP(op, c_op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
        unsigned long flags; \
        \
        raw_local_irq_save(flags); \
        v->counter c_op i; \
        raw_local_irq_restore(flags); \
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
        unsigned long flags; \
        int val; \
        \
        raw_local_irq_save(flags); \
        v->counter c_op i; \
        val = v->counter; \
        raw_local_irq_restore(flags); \
        \
        return val; \
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
        int ret;
        unsigned long flags;

        raw_local_irq_save(flags);
        ret = v->counter;
        if (likely(ret == old))
                v->counter = new;
        raw_local_irq_restore(flags);

        return ret;
}

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, old;

        c = atomic_read(v);
        while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
                c = old;

        return c;
}

#endif /* __LINUX_ARM_ARCH__ */

#define ATOMIC_OPS(op, c_op, asm_op) \
        ATOMIC_OP(op, c_op, asm_op) \
        ATOMIC_OP_RETURN(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define atomic_andnot atomic_andnot

ATOMIC_OP(and, &=, and)
ATOMIC_OP(andnot, &= ~, bic)
ATOMIC_OP(or, |=, orr)
ATOMIC_OP(xor, ^=, eor)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_inc(v) atomic_add(1, v)
#define atomic_dec(v) atomic_sub(1, v)

#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v) (atomic_add_return(1, v))
#define atomic_dec_return(v) (atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
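
/*
 * Typical refcount-style use of the derived helpers (editorial sketch, not
 * part of the original header; 'obj', its 'refcount' field and release_obj()
 * are hypothetical):
 *
 *	atomic_inc(&obj->refcount);		take a reference
 *	...
 *	if (atomic_dec_and_test(&obj->refcount))
 *		release_obj(obj);		we dropped the last reference
 */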

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
        long long counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

#ifdef CONFIG_ARM_LPAE
static inline long long atomic64_read(const atomic64_t *v)
{
        long long result;

        __asm__ __volatile__("@ atomic64_read\n"
"       ldrd %0, %H0, [%1]"
        : "=&r" (result)
        : "r" (&v->counter), "Qo" (v->counter)
        );

        return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
        __asm__ __volatile__("@ atomic64_set\n"
"       strd %2, %H2, [%1]"
        : "=Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        );
}
#else
static inline long long atomic64_read(const atomic64_t *v)
{
        long long result;

        __asm__ __volatile__("@ atomic64_read\n"
"       ldrexd %0, %H0, [%1]"
        : "=&r" (result)
        : "r" (&v->counter), "Qo" (v->counter)
        );

        return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
        long long tmp;

        prefetchw(&v->counter);
        __asm__ __volatile__("@ atomic64_set\n"
"1: ldrexd %0, %H0, [%2]\n"
" strexd %0, %3, %H3, [%2]\n"
" teq %0, #0\n"
" bne 1b"
        : "=&r" (tmp), "=Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        : "cc");
}
#endif
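
/*
 * Note (added commentary): without LPAE a plain 64-bit ldrd/strd is not
 * guaranteed to be single-copy atomic, so even atomic64_read() and
 * atomic64_set() must go through the ldrexd/strexd forms above; with LPAE,
 * ldrd/strd of a naturally aligned 64-bit location is atomic and the simple
 * forms suffice.
 */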

#define ATOMIC64_OP(op, op1, op2) \
static inline void atomic64_##op(long long i, atomic64_t *v) \
{ \
        long long result; \
        unsigned long tmp; \
        \
        prefetchw(&v->counter); \
        __asm__ __volatile__("@ atomic64_" #op "\n" \
"1: ldrexd %0, %H0, [%3]\n" \
" " #op1 " %Q0, %Q0, %Q4\n" \
" " #op2 " %R0, %R0, %R4\n" \
" strexd %1, %0, %H0, [%3]\n" \
" teq %1, #0\n" \
" bne 1b" \
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
        : "r" (&v->counter), "r" (i) \
        : "cc"); \
}

#define ATOMIC64_OP_RETURN(op, op1, op2) \
static inline long long \
atomic64_##op##_return_relaxed(long long i, atomic64_t *v) \
{ \
        long long result; \
        unsigned long tmp; \
        \
        prefetchw(&v->counter); \
        \
        __asm__ __volatile__("@ atomic64_" #op "_return\n" \
"1: ldrexd %0, %H0, [%3]\n" \
" " #op1 " %Q0, %Q0, %Q4\n" \
" " #op2 " %R0, %R0, %R4\n" \
" strexd %1, %0, %H0, [%3]\n" \
" teq %1, #0\n" \
" bne 1b" \
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
        : "r" (&v->counter), "r" (i) \
        : "cc"); \
        \
        return result; \
}

#define ATOMIC64_OPS(op, op1, op2) \
        ATOMIC64_OP(op, op1, op2) \
        ATOMIC64_OP_RETURN(op, op1, op2)

ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)

#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed

#define atomic64_andnot atomic64_andnot

ATOMIC64_OP(and, and, and)
ATOMIC64_OP(andnot, bic, bic)
ATOMIC64_OP(or, orr, orr)
ATOMIC64_OP(xor, eor, eor)

#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline long long
atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
{
        long long oldval;
        unsigned long res;

        prefetchw(&ptr->counter);

        do {
                __asm__ __volatile__("@ atomic64_cmpxchg\n"
                "ldrexd %1, %H1, [%3]\n"
                "mov %0, #0\n"
                "teq %1, %4\n"
                "teqeq %H1, %H4\n"
                "strexdeq %0, %5, %H5, [%3]"
                : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
                : "r" (&ptr->counter), "r" (old), "r" (new)
                : "cc");
        } while (res);

        return oldval;
}
#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed

static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
{
        long long result;
        unsigned long tmp;

        prefetchw(&ptr->counter);

        __asm__ __volatile__("@ atomic64_xchg\n"
"1: ldrexd %0, %H0, [%3]\n"
" strexd %1, %4, %H4, [%3]\n"
" teq %1, #0\n"
" bne 1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
        : "r" (&ptr->counter), "r" (new)
        : "cc");

        return result;
}
#define atomic64_xchg_relaxed atomic64_xchg_relaxed

static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
        long long result;
        unsigned long tmp;

        smp_mb();
        prefetchw(&v->counter);

        __asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1: ldrexd %0, %H0, [%3]\n"
" subs %Q0, %Q0, #1\n"
" sbc %R0, %R0, #0\n"
" teq %R0, #0\n"
" bmi 2f\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
" bne 1b\n"
"2:"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter)
        : "cc");

        smp_mb();

        return result;
}

static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
        long long val;
        unsigned long tmp;
        int ret = 1;

        smp_mb();
        prefetchw(&v->counter);

        __asm__ __volatile__("@ atomic64_add_unless\n"
"1: ldrexd %0, %H0, [%4]\n"
" teq %0, %5\n"
" teqeq %H0, %H5\n"
" moveq %1, #0\n"
" beq 2f\n"
" adds %Q0, %Q0, %Q6\n"
" adc %R0, %R0, %R6\n"
" strexd %2, %0, %H0, [%4]\n"
" teq %2, #0\n"
" bne 1b\n"
"2:"
        : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (u), "r" (a)
        : "cc");

        if (ret)
                smp_mb();

        return ret;
}

#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v) atomic64_add(1LL, (v))
#define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v) atomic64_sub(1LL, (v))
#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
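
/*
 * Illustrative use of atomic64_inc_not_zero() (editorial sketch, not part of
 * the original header; 'obj' and its 'refcnt' field are hypothetical): take a
 * reference only if the object is still live.
 *
 *	if (!atomic64_inc_not_zero(&obj->refcnt))
 *		return NULL;		already at zero; do not touch the object
 */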

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_ATOMIC_H */