/*
 * include/asm-xtensa/atomic.h
 *
 * Atomic operations that C can't guarantee us.  Useful for resource counting.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2008 Tensilica Inc.
 */

#ifndef _XTENSA_ATOMIC_H
#define _XTENSA_ATOMIC_H

#include <linux/stringify.h>
#include <linux/types.h>

#ifdef __KERNEL__
#include <asm/processor.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i)	{ (i) }

/*
 * This Xtensa implementation assumes that the right mechanism
 * for exclusion is locking interrupts to level EXCM_LEVEL.
 *
 * Locking interrupts looks like this:
 *
 *	rsil	a15, LOCKLEVEL
 *	<code>
 *	wsr	a15, PS
 *	rsync
 *
 * Note that a15 is used here because register allocation done by
 * the compiler is not guaranteed, and a window overflow must not
 * occur between the rsil and wsr instructions.  By using a15 in the
 * rsil, the machine is guaranteed to be in a state where no register
 * reference will cause an overflow.
 */

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v)		(*(volatile int *)&(v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v,i)		((v)->counter = (i))

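/*
 * Illustrative usage only, not part of this header (the variable
 * name is hypothetical):
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_users, 5);
 *	pr_info("%d users\n", atomic_read(&nr_users));
 */
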
/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic_add(int i, atomic_t * v)
{
#if XCHAL_HAVE_S32C1I
	unsigned long tmp;
	int result;

	/*
	 * Compare-and-swap loop: load the old value, arm scompare1
	 * with it, attempt to store old+i with s32c1i, and retry from
	 * 1: if another CPU changed the word in the meantime.
	 */
	__asm__ __volatile__(
			"1:     l32i    %1, %3, 0\n"
			"       wsr     %1, scompare1\n"
			"       add     %0, %1, %2\n"
			"       s32c1i  %0, %3, 0\n"
			"       bne     %0, %1, 1b\n"
			: "=&a" (result), "=&a" (tmp)
			: "a" (i), "a" (v)
			: "memory"
			);
#else
	unsigned int vval;

	/* No s32c1i: mask interrupts around a plain read-modify-write. */
	__asm__ __volatile__(
			"       rsil    a15, "__stringify(LOCKLEVEL)"\n"
			"       l32i    %0, %2, 0\n"
			"       add     %0, %0, %1\n"
			"       s32i    %0, %2, 0\n"
			"       wsr     a15, ps\n"
			"       rsync\n"
			: "=&a" (vval)
			: "a" (i), "a" (v)
			: "a15", "memory"
			);
#endif
}

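/*
 * C-level sketch of the s32c1i retry loop used above (illustrative
 * only, not part of this header):
 *
 *	do {
 *		old = v->counter;			// l32i + wsr scompare1
 *		new = old + i;				// add
 *	} while (cmpxchg(&v->counter, old, new) != old);  // s32c1i + bne
 */
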
/**
 * atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static inline void atomic_sub(int i, atomic_t *v)
{
#if XCHAL_HAVE_S32C1I
	unsigned long tmp;
	int result;

	__asm__ __volatile__(
			"1:     l32i    %1, %3, 0\n"
			"       wsr     %1, scompare1\n"
			"       sub     %0, %1, %2\n"
			"       s32c1i  %0, %3, 0\n"
			"       bne     %0, %1, 1b\n"
			: "=&a" (result), "=&a" (tmp)
			: "a" (i), "a" (v)
			: "memory"
			);
#else
	unsigned int vval;

	__asm__ __volatile__(
			"       rsil    a15, "__stringify(LOCKLEVEL)"\n"
			"       l32i    %0, %2, 0\n"
			"       sub     %0, %0, %1\n"
			"       s32i    %0, %2, 0\n"
			"       wsr     a15, ps\n"
			"       rsync\n"
			: "=&a" (vval)
			: "a" (i), "a" (v)
			: "a15", "memory"
			);
#endif
}

/*
 * We use atomic_{add|sub}_return to define other functions.
 */

static inline int atomic_add_return(int i, atomic_t * v)
{
#if XCHAL_HAVE_S32C1I
	unsigned long tmp;
	int result;

	__asm__ __volatile__(
			"1:     l32i    %1, %3, 0\n"
			"       wsr     %1, scompare1\n"
			"       add     %0, %1, %2\n"
			"       s32c1i  %0, %3, 0\n"
			"       bne     %0, %1, 1b\n"
			"       add     %0, %0, %2\n"
			: "=&a" (result), "=&a" (tmp)
			: "a" (i), "a" (v)
			: "memory"
			);

	/*
	 * After a successful s32c1i, %0 holds the old memory value, so
	 * the final add recomputes old+i as the value to return.
	 */
	return result;
#else
	unsigned int vval;

	__asm__ __volatile__(
			"       rsil    a15,"__stringify(LOCKLEVEL)"\n"
			"       l32i    %0, %2, 0\n"
			"       add     %0, %0, %1\n"
			"       s32i    %0, %2, 0\n"
			"       wsr     a15, ps\n"
			"       rsync\n"
			: "=&a" (vval)
			: "a" (i), "a" (v)
			: "a15", "memory"
			);

	return vval;
#endif
}

static inline int atomic_sub_return(int i, atomic_t * v)
{
#if XCHAL_HAVE_S32C1I
	unsigned long tmp;
	int result;

	__asm__ __volatile__(
			"1:     l32i    %1, %3, 0\n"
			"       wsr     %1, scompare1\n"
			"       sub     %0, %1, %2\n"
			"       s32c1i  %0, %3, 0\n"
			"       bne     %0, %1, 1b\n"
			"       sub     %0, %0, %2\n"
			: "=&a" (result), "=&a" (tmp)
			: "a" (i), "a" (v)
			: "memory"
			);

	return result;
#else
	unsigned int vval;

	__asm__ __volatile__(
			"       rsil    a15,"__stringify(LOCKLEVEL)"\n"
			"       l32i    %0, %2, 0\n"
			"       sub     %0, %0, %1\n"
			"       s32i    %0, %2, 0\n"
			"       wsr     a15, ps\n"
			"       rsync\n"
			: "=&a" (vval)
			: "a" (i), "a" (v)
			: "a15", "memory"
			);

	return vval;
#endif
}

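/*
 * Worked example (illustrative only): both *_return variants return
 * the new value.  If atomic_read(&v) == 3, then atomic_add_return(2, &v)
 * leaves the counter at 5 and returns 5; atomic_sub_return(2, &v)
 * would then leave 3 and return 3.
 */
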
/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
#define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0)

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
#define atomic_inc(v) atomic_add(1,(v))

/**
 * atomic_inc_return - increment atomic variable and return result
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1 and returns the new value.
 */
#define atomic_inc_return(v) atomic_add_return(1,(v))

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
#define atomic_dec(v) atomic_sub(1,(v))

/**
 * atomic_dec_return - decrement atomic variable and return result
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and returns the new value.
 */
#define atomic_dec_return(v) atomic_sub_return(1,(v))

/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
#define atomic_dec_and_test(v) (atomic_sub_return(1,(v)) == 0)

/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_add_return(1,(v)) == 0)

/**
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * the result is greater than or equal to zero.
 */
#define atomic_add_negative(i,v) (atomic_add_return((i),(v)) < 0)

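/*
 * Illustrative refcounting sketch (hypothetical names, not part of
 * this header):
 *
 *	static atomic_t refcnt = ATOMIC_INIT(1);
 *
 *	void obj_get(void) { atomic_inc(&refcnt); }
 *
 *	void obj_put(void)
 *	{
 *		if (atomic_dec_and_test(&refcnt))
 *			obj_free();		// last reference gone
 *	}
 */
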
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

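/*
 * Shape of a typical atomic_cmpxchg() retry loop (illustrative only;
 * transform() is a hypothetical pure function, and
 * __atomic_add_unless() below is a real user of this pattern):
 *
 *	int old, new;
 *	do {
 *		old = atomic_read(&v);
 *		new = transform(old);
 *	} while (atomic_cmpxchg(&v, old, new) != old);
 */
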
/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}

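/*
 * The generic layer builds on this helper; include/linux/atomic.h
 * wraps it roughly as follows (paraphrased):
 *
 *	static inline int atomic_add_unless(atomic_t *v, int a, int u)
 *	{
 *		return __atomic_add_unless(v, a, u) != u;
 *	}
 *	#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)
 */
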
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
#if XCHAL_HAVE_S32C1I
	unsigned long tmp;
	int result;

	__asm__ __volatile__(
			"1:     l32i    %1, %3, 0\n"
			"       wsr     %1, scompare1\n"
			"       and     %0, %1, %2\n"
			"       s32c1i  %0, %3, 0\n"
			"       bne     %0, %1, 1b\n"
			: "=&a" (result), "=&a" (tmp)
			: "a" (~mask), "a" (v)
			: "memory"
			);
#else
	unsigned int all_f = -1;
	unsigned int vval;

	/*
	 * %1 and %4 name the same register (the "1" constraint ties
	 * them): the xor computes mask ^ 0xffffffff, i.e. ~mask, in
	 * place, and the following and clears those bits in the
	 * counter.
	 */
	__asm__ __volatile__(
			"       rsil    a15,"__stringify(LOCKLEVEL)"\n"
			"       l32i    %0, %2, 0\n"
			"       xor     %1, %4, %3\n"
			"       and     %0, %0, %4\n"
			"       s32i    %0, %2, 0\n"
			"       wsr     a15, ps\n"
			"       rsync\n"
			: "=&a" (vval), "=a" (mask)
			: "a" (v), "a" (all_f), "1" (mask)
			: "a15", "memory"
			);
#endif
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
#if XCHAL_HAVE_S32C1I
	unsigned long tmp;
	int result;

	__asm__ __volatile__(
			"1:     l32i    %1, %3, 0\n"
			"       wsr     %1, scompare1\n"
			"       or      %0, %1, %2\n"
			"       s32c1i  %0, %3, 0\n"
			"       bne     %0, %1, 1b\n"
			: "=&a" (result), "=&a" (tmp)
			: "a" (mask), "a" (v)
			: "memory"
			);
#else
	unsigned int vval;

	__asm__ __volatile__(
			"       rsil    a15,"__stringify(LOCKLEVEL)"\n"
			"       l32i    %0, %2, 0\n"
			"       or      %0, %0, %1\n"
			"       s32i    %0, %2, 0\n"
			"       wsr     a15, ps\n"
			"       rsync\n"
			: "=&a" (vval)
			: "a" (mask), "a" (v)
			: "a15", "memory"
			);
#endif
}

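/*
 * Illustrative usage (FLAG_PENDING is a hypothetical bit, flags a
 * hypothetical atomic_t):
 *
 *	atomic_set_mask(FLAG_PENDING, &flags);		// set the bit
 *	atomic_clear_mask(FLAG_PENDING, &flags);	// clear it again
 */
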
#endif /* __KERNEL__ */

#endif /* _XTENSA_ATOMIC_H */