cmpxchg.h

#ifndef __ASM_ARM_CMPXCHG_H
#define __ASM_ARM_CMPXCHG_H

#include <linux/irqflags.h>
#include <linux/prefetch.h>
#include <asm/barrier.h>

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	prefetchw((const void *)ptr);

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
	case 1:
		asm volatile("@ __xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@ __xchg4\n"
		"1:	ldrex	%0, [%3]\n"
		"	strex	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	case 1:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		raw_local_irq_restore(flags);
		break;
	case 4:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		raw_local_irq_restore(flags);
		break;
#else
	case 1:
		asm volatile("@ __xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@ __xchg4\n"
		"	swp	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		/* Cause a link-time error, the xchg() size is not supported */
		__bad_xchg(ptr, size), ret = 0;
		break;
	}

	return ret;
}
#define xchg_relaxed(ptr, x) ({					\
	(__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr),	\
				   sizeof(*(ptr)));		\
})
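/*
 * Usage sketch (illustrative only; "pending" and handle_pending() are
 * hypothetical names): atomically consume a flag by swapping in zero and
 * acting on whatever value was there before.
 *
 *	static unsigned long pending;
 *
 *	unsigned long work = xchg_relaxed(&pending, 0UL);
 *	if (work)
 *		handle_pending(work);
 *
 * The _relaxed variant implies no memory ordering; the inline asm above
 * contains no barriers.
 */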
#include <asm-generic/cmpxchg-local.h>

#if __LINUX_ARM_ARCH__ < 6
/* min ARCH < ARMv6 */

#ifdef CONFIG_SMP
#error "SMP is not supported on this platform"
#endif

#define xchg xchg_relaxed

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n) ({					\
	(__typeof(*ptr))__cmpxchg_local_generic((ptr),			\
						(unsigned long)(o),	\
						(unsigned long)(n),	\
						sizeof(*(ptr)));	\
})

#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
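/*
 * Usage sketch (illustrative only; "claim" and do_local_work() are
 * hypothetical names): the _local variants are atomic only with respect to
 * the current CPU, which is sufficient for state that no other CPU writes,
 * such as per-CPU data.
 *
 *	static DEFINE_PER_CPU(unsigned long, claim);
 *
 *	if (cmpxchg_local(this_cpu_ptr(&claim), 0UL, 1UL) == 0UL)
 *		do_local_work();
 */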
#include <asm-generic/cmpxchg.h>

#else	/* min ARCH >= ARMv6 */

extern void __bad_cmpxchg(volatile void *ptr, int size);

/*
 * cmpxchg only supports 32-bit operands on ARMv6.
 */
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long oldval, res;

	prefetchw((const void *)ptr);

	switch (size) {
#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
	case 1:
		do {
			asm volatile("@ __cmpxchg1\n"
			"	ldrexb	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexbeq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	case 2:
		do {
			asm volatile("@ __cmpxchg2\n"
			"	ldrexh	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexheq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
#endif
	case 4:
		do {
			asm volatile("@ __cmpxchg4\n"
			"	ldrex	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexeq	%0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	default:
		__bad_cmpxchg(ptr, size);
		oldval = 0;
	}

	return oldval;
}
#define cmpxchg_relaxed(ptr, o, n) ({					\
	(__typeof__(*(ptr)))__cmpxchg((ptr),				\
				      (unsigned long)(o),		\
				      (unsigned long)(n),		\
				      sizeof(*(ptr)));			\
})
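/*
 * Usage sketch (illustrative only; "counter" is a hypothetical variable):
 * the classic compare-and-swap retry loop, bumping a 32-bit value without
 * a lock.  __cmpxchg() returns the value observed in memory, and only
 * stores "new" when that value equals "old", so the loop retries on
 * contention.
 *
 *	static unsigned int counter;
 *
 *	unsigned int old, new;
 *	do {
 *		old = READ_ONCE(counter);
 *		new = old + 1;
 *	} while (cmpxchg_relaxed(&counter, old, new) != old);
 */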
static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	unsigned long ret;

	switch (size) {
#ifdef CONFIG_CPU_V6	/* min ARCH == ARMv6 */
	case 1:
	case 2:
		ret = __cmpxchg_local_generic(ptr, old, new, size);
		break;
#endif
	default:
		ret = __cmpxchg(ptr, old, new, size);
	}

	return ret;
}

#define cmpxchg_local(ptr, o, n) ({					\
	(__typeof(*ptr))__cmpxchg_local((ptr),				\
					(unsigned long)(o),		\
					(unsigned long)(n),		\
					sizeof(*(ptr)));		\
})
static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
					     unsigned long long old,
					     unsigned long long new)
{
	unsigned long long oldval;
	unsigned long res;

	prefetchw(ptr);

	__asm__ __volatile__(
"1:	ldrexd		%1, %H1, [%3]\n"
"	teq		%1, %4\n"
"	teqeq		%H1, %H4\n"
"	bne		2f\n"
"	strexd		%0, %5, %H5, [%3]\n"
"	teq		%0, #0\n"
"	bne		1b\n"
"2:"
	: "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
	: "r" (ptr), "r" (old), "r" (new)
	: "cc");

	return oldval;
}

#define cmpxchg64_relaxed(ptr, o, n) ({					\
	(__typeof__(*(ptr)))__cmpxchg64((ptr),				\
					(unsigned long long)(o),	\
					(unsigned long long)(n));	\
})
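/*
 * Usage sketch (illustrative only; "seq" is a hypothetical 64-bit counter):
 * the same retry pattern as above, but on a doubleword via ldrexd/strexd,
 * which require a naturally aligned 64-bit operand.
 *
 *	static u64 seq;
 *
 *	u64 old, new;
 *	do {
 *		old = READ_ONCE(seq);
 *		new = old + 1;
 *	} while (cmpxchg64_relaxed(&seq, old, new) != old);
 */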
#define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n))

#endif	/* __LINUX_ARM_ARCH__ >= 6 */

#endif	/* __ASM_ARM_CMPXCHG_H */