atomic.h

/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ASM_CSKY_ATOMIC_H
#define __ASM_CSKY_ATOMIC_H

#include <linux/version.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#ifdef CONFIG_CPU_HAS_LDSTEX
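
/*
 * ldex.w/stex.w are C-SKY's load-exclusive/store-exclusive instructions:
 * stex.w leaves a non-zero value in its source register only when the
 * exclusive store succeeds, so "bez %0, 1b" restarts the read-modify-write
 * loop until it completes without interference.
 *
 * __atomic_add_unless(): atomically add @a to @v unless @v was already @u;
 * returns the old value.  The leading smp_mb() is unconditional, while the
 * trailing one is skipped when nothing was written (ret == u).
 */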
#define __atomic_add_unless __atomic_add_unless
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
        unsigned long tmp, ret;

        smp_mb();

        asm volatile (
        "1: ldex.w %0, (%3) \n"
        "   mov %1, %0 \n"
        "   cmpne %0, %4 \n"
        "   bf 2f \n"
        "   add %0, %2 \n"
        "   stex.w %0, (%3) \n"
        "   bez %0, 1b \n"
        "2: \n"
                : "=&r" (tmp), "=&r" (ret)
                : "r" (a), "r"(&v->counter), "r"(u)
                : "memory");

        if (ret != u)
                smp_mb();

        return ret;
}
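
/*
 * ATOMIC_OP() generates the void, unordered operations (atomic_and(),
 * atomic_or(), ...): a bare ldex.w/<op>/stex.w retry loop with no barriers.
 */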
#define ATOMIC_OP(op, c_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
        unsigned long tmp; \
\
        asm volatile ( \
        "1: ldex.w %0, (%2) \n" \
        "   " #op " %0, %1 \n" \
        "   stex.w %0, (%2) \n" \
        "   bez %0, 1b \n" \
                : "=&r" (tmp) \
                : "r" (i), "r"(&v->counter) \
                : "memory"); \
}
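
/*
 * ATOMIC_OP_RETURN() generates atomic_<op>_return(), which returns the *new*
 * value.  The smp_mb() on each side of the loop makes it fully ordered.
 */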
#define ATOMIC_OP_RETURN(op, c_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
        unsigned long tmp, ret; \
\
        smp_mb(); \
        asm volatile ( \
        "1: ldex.w %0, (%3) \n" \
        "   " #op " %0, %2 \n" \
        "   mov %1, %0 \n" \
        "   stex.w %0, (%3) \n" \
        "   bez %0, 1b \n" \
                : "=&r" (tmp), "=&r" (ret) \
                : "r" (i), "r"(&v->counter) \
                : "memory"); \
        smp_mb(); \
\
        return ret; \
}
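
/*
 * ATOMIC_FETCH_OP() generates atomic_fetch_<op>(), which returns the *old*
 * value: the "mov %1, %0" snapshot is taken before the operation instead of
 * after it.  Also fully ordered.
 */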
#define ATOMIC_FETCH_OP(op, c_op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
        unsigned long tmp, ret; \
\
        smp_mb(); \
        asm volatile ( \
        "1: ldex.w %0, (%3) \n" \
        "   mov %1, %0 \n" \
        "   " #op " %0, %2 \n" \
        "   stex.w %0, (%3) \n" \
        "   bez %0, 1b \n" \
                : "=&r" (tmp), "=&r" (ret) \
                : "r" (i), "r"(&v->counter) \
                : "memory"); \
        smp_mb(); \
\
        return ret; \
}

#else /* CONFIG_CPU_HAS_LDSTEX */

#include <linux/irqflags.h>
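
/*
 * CPUs without ldex.w/stex.w fall back to a plain load/modify/store with
 * local interrupts disabled around it.  That is only sufficient on
 * uniprocessor configurations, since masking local IRQs does not exclude
 * other CPUs.
 */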
#define __atomic_add_unless __atomic_add_unless
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
        unsigned long tmp, ret, flags;

        raw_local_irq_save(flags);

        asm volatile (
        "   ldw %0, (%3) \n"
        "   mov %1, %0 \n"
        "   cmpne %0, %4 \n"
        "   bf 2f \n"
        "   add %0, %2 \n"
        "   stw %0, (%3) \n"
        "2: \n"
                : "=&r" (tmp), "=&r" (ret)
                : "r" (a), "r"(&v->counter), "r"(u)
                : "memory");

        raw_local_irq_restore(flags);

        return ret;
}

#define ATOMIC_OP(op, c_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
        unsigned long tmp, flags; \
\
        raw_local_irq_save(flags); \
\
        asm volatile ( \
        "   ldw %0, (%2) \n" \
        "   " #op " %0, %1 \n" \
        "   stw %0, (%2) \n" \
                : "=&r" (tmp) \
                : "r" (i), "r"(&v->counter) \
                : "memory"); \
\
        raw_local_irq_restore(flags); \
}

#define ATOMIC_OP_RETURN(op, c_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
        unsigned long tmp, ret, flags; \
\
        raw_local_irq_save(flags); \
\
        asm volatile ( \
        "   ldw %0, (%3) \n" \
        "   " #op " %0, %2 \n" \
        "   stw %0, (%3) \n" \
        "   mov %1, %0 \n" \
                : "=&r" (tmp), "=&r" (ret) \
                : "r" (i), "r"(&v->counter) \
                : "memory"); \
\
        raw_local_irq_restore(flags); \
\
        return ret; \
}

#define ATOMIC_FETCH_OP(op, c_op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
        unsigned long tmp, ret, flags; \
\
        raw_local_irq_save(flags); \
\
        asm volatile ( \
        "   ldw %0, (%3) \n" \
        "   mov %1, %0 \n" \
        "   " #op " %0, %2 \n" \
        "   stw %0, (%3) \n" \
                : "=&r" (tmp), "=&r" (ret) \
                : "r" (i), "r"(&v->counter) \
                : "memory"); \
\
        raw_local_irq_restore(flags); \
\
        return ret; \
}

#endif /* CONFIG_CPU_HAS_LDSTEX */
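
/*
 * Instantiate the ops.  Each "#define atomic_xxx atomic_xxx" marks the
 * operation as provided by this architecture, so that <asm-generic/atomic.h>,
 * included at the bottom, does not re-emit its own version and only fills in
 * what is still missing (atomic_read(), atomic_set(), ...).  For example,
 * ATOMIC_FETCH_OP(and, &) below expands to int atomic_fetch_and(int i, atomic_t *v).
 */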
#define atomic_add_return atomic_add_return
ATOMIC_OP_RETURN(add, +)
#define atomic_sub_return atomic_sub_return
ATOMIC_OP_RETURN(sub, -)

#define atomic_fetch_add atomic_fetch_add
ATOMIC_FETCH_OP(add, +)
#define atomic_fetch_sub atomic_fetch_sub
ATOMIC_FETCH_OP(sub, -)
#define atomic_fetch_and atomic_fetch_and
ATOMIC_FETCH_OP(and, &)
#define atomic_fetch_or atomic_fetch_or
ATOMIC_FETCH_OP(or, |)
#define atomic_fetch_xor atomic_fetch_xor
ATOMIC_FETCH_OP(xor, ^)

#define atomic_and atomic_and
ATOMIC_OP(and, &)
#define atomic_or atomic_or
ATOMIC_OP(or, |)
#define atomic_xor atomic_xor
ATOMIC_OP(xor, ^)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#include <asm-generic/atomic.h>

#endif /* __ASM_CSKY_ATOMIC_H */