/* MN10300 Atomic counter operations
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <asm/irqflags.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#ifndef CONFIG_SMP
#include <asm-generic/atomic.h>
#else

/*
 * Atomic operations that C can't guarantee for us.  Useful for
 * resource counting etc.
 */
#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__
/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_read(v)	(ACCESS_ONCE((v)->counter))
/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_set(v, i)	(((v)->counter) = (i))
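
/*
 * Usage sketch (illustrative only, not part of the API): declaring,
 * initialising and accessing a counter with the macros above.  All
 * names below are hypothetical, and the block is guarded out so the
 * header still compiles unchanged.
 */
#if 0
static atomic_t example_count = ATOMIC_INIT(0);

static void example_reset(void)
{
        atomic_set(&example_count, 0);          /* plain store, no barrier */
}

static int example_peek(void)
{
        return atomic_read(&example_count);     /* single volatile load */
}
#endif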
/**
 * atomic_add_return - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns the result.
 * Note that the guaranteed useful range of an atomic_t is only 24 bits.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
        int retval;
#ifdef CONFIG_SMP
        int status;

        asm volatile(
                "1: mov  %4,(_AAR,%3)   \n"     /* begin atomic op on &v->counter */
                "   mov  (_ADR,%3),%1   \n"     /* fetch current value */
                "   add  %5,%1          \n"
                "   mov  %1,(_ADR,%3)   \n"     /* attempt the store */
                "   mov  (_ADR,%3),%0   \n"     /* flush */
                "   mov  (_ASR,%3),%0   \n"     /* check status */
                "   or   %0,%0          \n"
                "   bne  1b             \n"     /* non-zero status: retry */
                : "=&r"(status), "=&r"(retval), "=m"(v->counter)
                : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
                : "memory", "cc");
#else
        unsigned long flags;

        flags = arch_local_cli_save();  /* UP: exclude interrupts instead */
        retval = v->counter;
        retval += i;
        v->counter = retval;
        arch_local_irq_restore(flags);
#endif
        return retval;
}
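
/*
 * For reference, the SMP path above is roughly the following C
 * pseudocode.  This is an assumed reading of the atomic-unit
 * handshake, inferred from the register names and the retry loop
 * rather than from the MN10300 manual:
 *
 *      do {
 *              _AAR = &v->counter;     // latch the target address
 *              tmp  = _ADR;            // value loaded from that address
 *              tmp += i;
 *              _ADR = tmp;             // try to store the new value
 *              status = _ASR;          // non-zero => op was disturbed
 *      } while (status != 0);
 *      return tmp;
 */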
/**
 * atomic_sub_return - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns the result.
 * Note that the guaranteed useful range of an atomic_t is only 24 bits.
 */
static inline int atomic_sub_return(int i, atomic_t *v)
{
        int retval;
#ifdef CONFIG_SMP
        int status;

        asm volatile(
                "1: mov  %4,(_AAR,%3)   \n"     /* begin atomic op on &v->counter */
                "   mov  (_ADR,%3),%1   \n"     /* fetch current value */
                "   sub  %5,%1          \n"
                "   mov  %1,(_ADR,%3)   \n"     /* attempt the store */
                "   mov  (_ADR,%3),%0   \n"     /* flush */
                "   mov  (_ASR,%3),%0   \n"     /* check status */
                "   or   %0,%0          \n"
                "   bne  1b             \n"     /* non-zero status: retry */
                : "=&r"(status), "=&r"(retval), "=m"(v->counter)
                : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
                : "memory", "cc");
#else
        unsigned long flags;

        flags = arch_local_cli_save();  /* UP: exclude interrupts instead */
        retval = v->counter;
        retval -= i;
        v->counter = retval;
        arch_local_irq_restore(flags);
#endif
        return retval;
}
/* atomic_add_negative - add @i to @v and test if the result is negative */
static inline int atomic_add_negative(int i, atomic_t *v)
{
        return atomic_add_return(i, v) < 0;
}

static inline void atomic_add(int i, atomic_t *v)
{
        atomic_add_return(i, v);
}

static inline void atomic_sub(int i, atomic_t *v)
{
        atomic_sub_return(i, v);
}

static inline void atomic_inc(atomic_t *v)
{
        atomic_add_return(1, v);
}

static inline void atomic_dec(atomic_t *v)
{
        atomic_sub_return(1, v);
}
#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))

#define atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
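
/*
 * Usage sketch (illustrative only): the classic refcount pattern built
 * from atomic_inc() and atomic_dec_and_test().  struct example_obj and
 * the helpers are hypothetical, and kfree() would need <linux/slab.h>;
 * the block is guarded out so the header still compiles unchanged.
 */
#if 0
struct example_obj {
        atomic_t        refcnt;
};

static void example_get(struct example_obj *obj)
{
        atomic_inc(&obj->refcnt);
}

static void example_put(struct example_obj *obj)
{
        /* dec_and_test is true only for the caller that hit zero */
        if (atomic_dec_and_test(&obj->refcnt))
                kfree(obj);
}
#endif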
/*
 * __atomic_add_unless - add @a to @v, so long as @v was not @u;
 * returns the old value of @v
 */
#define __atomic_add_unless(v, a, u)				\
({								\
	int c, old;						\
	c = atomic_read(v);					\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
		c = old;					\
	c;							\
})
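
/*
 * Sketch of how generic code builds on this (illustrative only):
 * <linux/atomic.h> of this era defines atomic_add_unless() as
 * __atomic_add_unless(v, a, u) != u, giving patterns such as the
 * hypothetical "take a reference unless the object is already dead":
 */
#if 0
static int example_try_get(struct example_obj *obj)
{
        /* increment refcnt only if it has not already reached zero */
        return __atomic_add_unless(&obj->refcnt, 1, 0) != 0;
}
#endif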
#define atomic_xchg(ptr, v)		(xchg(&(ptr)->counter, (v)))
#define atomic_cmpxchg(v, old, new)	(cmpxchg(&((v)->counter), (old), (new)))
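
/*
 * Usage sketch (illustrative only): the standard cmpxchg retry loop,
 * here tracking a high-water mark.  example_track_max() is a
 * hypothetical helper, not part of this header.
 */
#if 0
static void example_track_max(atomic_t *max, int seen)
{
        int old = atomic_read(max);

        while (seen > old) {
                int prev = atomic_cmpxchg(max, old, seen);

                if (prev == old)
                        break;          /* our value was installed */
                old = prev;             /* lost the race; re-evaluate */
        }
}
#endif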
/**
 * atomic_clear_mask - Atomically clear bits in memory
 * @mask: Mask of the bits to be cleared
 * @addr: pointer to word in memory
 *
 * Atomically clears the bits set in @mask from the memory word specified.
 */
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
#ifdef CONFIG_SMP
        int status;

        asm volatile(
                "1: mov  %3,(_AAR,%2)   \n"     /* begin atomic op on addr */
                "   mov  (_ADR,%2),%0   \n"     /* fetch current value */
                "   and  %4,%0          \n"     /* clear the masked bits */
                "   mov  %0,(_ADR,%2)   \n"     /* attempt the store */
                "   mov  (_ADR,%2),%0   \n"     /* flush */
                "   mov  (_ASR,%2),%0   \n"     /* check status */
                "   or   %0,%0          \n"
                "   bne  1b             \n"     /* non-zero status: retry */
                : "=&r"(status), "=m"(*addr)
                : "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(~mask)
                : "memory", "cc");
#else
        unsigned long flags;

        mask = ~mask;
        flags = arch_local_cli_save();
        *addr &= mask;
        arch_local_irq_restore(flags);
#endif
}
/**
 * atomic_set_mask - Atomically set bits in memory
 * @mask: Mask of the bits to be set
 * @addr: pointer to word in memory
 *
 * Atomically sets the bits set in @mask in the memory word specified.
 */
static inline void atomic_set_mask(unsigned long mask, unsigned long *addr)
{
#ifdef CONFIG_SMP
        int status;

        asm volatile(
                "1: mov  %3,(_AAR,%2)   \n"     /* begin atomic op on addr */
                "   mov  (_ADR,%2),%0   \n"     /* fetch current value */
                "   or   %4,%0          \n"     /* set the masked bits */
                "   mov  %0,(_ADR,%2)   \n"     /* attempt the store */
                "   mov  (_ADR,%2),%0   \n"     /* flush */
                "   mov  (_ASR,%2),%0   \n"     /* check status */
                "   or   %0,%0          \n"
                "   bne  1b             \n"     /* non-zero status: retry */
                : "=&r"(status), "=m"(*addr)
                : "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(mask)
                : "memory", "cc");
#else
        unsigned long flags;

        flags = arch_local_cli_save();
        *addr |= mask;
        arch_local_irq_restore(flags);
#endif
}
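
/*
 * Usage sketch (illustrative only): manipulating a shared flag word
 * with the mask helpers above.  The flag values and helpers are
 * hypothetical; the block is guarded out so the header still
 * compiles unchanged.
 */
#if 0
#define EXAMPLE_FLAG_BUSY	0x00000001UL
#define EXAMPLE_FLAG_ERROR	0x00000002UL

static unsigned long example_status;

static void example_mark_busy(void)
{
        atomic_set_mask(EXAMPLE_FLAG_BUSY, &example_status);
}

static void example_clear_error(void)
{
        atomic_clear_mask(EXAMPLE_FLAG_ERROR, &example_status);
}
#endif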
#endif /* __KERNEL__ */
#endif /* CONFIG_SMP */
#endif /* _ASM_ATOMIC_H */