/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_M32R_BITOPS_H
#define _ASM_M32R_BITOPS_H

/*
 * linux/include/asm-m32r/bitops.h
 *
 * Copyright 1992, Linus Torvalds.
 *
 * M32R version:
 * Copyright (C) 2001, 2002  Hitoshi Yamamoto
 * Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <asm/assembler.h>
#include <asm/byteorder.h>
#include <asm/dcache_clear.h>
#include <asm/types.h>
#include <asm/barrier.h>

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */
  30. /**
  31. * set_bit - Atomically set a bit in memory
  32. * @nr: the bit to set
  33. * @addr: the address to start counting from
  34. *
  35. * This function is atomic and may not be reordered. See __set_bit()
  36. * if you do not require the atomic guarantees.
  37. * Note that @nr may be almost arbitrarily large; this function is not
  38. * restricted to acting on a single-word quantity.
  39. */
  40. static __inline__ void set_bit(int nr, volatile void * addr)
  41. {
  42. __u32 mask;
  43. volatile __u32 *a = addr;
  44. unsigned long flags;
  45. unsigned long tmp;
  46. a += (nr >> 5);
  47. mask = (1 << (nr & 0x1F));
  48. local_irq_save(flags);
  49. __asm__ __volatile__ (
  50. DCACHE_CLEAR("%0", "r6", "%1")
  51. M32R_LOCK" %0, @%1; \n\t"
  52. "or %0, %2; \n\t"
  53. M32R_UNLOCK" %0, @%1; \n\t"
  54. : "=&r" (tmp)
  55. : "r" (a), "r" (mask)
  56. : "memory"
  57. #ifdef CONFIG_CHIP_M32700_TS1
  58. , "r6"
  59. #endif /* CONFIG_CHIP_M32700_TS1 */
  60. );
  61. local_irq_restore(flags);
  62. }
  63. /**
  64. * clear_bit - Clears a bit in memory
  65. * @nr: Bit to clear
  66. * @addr: Address to start counting from
  67. *
  68. * clear_bit() is atomic and may not be reordered. However, it does
  69. * not contain a memory barrier, so if it is used for locking purposes,
  70. * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
  71. * in order to ensure changes are visible on other processors.
  72. */
  73. static __inline__ void clear_bit(int nr, volatile void * addr)
  74. {
  75. __u32 mask;
  76. volatile __u32 *a = addr;
  77. unsigned long flags;
  78. unsigned long tmp;
  79. a += (nr >> 5);
  80. mask = (1 << (nr & 0x1F));
  81. local_irq_save(flags);
  82. __asm__ __volatile__ (
  83. DCACHE_CLEAR("%0", "r6", "%1")
  84. M32R_LOCK" %0, @%1; \n\t"
  85. "and %0, %2; \n\t"
  86. M32R_UNLOCK" %0, @%1; \n\t"
  87. : "=&r" (tmp)
  88. : "r" (a), "r" (~mask)
  89. : "memory"
  90. #ifdef CONFIG_CHIP_M32700_TS1
  91. , "r6"
  92. #endif /* CONFIG_CHIP_M32700_TS1 */
  93. );
  94. local_irq_restore(flags);
  95. }
  96. /**
  97. * change_bit - Toggle a bit in memory
  98. * @nr: Bit to clear
  99. * @addr: Address to start counting from
  100. *
  101. * change_bit() is atomic and may not be reordered.
  102. * Note that @nr may be almost arbitrarily large; this function is not
  103. * restricted to acting on a single-word quantity.
  104. */
  105. static __inline__ void change_bit(int nr, volatile void * addr)
  106. {
  107. __u32 mask;
  108. volatile __u32 *a = addr;
  109. unsigned long flags;
  110. unsigned long tmp;
  111. a += (nr >> 5);
  112. mask = (1 << (nr & 0x1F));
  113. local_irq_save(flags);
  114. __asm__ __volatile__ (
  115. DCACHE_CLEAR("%0", "r6", "%1")
  116. M32R_LOCK" %0, @%1; \n\t"
  117. "xor %0, %2; \n\t"
  118. M32R_UNLOCK" %0, @%1; \n\t"
  119. : "=&r" (tmp)
  120. : "r" (a), "r" (mask)
  121. : "memory"
  122. #ifdef CONFIG_CHIP_M32700_TS1
  123. , "r6"
  124. #endif /* CONFIG_CHIP_M32700_TS1 */
  125. );
  126. local_irq_restore(flags);
  127. }
  128. /**
  129. * test_and_set_bit - Set a bit and return its old value
  130. * @nr: Bit to set
  131. * @addr: Address to count from
  132. *
  133. * This operation is atomic and cannot be reordered.
  134. * It also implies a memory barrier.
  135. */
  136. static __inline__ int test_and_set_bit(int nr, volatile void * addr)
  137. {
  138. __u32 mask, oldbit;
  139. volatile __u32 *a = addr;
  140. unsigned long flags;
  141. unsigned long tmp;
  142. a += (nr >> 5);
  143. mask = (1 << (nr & 0x1F));
  144. local_irq_save(flags);
  145. __asm__ __volatile__ (
  146. DCACHE_CLEAR("%0", "%1", "%2")
  147. M32R_LOCK" %0, @%2; \n\t"
  148. "mv %1, %0; \n\t"
  149. "and %0, %3; \n\t"
  150. "or %1, %3; \n\t"
  151. M32R_UNLOCK" %1, @%2; \n\t"
  152. : "=&r" (oldbit), "=&r" (tmp)
  153. : "r" (a), "r" (mask)
  154. : "memory"
  155. );
  156. local_irq_restore(flags);
  157. return (oldbit != 0);
  158. }
  159. /**
  160. * test_and_clear_bit - Clear a bit and return its old value
  161. * @nr: Bit to set
  162. * @addr: Address to count from
  163. *
  164. * This operation is atomic and cannot be reordered.
  165. * It also implies a memory barrier.
  166. */
  167. static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
  168. {
  169. __u32 mask, oldbit;
  170. volatile __u32 *a = addr;
  171. unsigned long flags;
  172. unsigned long tmp;
  173. a += (nr >> 5);
  174. mask = (1 << (nr & 0x1F));
  175. local_irq_save(flags);
  176. __asm__ __volatile__ (
  177. DCACHE_CLEAR("%0", "%1", "%3")
  178. M32R_LOCK" %0, @%3; \n\t"
  179. "mv %1, %0; \n\t"
  180. "and %0, %2; \n\t"
  181. "not %2, %2; \n\t"
  182. "and %1, %2; \n\t"
  183. M32R_UNLOCK" %1, @%3; \n\t"
  184. : "=&r" (oldbit), "=&r" (tmp), "+r" (mask)
  185. : "r" (a)
  186. : "memory"
  187. );
  188. local_irq_restore(flags);
  189. return (oldbit != 0);
  190. }
  191. /**
  192. * test_and_change_bit - Change a bit and return its old value
  193. * @nr: Bit to set
  194. * @addr: Address to count from
  195. *
  196. * This operation is atomic and cannot be reordered.
  197. * It also implies a memory barrier.
  198. */
  199. static __inline__ int test_and_change_bit(int nr, volatile void * addr)
  200. {
  201. __u32 mask, oldbit;
  202. volatile __u32 *a = addr;
  203. unsigned long flags;
  204. unsigned long tmp;
  205. a += (nr >> 5);
  206. mask = (1 << (nr & 0x1F));
  207. local_irq_save(flags);
  208. __asm__ __volatile__ (
  209. DCACHE_CLEAR("%0", "%1", "%2")
  210. M32R_LOCK" %0, @%2; \n\t"
  211. "mv %1, %0; \n\t"
  212. "and %0, %3; \n\t"
  213. "xor %1, %3; \n\t"
  214. M32R_UNLOCK" %1, @%2; \n\t"
  215. : "=&r" (oldbit), "=&r" (tmp)
  216. : "r" (a), "r" (mask)
  217. : "memory"
  218. );
  219. local_irq_restore(flags);
  220. return (oldbit != 0);
  221. }
#include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>

#ifdef __KERNEL__
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
#endif /* __KERNEL__ */

#ifdef __KERNEL__
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>
#endif /* __KERNEL__ */

#endif /* _ASM_M32R_BITOPS_H */