/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_M32R_SPINLOCK_H
#define _ASM_M32R_SPINLOCK_H

/*
 * linux/include/asm-m32r/spinlock.h
 *
 * M32R version:
 *   Copyright (C) 2001, 2002  Hitoshi Yamamoto
 *   Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
 */

#include <linux/compiler.h>
#include <linux/atomic.h>
#include <asm/dcache_clear.h>
#include <asm/page.h>
#include <asm/barrier.h>
#include <asm/processor.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere.
 *
 * (The type definitions are in asm/spinlock_types.h.)
 *
 * Simple spin lock operations.  There are two variants: one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions.  They have a cost.
 */

#define arch_spin_is_locked(x)	(*(volatile int *)(&(x)->slock) <= 0)
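
/*
 * slock encoding: 1 means unlocked, 0 or a negative value means locked.
 * The volatile cast above is just a plain load; a rough modern-C sketch
 * of the same check (illustration only, not part of this header):
 *
 *	static inline int spin_is_locked_sketch(arch_spinlock_t *x)
 *	{
 *		return READ_ONCE(x->slock) <= 0;
 *	}
 */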

/**
 * arch_spin_trylock - try to acquire the spin lock and return the result
 * @lock: pointer to the lock variable
 *
 * arch_spin_trylock() tries to take the lock without spinning and
 * returns the result.  On the m32r, the result is 1 (success) or
 * 0 (failure).
 */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	int oldval;
	unsigned long tmp1, tmp2;

	/*
	 * lock->slock :  =1 : unlock
	 *             : <=0 : lock
	 * {
	 *	oldval = lock->slock;	<--+ need atomic operation
	 *	lock->slock = 0;	<--+
	 * }
	 */
	__asm__ __volatile__ (
		"# arch_spin_trylock		\n\t"
		"ldi	%1, #0;			\n\t"
		"mvfc	%2, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%3")
		"lock	%0, @%3;		\n\t"
		"unlock	%1, @%3;		\n\t"
		"mvtc	%2, psw;		\n\t"
		: "=&r" (oldval), "=&r" (tmp1), "=&r" (tmp2)
		: "r" (&lock->slock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);

	return (oldval > 0);
}
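
/*
 * Illustration only (hypothetical caller, not part of this header):
 * spinning on trylock, roughly equivalent in effect to arch_spin_lock():
 *
 *	while (!arch_spin_trylock(lock))
 *		while (arch_spin_is_locked(lock))
 *			cpu_relax();
 */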

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp0, tmp1;

	/*
	 * lock->slock :  =1 : unlock
	 *             : <=0 : lock
	 *
	 * for ( ; ; ) {
	 *	lock->slock -= 1;	<-- need atomic operation
	 *	if (lock->slock == 0) break;
	 *	for ( ; lock->slock <= 0 ; );
	 * }
	 */
	__asm__ __volatile__ (
		"# arch_spin_lock		\n\t"
		".fillinsn			\n"
		"1:				\n\t"
		"mvfc	%1, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #-1;		\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		"bltz	%0, 2f;			\n\t"
		LOCK_SECTION_START(".balign 4 \n\t")
		".fillinsn			\n"
		"2:				\n\t"
		"ld	%0, @%2;		\n\t"
		"bgtz	%0, 1b;			\n\t"
		"bra	2b;			\n\t"
		LOCK_SECTION_END
		: "=&r" (tmp0), "=&r" (tmp1)
		: "r" (&lock->slock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}
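
/*
 * Note: LOCK_SECTION_START/END above emit the contended spin loop into a
 * separate text section, so the uncontended fast path stays straight-line
 * code.  The rwlock slow paths below use the same pattern.
 */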

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
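	/*
	 * Make sure all accesses from the critical section have completed
	 * before the plain store below makes the lock visibly free.
	 */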
	mb();
	lock->slock = 1;
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers.  For those circumstances we
 * can "mix" irq-safe locks: any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * As on x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 *
 * The inline assembly is non-obvious.  Think about it.
 *
 * Changed to use the same technique as rw semaphores.  See
 * semaphore.h for details.  -ben
 */
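
/*
 * A rough sketch of the bias-counter idea using the generic atomics
 * (illustration only; the real fast paths below are hand-written
 * assembly, though the trylock variants at the end of this file really
 * are built this way):
 *
 *	read_lock:	if (atomic_dec_return(count) >= 0) -> got it
 *	write_lock:	if (atomic_sub_and_test(RW_LOCK_BIAS, count)) -> got it
 *
 * The unlocked value is RW_LOCK_BIAS.  Each reader subtracts 1 and a
 * writer subtracts the whole bias, so the counter reaches exactly 0
 * only when a single writer and no readers hold the lock.
 */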

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp0, tmp1;

	/*
	 * rw->lock :  >0 : unlock
	 *          : <=0 : lock
	 *
	 * for ( ; ; ) {
	 *	rw->lock -= 1;	<-- need atomic operation
	 *	if (rw->lock >= 0) break;
	 *	rw->lock += 1;	<-- need atomic operation
	 *	for ( ; rw->lock <= 0 ; );
	 * }
	 */
	__asm__ __volatile__ (
		"# read_lock			\n\t"
		".fillinsn			\n"
		"1:				\n\t"
		"mvfc	%1, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #-1;		\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		"bltz	%0, 2f;			\n\t"
		LOCK_SECTION_START(".balign 4 \n\t")
		".fillinsn			\n"
		"2:				\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #1;			\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		".fillinsn			\n"
		"3:				\n\t"
		"ld	%0, @%2;		\n\t"
		"bgtz	%0, 1b;			\n\t"
		"bra	3b;			\n\t"
		LOCK_SECTION_END
		: "=&r" (tmp0), "=&r" (tmp1)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp0, tmp1, tmp2;

	/*
	 * rw->lock :  =RW_LOCK_BIAS : unlock
	 *          : !=RW_LOCK_BIAS : lock
	 *
	 * for ( ; ; ) {
	 *	rw->lock -= RW_LOCK_BIAS;	<-- need atomic operation
	 *	if (rw->lock == 0) break;
	 *	rw->lock += RW_LOCK_BIAS;	<-- need atomic operation
	 *	for ( ; rw->lock != RW_LOCK_BIAS ; ) ;
	 * }
	 */
	__asm__ __volatile__ (
		"# write_lock					\n\t"
		"seth	%1, #high(" RW_LOCK_BIAS_STR ");	\n\t"
		"or3	%1, %1, #low(" RW_LOCK_BIAS_STR ");	\n\t"
		".fillinsn					\n"
		"1:						\n\t"
		"mvfc	%2, psw;				\n\t"
		"clrpsw	#0x40 -> nop;				\n\t"
		DCACHE_CLEAR("%0", "r7", "%3")
		"lock	%0, @%3;				\n\t"
		"sub	%0, %1;					\n\t"
		"unlock	%0, @%3;				\n\t"
		"mvtc	%2, psw;				\n\t"
		"bnez	%0, 2f;					\n\t"
		LOCK_SECTION_START(".balign 4 \n\t")
		".fillinsn					\n"
		"2:						\n\t"
		"clrpsw	#0x40 -> nop;				\n\t"
		DCACHE_CLEAR("%0", "r7", "%3")
		"lock	%0, @%3;				\n\t"
		"add	%0, %1;					\n\t"
		"unlock	%0, @%3;				\n\t"
		"mvtc	%2, psw;				\n\t"
		".fillinsn					\n"
		"3:						\n\t"
		"ld	%0, @%3;				\n\t"
		"beq	%0, %1, 1b;				\n\t"
		"bra	3b;					\n\t"
		LOCK_SECTION_END
		: "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r7"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}
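
/*
 * Worked example of the bias arithmetic used above:
 *
 *	rw->lock == RW_LOCK_BIAS	unlocked
 *	rw->lock == RW_LOCK_BIAS - n	n readers
 *	rw->lock == 0			one writer, no readers
 *
 * write_lock subtracts the full bias and succeeds only if the result is
 * exactly zero; any other value means readers (or another writer) are
 * present, so the bias is added back and we spin until the counter
 * reads RW_LOCK_BIAS again.
 */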

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp0, tmp1;

	__asm__ __volatile__ (
		"# read_unlock			\n\t"
		"mvfc	%1, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #1;			\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		: "=&r" (tmp0), "=&r" (tmp1)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp0, tmp1, tmp2;

	__asm__ __volatile__ (
		"# write_unlock					\n\t"
		"seth	%1, #high(" RW_LOCK_BIAS_STR ");	\n\t"
		"or3	%1, %1, #low(" RW_LOCK_BIAS_STR ");	\n\t"
		"mvfc	%2, psw;				\n\t"
		"clrpsw	#0x40 -> nop;				\n\t"
		DCACHE_CLEAR("%0", "r7", "%3")
		"lock	%0, @%3;				\n\t"
		"add	%0, %1;					\n\t"
		"unlock	%0, @%3;				\n\t"
		"mvtc	%2, psw;				\n\t"
		: "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r7"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}
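
/**
 * arch_read_trylock - try to take the lock for reading, without spinning
 * @lock: pointer to the rwlock
 *
 * Decrements the reader count; if the result is negative, a writer
 * holds (or is acquiring) the lock, so the decrement is undone and
 * 0 is returned.  Returns 1 on success.
 */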
static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	if (atomic_dec_return(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}
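
/**
 * arch_write_trylock - try to take the lock for writing, without spinning
 * @lock: pointer to the rwlock
 *
 * Subtracts RW_LOCK_BIAS; the result is zero only if the lock was
 * completely free.  Otherwise the bias is added back and 0 is
 * returned.  Returns 1 on success.
 */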
static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}
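
/*
 * Illustration only (hypothetical caller, not part of this header):
 * backing off instead of spinning when the write lock is busy:
 *
 *	if (arch_write_trylock(&rw)) {
 *		... critical section ...
 *		arch_write_unlock(&rw);
 *	} else {
 *		... defer the work and retry later ...
 *	}
 */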

#endif	/* _ASM_M32R_SPINLOCK_H */