spinlock.h

/*
 * S390 version
 *   Copyright IBM Corp. 1999
 *   Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>

#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)

extern int spin_retry;
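
/*
 * The helper below wraps the s390 COMPARE AND SWAP (cs) instruction.  A
 * rough C model, assuming the whole block is executed atomically by the
 * hardware:
 *
 *	if (*lock == old) {
 *		*lock = new;
 *		return 1;	// swap done, lock word updated
 *	}
 *	return 0;		// somebody else changed the lock word
 */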
static inline int
_raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
{
        unsigned int old_expected = old;

        asm volatile(
                " cs %0,%3,%1"
                : "=d" (old), "=Q" (*lock)
                : "0" (old), "d" (new), "Q" (*lock)
                : "cc", "memory");
        return old == old_expected;
}

/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions.  They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
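
/*
 * Usage sketch: these arch_* primitives are normally reached through the
 * generic spin_lock()/spin_unlock() wrappers, but the contract they
 * implement is simply (assuming the usual __ARCH_SPIN_LOCK_UNLOCKED
 * initializer from asm/spinlock_types.h):
 *
 *	arch_spinlock_t lp = __ARCH_SPIN_LOCK_UNLOCKED;
 *
 *	arch_spin_lock(&lp);	// fast path: cs lock word 0 -> SPINLOCK_LOCKVAL
 *	// ... critical section ...
 *	arch_spin_unlock(&lp);	// barrier, then store 0
 */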

void arch_lock_relax(unsigned int cpu);

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);

static inline void arch_spin_relax(arch_spinlock_t *lock)
{
        arch_lock_relax(lock->lock);
}

static inline u32 arch_spin_lockval(int cpu)
{
        return ~cpu;    /* ~cpu is != 0 even for cpu 0 */
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
        return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
        return ACCESS_ONCE(lp->lock) != 0;
}

static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
        barrier();
        return likely(arch_spin_value_unlocked(*lp) &&
                      _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL));
}

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
        if (!arch_spin_trylock_once(lp))
                arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
                                        unsigned long flags)
{
        if (!arch_spin_trylock_once(lp))
                arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
        if (!arch_spin_trylock_once(lp))
                return arch_spin_trylock_retry(lp);
        return 1;
}

static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
        typecheck(unsigned int, lp->lock);
        asm volatile(
                __ASM_BARRIER
                "st %1,%0\n"
                : "+Q" (lp->lock)
                : "d" (0)
                : "cc", "memory");
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
        while (arch_spin_is_locked(lock))
                arch_spin_relax(lock);
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers.  For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
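
/*
 * Sketch of the "mix" described above, using the generic rwlock wrappers
 * that end up in the arch_* functions below:
 *
 *	// reader, may also run in interrupt context
 *	read_lock(&rw);
 *	...
 *	read_unlock(&rw);
 *
 *	// writer, must keep those interrupt readers out
 *	write_lock_irqsave(&rw, flags);
 *	...
 *	write_unlock_irqrestore(&rw, flags);
 */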

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)

extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

static inline int arch_read_trylock_once(arch_rwlock_t *rw)
{
        unsigned int old = ACCESS_ONCE(rw->lock);
        return likely((int) old >= 0 &&
                      _raw_compare_and_swap(&rw->lock, old, old + 1));
}

static inline int arch_write_trylock_once(arch_rwlock_t *rw)
{
        unsigned int old = ACCESS_ONCE(rw->lock);
        return likely(old == 0 &&
                      _raw_compare_and_swap(&rw->lock, 0, 0x80000000));
}
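
/*
 * rwlock word layout assumed by the helpers above and below: the most
 * significant bit (0x80000000) is the writer bit, the remaining bits hold
 * the reader count, and 0 means unlocked.  A negative (int) value of the
 * lock word therefore signals a writer, which is what the read-side
 * "(int) old >= 0" checks test for.
 */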

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __RAW_OP_OR     "lao"
#define __RAW_OP_AND    "lan"
#define __RAW_OP_ADD    "laa"

#define __RAW_LOCK(ptr, op_val, op_string)              \
({                                                      \
        unsigned int old_val;                           \
                                                        \
        typecheck(unsigned int *, ptr);                 \
        asm volatile(                                   \
                op_string " %0,%2,%1\n"                 \
                "bcr 14,0\n"                            \
                : "=d" (old_val), "+Q" (*ptr)           \
                : "d" (op_val)                          \
                : "cc", "memory");                      \
        old_val;                                        \
})

#define __RAW_UNLOCK(ptr, op_val, op_string)            \
({                                                      \
        unsigned int old_val;                           \
                                                        \
        typecheck(unsigned int *, ptr);                 \
        asm volatile(                                   \
                "bcr 14,0\n"                            \
                op_string " %0,%2,%1\n"                 \
                : "=d" (old_val), "+Q" (*ptr)           \
                : "d" (op_val)                          \
                : "cc", "memory");                      \
        old_val;                                        \
})
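
/*
 * On z196 and newer machines the interlocked-access instructions
 * ("laa" load and add, "lan" load and and, "lao" load and or) update the
 * lock word and return its old value in one atomic operation, so no
 * compare-and-swap retry loop is needed; "bcr 14,0" supplies the memory
 * barrier.  A rough C model of __RAW_LOCK(ptr, op_val, op_string),
 * assuming the update itself is atomic:
 *
 *	old_val = *ptr;
 *	*ptr = old_val OP op_val;	// OP is |, & or + depending on op_string
 *	// memory barrier
 *	old_val;
 */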

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp, unsigned int prev);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        unsigned int old;

        old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
        if ((int) old < 0)      /* a writer holds or waits for the lock */
                _raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        __RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);      /* reader count-- */
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        unsigned int old;

        old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
        if (old != 0)
                _raw_write_lock_wait(rw, old);
        rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        rw->owner = 0;
        __RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);      /* clear the writer bit */
}

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        if (!arch_read_trylock_once(rw))
                _raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned int old;

        do {    /* decrement the reader count with compare-and-swap */
                old = ACCESS_ONCE(rw->lock);
        } while (!_raw_compare_and_swap(&rw->lock, old, old - 1));
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        if (!arch_write_trylock_once(rw))
                _raw_write_lock_wait(rw);
        rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        typecheck(unsigned int, rw->lock);

        rw->owner = 0;
        asm volatile(
                __ASM_BARRIER
                "st %1,%0\n"
                : "+Q" (rw->lock)
                : "d" (0)
                : "cc", "memory");
}

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        if (!arch_read_trylock_once(rw))
                return _raw_read_trylock_retry(rw);
        return 1;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
                return 0;
        rw->owner = SPINLOCK_LOCKVAL;
        return 1;
}

static inline void arch_read_relax(arch_rwlock_t *rw)
{
        arch_lock_relax(rw->owner);
}

static inline void arch_write_relax(arch_rwlock_t *rw)
{
        arch_lock_relax(rw->owner);
}

#endif /* __ASM_SPINLOCK_H */