spinlock.h

/*
 * S390 version
 *   Copyright IBM Corp. 1999
 *   Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>
#include <asm/barrier.h>
#include <asm/processor.h>

#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)

extern int spin_retry;

static inline int
_raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
{
        return __sync_bool_compare_and_swap(lock, old, new);
}
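
/*
 * _raw_compare_and_swap() wraps GCC's __sync_bool_compare_and_swap(),
 * which on s390 is typically emitted as a COMPARE AND SWAP (CS)
 * instruction.  It returns nonzero iff *lock still held 'old' and was
 * atomically replaced by 'new'.  Taking a free spinlock below boils
 * down to:
 *
 *      _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL)
 */
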
#ifndef CONFIG_SMP
static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
#else
bool arch_vcpu_is_preempted(int cpu);
#endif

#define vcpu_is_preempted arch_vcpu_is_preempted
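
/*
 * vcpu_is_preempted() reports whether the (virtual) CPU 'cpu' is
 * currently preempted by the hypervisor.  Callers use this hint to
 * avoid spinning on a lock whose holder cannot make progress right now.
 */
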
/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
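
/*
 * The arch_* helpers below are not called directly; the generic
 * spinlock layer uses them.  A rough sketch of the resulting fast path
 * (the lock variable is only illustrative):
 *
 *      arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
 *
 *      arch_spin_lock(&lock);          // one compare-and-swap if uncontended
 *      // ... critical section ...
 *      arch_spin_unlock(&lock);        // plain store of 0 releases the lock
 *
 * While held, the lock word contains the owner's SPINLOCK_LOCKVAL
 * (~cpu, see arch_spin_lockval()); 0 means unlocked.
 */
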
void arch_lock_relax(unsigned int cpu);

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);

static inline void arch_spin_relax(arch_spinlock_t *lock)
{
        arch_lock_relax(lock->lock);
}

static inline u32 arch_spin_lockval(int cpu)
{
        return ~cpu;
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
        return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
        return ACCESS_ONCE(lp->lock) != 0;
}

static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
        barrier();
        return likely(arch_spin_value_unlocked(*lp) &&
                      _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL));
}

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
        if (!arch_spin_trylock_once(lp))
                arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
                                        unsigned long flags)
{
        if (!arch_spin_trylock_once(lp))
                arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
        if (!arch_spin_trylock_once(lp))
                return arch_spin_trylock_retry(lp);
        return 1;
}

static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
        typecheck(unsigned int, lp->lock);
        asm volatile(
                "st %1,%0\n"
                : "+Q" (lp->lock)
                : "d" (0)
                : "cc", "memory");
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
        while (arch_spin_is_locked(lock))
                arch_spin_relax(lock);
        smp_acquire__after_ctrl_dep();
}
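
/*
 * arch_spin_unlock_wait() spins, relaxing in between, until the lock is
 * observed free.  smp_acquire__after_ctrl_dep() then turns the control
 * dependency of the loop exit into acquire ordering, so the caller sees
 * the stores the previous lock holder made before releasing.
 */
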
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
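
/*
 * Generic-layer sketch of the "mixed" usage described above (the rwlock
 * and the data it protects are purely illustrative):
 *
 *      // interrupt context: reader, no need to disable interrupts
 *      read_lock(&table_lock);
 *      // ... lookup ...
 *      read_unlock(&table_lock);
 *
 *      // process context: writer must be irq-safe
 *      write_lock_irq(&table_lock);
 *      // ... update ...
 *      write_unlock_irq(&table_lock);
 */
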
/**
 * arch_read_can_lock - would read_trylock() succeed?
 * @x: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * arch_write_can_lock - would write_trylock() succeed?
 * @x: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)

extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
static inline int arch_read_trylock_once(arch_rwlock_t *rw)
{
        unsigned int old = ACCESS_ONCE(rw->lock);

        return likely((int) old >= 0 &&
                      _raw_compare_and_swap(&rw->lock, old, old + 1));
}

static inline int arch_write_trylock_once(arch_rwlock_t *rw)
{
        unsigned int old = ACCESS_ONCE(rw->lock);

        return likely(old == 0 &&
                      _raw_compare_and_swap(&rw->lock, 0, 0x80000000));
}
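
/*
 * Lock word encoding used by the rwlock helpers: bit 31 (0x80000000) is
 * set while a writer holds the lock, the remaining bits count readers.
 * For example, 0x00000002 means two readers and 0x80000000 means one
 * writer; any negative value (as a signed int) means "writer active",
 * which is what the (int) old >= 0 test above checks.
 */
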
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __RAW_OP_OR     "lao"
#define __RAW_OP_AND    "lan"
#define __RAW_OP_ADD    "laa"

#define __RAW_LOCK(ptr, op_val, op_string)              \
({                                                      \
        unsigned int old_val;                           \
                                                        \
        typecheck(unsigned int *, ptr);                 \
        asm volatile(                                   \
                op_string " %0,%2,%1\n"                 \
                "bcr 14,0\n"                            \
                : "=d" (old_val), "+Q" (*ptr)           \
                : "d" (op_val)                          \
                : "cc", "memory");                      \
        old_val;                                        \
})

#define __RAW_UNLOCK(ptr, op_val, op_string)            \
({                                                      \
        unsigned int old_val;                           \
                                                        \
        typecheck(unsigned int *, ptr);                 \
        asm volatile(                                   \
                op_string " %0,%2,%1\n"                 \
                : "=d" (old_val), "+Q" (*ptr)           \
                : "d" (op_val)                          \
                : "cc", "memory");                      \
        old_val;                                        \
})
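
/*
 * "lao", "lan" and "laa" are the z196 interlocked-access instructions
 * LOAD AND OR / AND / ADD: they atomically apply the operation to the
 * word at %1 and return its previous value in %0.  __RAW_LOCK follows
 * the update with "bcr 14,0", which acts as a memory barrier on
 * machines with the fast-BCR-serialization facility; __RAW_UNLOCK
 * omits the barrier on the release path.
 */
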
extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp, unsigned int prev);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        unsigned int old;

        old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
        if ((int) old < 0)
                _raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        __RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        unsigned int old;

        old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
        if (old != 0)
                _raw_write_lock_wait(rw, old);
        rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        rw->owner = 0;
        __RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
}

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        if (!arch_read_trylock_once(rw))
                _raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned int old;

        do {
                old = ACCESS_ONCE(rw->lock);
        } while (!_raw_compare_and_swap(&rw->lock, old, old - 1));
}
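
/*
 * Without the interlocked-update instructions, dropping a read lock is
 * a compare-and-swap retry loop: re-read the lock word and try to store
 * it decremented by one until no other CPU has raced with us.
 */
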
static inline void arch_write_lock(arch_rwlock_t *rw)
{
        if (!arch_write_trylock_once(rw))
                _raw_write_lock_wait(rw);
        rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        typecheck(unsigned int, rw->lock);

        rw->owner = 0;
        asm volatile(
                "st %1,%0\n"
                : "+Q" (rw->lock)
                : "d" (0)
                : "cc", "memory");
}

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        if (!arch_read_trylock_once(rw))
                return _raw_read_trylock_retry(rw);
        return 1;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
                return 0;
        rw->owner = SPINLOCK_LOCKVAL;
        return 1;
}

static inline void arch_read_relax(arch_rwlock_t *rw)
{
        arch_lock_relax(rw->owner);
}

static inline void arch_write_relax(arch_rwlock_t *rw)
{
        arch_lock_relax(rw->owner);
}
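
/*
 * The relax helpers pass the owner's CPU value to arch_lock_relax(),
 * implemented in arch/s390/lib/spinlock.c.  The idea is to yield to
 * (or at least wait for) the lock holder's CPU instead of spinning
 * blindly, which matters most when running virtualized.
 */
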
#endif /* __ASM_SPINLOCK_H */