spinlock.h
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/processor.h>

#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)
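
/*
 * Lock word encoding: 0 means unlocked; a held lock contains the owning
 * CPU's lock value (arch_spin_lockval() below maps cpu -> ~cpu).
 * SPINLOCK_LOCKVAL is the current CPU's lock value, kept in the lowcore
 * so the fast paths can pick it up cheaply.
 */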

extern int spin_retry;

#ifndef CONFIG_SMP
static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
#else
bool arch_vcpu_is_preempted(int cpu);
#endif

#define vcpu_is_preempted arch_vcpu_is_preempted
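
/*
 * vcpu_is_preempted() lets the spinning paths notice that the CPU that
 * owns a lock is currently not running (e.g. its virtual CPU has been
 * preempted by the hypervisor).  arch_lock_relax() below is handed the
 * owner's lock value so it can back off or yield instead of spinning.
 */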

/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions.  They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
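
/*
 * Illustrative sketch (not part of this header): callers normally use
 * the generic wrappers from <linux/spinlock.h>, which are built on the
 * arch_* primitives below.  "my_lock" is a hypothetical example lock.
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *	unsigned long flags;
 *
 *	spin_lock(&my_lock);			variant that leaves IRQs alone
 *	...
 *	spin_unlock(&my_lock);
 *
 *	spin_lock_irqsave(&my_lock, flags);	variant that disables local IRQs
 *	...
 *	spin_unlock_irqrestore(&my_lock, flags);
 */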

void arch_lock_relax(int cpu);

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);

static inline void arch_spin_relax(arch_spinlock_t *lock)
{
	arch_lock_relax(lock->lock);
}

static inline u32 arch_spin_lockval(int cpu)
{
	return ~cpu;
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
	return READ_ONCE(lp->lock) != 0;
}

static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
	barrier();
	return likely(arch_spin_value_unlocked(*lp) &&
		      __atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
}

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
					unsigned long flags)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		return arch_spin_trylock_retry(lp);
	return 1;
}

static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
	typecheck(int, lp->lock);
	asm volatile(
		"st	%1,%0\n"
		: "+Q" (lp->lock)
		: "d" (0)
		: "cc", "memory");
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		arch_spin_relax(lock);
	smp_acquire__after_ctrl_dep();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers.  For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
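
/*
 * Lock word encoding (see the trylock helpers below): the sign bit
 * (0x80000000) marks a writer, the lower bits count active readers,
 * and 0 means unlocked.  Illustrative sketch of the "mixed" usage
 * described above, via the generic <linux/rwlock.h> wrappers;
 * "my_rwlock" and the callers are hypothetical:
 *
 *	static DEFINE_RWLOCK(my_rwlock);
 *
 *	interrupt handler (reader, interrupts already disabled here):
 *		read_lock(&my_rwlock);
 *		...
 *		read_unlock(&my_rwlock);
 *
 *	process context (writer, must keep local readers in irqs out):
 *		write_lock_irq(&my_rwlock);
 *		...
 *		write_unlock_irq(&my_rwlock);
 */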

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)

extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

static inline int arch_read_trylock_once(arch_rwlock_t *rw)
{
	int old = ACCESS_ONCE(rw->lock);
	return likely(old >= 0 &&
		      __atomic_cmpxchg_bool(&rw->lock, old, old + 1));
}

static inline int arch_write_trylock_once(arch_rwlock_t *rw)
{
	int old = ACCESS_ONCE(rw->lock);
	return likely(old == 0 &&
		      __atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000));
}

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __RAW_OP_OR	"lao"
#define __RAW_OP_AND	"lan"
#define __RAW_OP_ADD	"laa"
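
/*
 * lao/lan/laa are the z196 interlocked-access LOAD AND {OR,AND,ADD}
 * instructions: they atomically apply op_val to the word at %1 and
 * return the old value in %0.  __RAW_LOCK additionally issues
 * "bcr 14,0", a serializing operation, so the critical section cannot
 * be observed before the lock update; __RAW_UNLOCK omits it.
 */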

#define __RAW_LOCK(ptr, op_val, op_string)		\
({							\
	int old_val;					\
							\
	typecheck(int *, ptr);				\
	asm volatile(					\
		op_string "	%0,%2,%1\n"		\
		"bcr	14,0\n"				\
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})

#define __RAW_UNLOCK(ptr, op_val, op_string)		\
({							\
	int old_val;					\
							\
	typecheck(int *, ptr);				\
	asm volatile(					\
		op_string "	%0,%2,%1\n"		\
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp, int prev);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	int old;

	old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
	if (old < 0)
		_raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	__RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	int old;

	old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
	if (old != 0)
		_raw_write_lock_wait(rw, old);
	rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	rw->owner = 0;
	__RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
}

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		_raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	int old;

	do {
		old = ACCESS_ONCE(rw->lock);
	} while (!__atomic_cmpxchg_bool(&rw->lock, old, old - 1));
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw))
		_raw_write_lock_wait(rw);
	rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	typecheck(int, rw->lock);

	rw->owner = 0;
	asm volatile(
		"st	%1,%0\n"
		: "+Q" (rw->lock)
		: "d" (0)
		: "cc", "memory");
}

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		return _raw_read_trylock_retry(rw);
	return 1;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
		return 0;
	rw->owner = SPINLOCK_LOCKVAL;
	return 1;
}

static inline void arch_read_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}

static inline void arch_write_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}

#endif /* __ASM_SPINLOCK_H */