spinlock.c

/*
 * Out of line spinlock code.
 *
 * Copyright IBM Corp. 2004, 2006
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/io.h>
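
/*
 * Number of busy-wait loop iterations before a contended lock path
 * considers yielding the CPU.  Defaults to 1000 (set by the early
 * initcall below) and can be overridden with the "spin_retry=" kernel
 * parameter.
 */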
int spin_retry = -1;

static int __init spin_retry_init(void)
{
        if (spin_retry < 0)
                spin_retry = 1000;
        return 0;
}
early_initcall(spin_retry_init);

/**
 * spin_retry= parameter
 */
static int __init spin_retry_setup(char *str)
{
        spin_retry = simple_strtoul(str, &str, 0);
        return 1;
}
__setup("spin_retry=", spin_retry_setup);
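
/*
 * Out of line slow path of arch_spin_lock(): entered when the inline
 * compare-and-swap on the lock word fails.  The lock word holds the
 * SPINLOCK_LOCKVAL of the owner, i.e. the bitwise complement of its CPU
 * number, so ~owner recovers the CPU to direct a yield hint at.  On the
 * first sight of contention the owner is checked once with
 * arch_vcpu_is_preempted(); afterwards the lock word is polled for
 * spin_retry iterations before yielding again.
 */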
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
        int cpu = SPINLOCK_LOCKVAL;
        int owner, count, first_diag;

        first_diag = 1;
        while (1) {
                owner = ACCESS_ONCE(lp->lock);
                /* Try to get the lock if it is free. */
                if (!owner) {
                        if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
                                return;
                        continue;
                }
                /* First iteration: check if the lock owner is running. */
                if (first_diag && arch_vcpu_is_preempted(~owner)) {
                        smp_yield_cpu(~owner);
                        first_diag = 0;
                        continue;
                }
                /* Loop for a while on the lock value. */
                count = spin_retry;
                do {
                        owner = ACCESS_ONCE(lp->lock);
                } while (owner && count-- > 0);
                if (!owner)
                        continue;
                /*
                 * For multiple layers of hypervisors, e.g. z/VM + LPAR
                 * yield the CPU unconditionally. For LPAR rely on the
                 * sense running status.
                 */
                if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
                        smp_yield_cpu(~owner);
                        first_diag = 0;
                }
        }
}
EXPORT_SYMBOL(arch_spin_lock_wait);
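
/*
 * Same slow path as above, but for the spin_lock_irqsave() case: the
 * caller's saved interrupt state is restored (re-enabling interrupts if
 * they were on) while busy waiting, and interrupts are disabled again
 * right before the lock is actually taken.
 */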
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
        int cpu = SPINLOCK_LOCKVAL;
        int owner, count, first_diag;

        local_irq_restore(flags);
        first_diag = 1;
        while (1) {
                owner = ACCESS_ONCE(lp->lock);
                /* Try to get the lock if it is free. */
                if (!owner) {
                        local_irq_disable();
                        if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
                                return;
                        local_irq_restore(flags);
                        continue;
                }
                /* Check if the lock owner is running. */
                if (first_diag && arch_vcpu_is_preempted(~owner)) {
                        smp_yield_cpu(~owner);
                        first_diag = 0;
                        continue;
                }
                /* Loop for a while on the lock value. */
                count = spin_retry;
                do {
                        owner = ACCESS_ONCE(lp->lock);
                } while (owner && count-- > 0);
                if (!owner)
                        continue;
                /*
                 * For multiple layers of hypervisors, e.g. z/VM + LPAR
                 * yield the CPU unconditionally. For LPAR rely on the
                 * sense running status.
                 */
                if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
                        smp_yield_cpu(~owner);
                        first_diag = 0;
                }
        }
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);
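
/*
 * Bounded trylock: retry the compare-and-swap on a free lock word up to
 * spin_retry times without yielding.  Returns 1 on success, 0 if the
 * lock could not be taken.
 */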
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
        int cpu = SPINLOCK_LOCKVAL;
        int owner, count;

        for (count = spin_retry; count > 0; count--) {
                owner = READ_ONCE(lp->lock);
                /* Try to get the lock if it is free. */
                if (!owner) {
                        if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
                                return 1;
                }
        }
        return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);
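
/*
 * rwlock layout: a non-negative lock word counts the readers, the most
 * significant bit (0x80000000) marks a writer.  Readers take the lock by
 * atomically incrementing a non-negative lock word.  On machines with the
 * interlocked-access facility (CONFIG_HAVE_MARCH_Z196_FEATURES) the inline
 * read-lock fast path has already added 1 with __RAW_LOCK before calling
 * here, so that increment is backed out first.
 */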
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
        int count = spin_retry;
        int owner, old;

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
        __RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
#endif
        owner = 0;
        while (1) {
                if (count-- <= 0) {
                        if (owner && arch_vcpu_is_preempted(~owner))
                                smp_yield_cpu(~owner);
                        count = spin_retry;
                }
                old = ACCESS_ONCE(rw->lock);
                owner = ACCESS_ONCE(rw->owner);
                if (old < 0)
                        continue;
                if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
                        return;
        }
}
EXPORT_SYMBOL(_raw_read_lock_wait);
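
/*
 * Bounded read trylock: retry incrementing the reader count up to
 * spin_retry times while no writer holds the lock.  Returns 1 on
 * success, 0 on failure.
 */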
int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
        int count = spin_retry;
        int old;

        while (count-- > 0) {
                old = ACCESS_ONCE(rw->lock);
                if (old < 0)
                        continue;
                if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);
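
/*
 * Write lock slow path: set the writer bit and then wait for the reader
 * count to drain to zero.  With the interlocked-access facility the fast
 * path has already attempted an atomic OR of 0x80000000 and passes the
 * lock value it observed in "prev"; without it, the writer bit is set
 * here with compare-and-swap.  The loop exits once no readers remain and
 * the writer bit was set by this CPU (prev >= 0).
 */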
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

void _raw_write_lock_wait(arch_rwlock_t *rw, int prev)
{
        int count = spin_retry;
        int owner, old;

        owner = 0;
        while (1) {
                if (count-- <= 0) {
                        if (owner && arch_vcpu_is_preempted(~owner))
                                smp_yield_cpu(~owner);
                        count = spin_retry;
                }
                old = ACCESS_ONCE(rw->lock);
                owner = ACCESS_ONCE(rw->owner);
                smp_mb();
                if (old >= 0) {
                        prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
                        old = prev;
                }
                if ((old & 0x7fffffff) == 0 && prev >= 0)
                        break;
        }
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

void _raw_write_lock_wait(arch_rwlock_t *rw)
{
        int count = spin_retry;
        int owner, old, prev;

        prev = 0x80000000;
        owner = 0;
        while (1) {
                if (count-- <= 0) {
                        if (owner && arch_vcpu_is_preempted(~owner))
                                smp_yield_cpu(~owner);
                        count = spin_retry;
                }
                old = ACCESS_ONCE(rw->lock);
                owner = ACCESS_ONCE(rw->owner);
                if (old >= 0 &&
                    __atomic_cmpxchg_bool(&rw->lock, old, old | 0x80000000))
                        prev = old;
                else
                        smp_mb();
                if ((old & 0x7fffffff) == 0 && prev >= 0)
                        break;
        }
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
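
/*
 * Bounded write trylock: retry the compare-and-swap from 0 to the writer
 * bit up to spin_retry times.  Returns 1 on success, 0 on failure.
 */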
int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
        int count = spin_retry;
        int old;

        while (count-- > 0) {
                old = ACCESS_ONCE(rw->lock);
                if (old)
                        continue;
                if (__atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000))
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);
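
/*
 * Relax helper for contended locks: "cpu" is the lock value of the
 * current holder (0 if the lock is free).  On LPAR only yield when the
 * holding CPU is not currently running; under additional hypervisor
 * layers (e.g. z/VM) yield unconditionally.
 */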
void arch_lock_relax(int cpu)
{
        if (!cpu)
                return;
        if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(~cpu))
                return;
        smp_yield_cpu(~cpu);
}
EXPORT_SYMBOL(arch_lock_relax);