spinlock.c

/*
 * Out of line spinlock code.
 *
 * Copyright IBM Corp. 2004, 2006
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/io.h>
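
/*
 * Number of iterations a CPU busy-waits on a contended lock before it
 * considers yielding to the hypervisor; tunable via the spin_retry=
 * kernel command line parameter below.
 */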
int spin_retry = 1000;

/**
 * spin_retry= parameter
 */
static int __init spin_retry_setup(char *str)
{
        spin_retry = simple_strtoul(str, &str, 0);
        return 1;
}
__setup("spin_retry=", spin_retry_setup);
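
/*
 * Out of line slow path of arch_spin_lock(). A free lock word is 0; a
 * held lock word contains the owner's lock value (SPINLOCK_LOCKVAL), so
 * ~owner recovers the CPU number used for smp_vcpu_scheduled() and
 * smp_yield_cpu(). Take the lock with compare-and-swap when it becomes
 * free, and yield to the hypervisor while the owner's virtual CPU is not
 * running.
 */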
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
        unsigned int cpu = SPINLOCK_LOCKVAL;
        unsigned int owner;
        int count;

        while (1) {
                owner = ACCESS_ONCE(lp->lock);
                /* Try to get the lock if it is free. */
                if (!owner) {
                        if (_raw_compare_and_swap(&lp->lock, 0, cpu))
                                return;
                        continue;
                }
                /* Check if the lock owner is running. */
                if (!smp_vcpu_scheduled(~owner)) {
                        smp_yield_cpu(~owner);
                        continue;
                }
                /* Loop for a while on the lock value. */
                count = spin_retry;
                do {
                        owner = ACCESS_ONCE(lp->lock);
                } while (owner && count-- > 0);
                if (!owner)
                        continue;
                /*
                 * For multiple layers of hypervisors, e.g. z/VM + LPAR
                 * yield the CPU if the lock is still unavailable.
                 */
                if (!MACHINE_IS_LPAR)
                        smp_yield_cpu(~owner);
        }
}
EXPORT_SYMBOL(arch_spin_lock_wait);
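
/*
 * Same as arch_spin_lock_wait(), but for callers that spin with
 * interrupts disabled: interrupts are re-enabled (restored from flags)
 * while waiting and disabled again before each compare-and-swap attempt.
 */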
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
        unsigned int cpu = SPINLOCK_LOCKVAL;
        unsigned int owner;
        int count;

        local_irq_restore(flags);
        while (1) {
                owner = ACCESS_ONCE(lp->lock);
                /* Try to get the lock if it is free. */
                if (!owner) {
                        local_irq_disable();
                        if (_raw_compare_and_swap(&lp->lock, 0, cpu))
                                return;
                        local_irq_restore(flags);
                }
                /* Check if the lock owner is running. */
                if (!smp_vcpu_scheduled(~owner)) {
                        smp_yield_cpu(~owner);
                        continue;
                }
                /* Loop for a while on the lock value. */
                count = spin_retry;
                do {
                        owner = ACCESS_ONCE(lp->lock);
                } while (owner && count-- > 0);
                if (!owner)
                        continue;
                /*
                 * For multiple layers of hypervisors, e.g. z/VM + LPAR
                 * yield the CPU if the lock is still unavailable.
                 */
                if (!MACHINE_IS_LPAR)
                        smp_yield_cpu(~owner);
        }
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);
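
/*
 * Retry arch_spin_trylock_once() up to spin_retry times without
 * yielding. Returns 1 if the lock was acquired, 0 otherwise.
 */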
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
        int count;

        for (count = spin_retry; count > 0; count--)
                if (arch_spin_trylock_once(lp))
                        return 1;
        return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);
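
/*
 * Slow path of the reader side of the rwlock. Wait until no writer holds
 * the lock (the most significant bit of the lock word is the writer bit),
 * then bump the reader count with compare-and-swap. On machines with the
 * interlocked-access facility the -1 add at entry backs out the reader
 * increment already applied by the fast path. Once the retry budget is
 * used up, yield to the hypervisor if the owning CPU is not running.
 */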
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
        unsigned int owner, old;
        int count = spin_retry;

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
        __RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
#endif
        owner = 0;
        while (1) {
                if (count-- <= 0) {
                        if (owner && !smp_vcpu_scheduled(~owner))
                                smp_yield_cpu(~owner);
                        count = spin_retry;
                }
                old = ACCESS_ONCE(rw->lock);
                owner = ACCESS_ONCE(rw->owner);
                if ((int) old < 0)
                        continue;
                if (_raw_compare_and_swap(&rw->lock, old, old + 1))
                        return;
        }
}
EXPORT_SYMBOL(_raw_read_lock_wait);
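
/*
 * Bounded trylock for the reader side: up to spin_retry attempts to
 * increment the reader count while no writer holds the lock.
 * Returns 1 on success, 0 otherwise.
 */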
int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
        unsigned int old;
        int count = spin_retry;

        while (count-- > 0) {
                old = ACCESS_ONCE(rw->lock);
                if ((int) old < 0)
                        continue;
                if (_raw_compare_and_swap(&rw->lock, old, old + 1))
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);
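
/*
 * Slow path of the writer side of the rwlock, in two variants. With the
 * interlocked-access facility (z196 and later) the writer bit 0x80000000
 * is set with an atomic OR; without it the bit is set with
 * compare-and-swap. In both cases the lock is held once the lock word
 * shows no remaining readers and the previous value had no other writer.
 * If the retry budget runs out and the owning CPU is not running, yield
 * to the hypervisor.
 */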
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
{
        unsigned int owner, old;
        int count = spin_retry;

        owner = 0;
        while (1) {
                if (count-- <= 0) {
                        if (owner && !smp_vcpu_scheduled(~owner))
                                smp_yield_cpu(~owner);
                        count = spin_retry;
                }
                old = ACCESS_ONCE(rw->lock);
                owner = ACCESS_ONCE(rw->owner);
                smp_rmb();
                if ((int) old >= 0) {
                        prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
                        old = prev;
                }
                if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
                        break;
        }
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

void _raw_write_lock_wait(arch_rwlock_t *rw)
{
        unsigned int owner, old, prev;
        int count = spin_retry;

        prev = 0x80000000;
        owner = 0;
        while (1) {
                if (count-- <= 0) {
                        if (owner && !smp_vcpu_scheduled(~owner))
                                smp_yield_cpu(~owner);
                        count = spin_retry;
                }
                old = ACCESS_ONCE(rw->lock);
                owner = ACCESS_ONCE(rw->owner);
                if ((int) old >= 0 &&
                    _raw_compare_and_swap(&rw->lock, old, old | 0x80000000))
                        prev = old;
                else
                        smp_rmb();
                if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
                        break;
        }
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
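
/*
 * Bounded trylock for the writer side: up to spin_retry attempts to set
 * the writer bit, which only succeeds while the lock word is completely
 * free. Returns 1 on success, 0 otherwise.
 */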
int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
        unsigned int old;
        int count = spin_retry;

        while (count-- > 0) {
                old = ACCESS_ONCE(rw->lock);
                if (old)
                        continue;
                if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);
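
/*
 * Called while spinning on a lock whose owner is passed in cpu as a lock
 * value (~cpu is the CPU number). Yield to the hypervisor unless the
 * owner is unknown or, on LPAR, the owner's virtual CPU is currently
 * running.
 */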
void arch_lock_relax(unsigned int cpu)
{
        if (!cpu)
                return;
        if (MACHINE_IS_LPAR && smp_vcpu_scheduled(~cpu))
                return;
        smp_yield_cpu(~cpu);
}
EXPORT_SYMBOL(arch_lock_relax);