spinlock.c

/*
 * Out of line spinlock code.
 *
 * Copyright IBM Corp. 2004, 2006
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/io.h>

int spin_retry = 1000;

/**
 * spin_retry= parameter, sets the number of in-line lock polls
 * a waiter makes before yielding.
 */
static int __init spin_retry_setup(char *str)
{
        spin_retry = simple_strtoul(str, &str, 0);
        return 1;
}
__setup("spin_retry=", spin_retry_setup);
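
/*
 * Slow path of arch_spin_lock(). The lock word holds SPINLOCK_LOCKVAL
 * of the owner, the bitwise complement of the owning CPU's number, so
 * ~owner recovers that CPU id. Under a hypervisor, spinning on a lock
 * held by a preempted virtual CPU gets nowhere, so if the owner is not
 * currently scheduled, yield to it instead of polling.
 */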
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
        unsigned int cpu = SPINLOCK_LOCKVAL;
        unsigned int owner;
        int count;

        while (1) {
                owner = ACCESS_ONCE(lp->lock);
                /* Try to get the lock if it is free. */
                if (!owner) {
                        if (_raw_compare_and_swap(&lp->lock, 0, cpu))
                                return;
                        continue;
                }
                /* Check if the lock owner is running. */
                if (!smp_vcpu_scheduled(~owner)) {
                        smp_yield_cpu(~owner);
                        continue;
                }
                /* Loop for a while on the lock value. */
                count = spin_retry;
                do {
                        owner = ACCESS_ONCE(lp->lock);
                } while (owner && count-- > 0);
                if (!owner)
                        continue;
                /*
                 * For multiple layers of hypervisors, e.g. z/VM + LPAR,
                 * yield the CPU if the lock is still unavailable.
                 */
                if (!MACHINE_IS_LPAR)
                        smp_yield_cpu(~owner);
        }
}
EXPORT_SYMBOL(arch_spin_lock_wait);
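
/*
 * Variant of arch_spin_lock_wait() for spin_lock_irqsave(): the
 * caller's interrupt state is restored while waiting, and interrupts
 * are disabled again right before the compare-and-swap; if the swap
 * loses the race, they are re-enabled from the saved flags.
 */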
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
        unsigned int cpu = SPINLOCK_LOCKVAL;
        unsigned int owner;
        int count;

        local_irq_restore(flags);
        while (1) {
                owner = ACCESS_ONCE(lp->lock);
                /* Try to get the lock if it is free. */
                if (!owner) {
                        local_irq_disable();
                        if (_raw_compare_and_swap(&lp->lock, 0, cpu))
                                return;
                        local_irq_restore(flags);
                }
                /* Check if the lock owner is running. */
                if (!smp_vcpu_scheduled(~owner)) {
                        smp_yield_cpu(~owner);
                        continue;
                }
                /* Loop for a while on the lock value. */
                count = spin_retry;
                do {
                        owner = ACCESS_ONCE(lp->lock);
                } while (owner && count-- > 0);
                if (!owner)
                        continue;
                /*
                 * For multiple layers of hypervisors, e.g. z/VM + LPAR,
                 * yield the CPU if the lock is still unavailable.
                 */
                if (!MACHINE_IS_LPAR)
                        smp_yield_cpu(~owner);
        }
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);
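
/*
 * Back-off helper for spin-wait loops: if this is a z/VM or KVM
 * guest, or the owner's virtual CPU is not currently scheduled,
 * yield to the lock owner rather than keep polling.
 */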
void arch_spin_relax(arch_spinlock_t *lp)
{
        unsigned int cpu = lp->lock;

        if (cpu != 0) {
                if (MACHINE_IS_VM || MACHINE_IS_KVM ||
                    !smp_vcpu_scheduled(~cpu))
                        smp_yield_cpu(~cpu);
        }
}
EXPORT_SYMBOL(arch_spin_relax);
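
/*
 * Bounded trylock: retry the inline fast path up to spin_retry times
 * before reporting failure.
 */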
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
        int count;

        for (count = spin_retry; count > 0; count--)
                if (arch_spin_trylock_once(lp))
                        return 1;
        return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);
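
/*
 * Reader slow path. The rwlock word counts readers in the lower 31
 * bits; a set top bit (negative when read as a signed int) means a
 * writer holds the lock. Readers wait for the top bit to clear and
 * then bump the count with compare-and-swap.
 */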
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
        unsigned int old;
        int count = spin_retry;

        while (1) {
                if (count-- <= 0) {
                        smp_yield();
                        count = spin_retry;
                }
                old = ACCESS_ONCE(rw->lock);
                if ((int) old < 0)
                        continue;
                if (_raw_compare_and_swap(&rw->lock, old, old + 1))
                        return;
        }
}
EXPORT_SYMBOL(_raw_read_lock_wait);
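
/*
 * Reader slow path for the _irqsave variant: interrupts follow the
 * saved flags while polling and are disabled again around the
 * compare-and-swap, mirroring arch_spin_lock_wait_flags().
 */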
void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
        unsigned int old;
        int count = spin_retry;

        local_irq_restore(flags);
        while (1) {
                if (count-- <= 0) {
                        smp_yield();
                        count = spin_retry;
                }
                old = ACCESS_ONCE(rw->lock);
                if ((int) old < 0)
                        continue;
                local_irq_disable();
                if (_raw_compare_and_swap(&rw->lock, old, old + 1))
                        return;
                local_irq_restore(flags);
        }
}
EXPORT_SYMBOL(_raw_read_lock_wait_flags);
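
/*
 * Bounded read-trylock: poll up to spin_retry times for a window
 * with no writer before giving up.
 */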
int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
        unsigned int old;
        int count = spin_retry;

        while (count-- > 0) {
                old = ACCESS_ONCE(rw->lock);
                if ((int) old < 0)
                        continue;
                if (_raw_compare_and_swap(&rw->lock, old, old + 1))
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);
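
/*
 * Writer slow path: wait for the lock word to reach zero (no readers,
 * no writer), then claim it by setting the writer bit 0x80000000.
 */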
void _raw_write_lock_wait(arch_rwlock_t *rw)
{
        unsigned int old;
        int count = spin_retry;

        while (1) {
                if (count-- <= 0) {
                        smp_yield();
                        count = spin_retry;
                }
                old = ACCESS_ONCE(rw->lock);
                if (old)
                        continue;
                if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
                        return;
        }
}
EXPORT_SYMBOL(_raw_write_lock_wait);
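
/*
 * Writer slow path for the _irqsave variant, with the same interrupt
 * handling as the other *_wait_flags functions above.
 */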
void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
        unsigned int old;
        int count = spin_retry;

        local_irq_restore(flags);
        while (1) {
                if (count-- <= 0) {
                        smp_yield();
                        count = spin_retry;
                }
                old = ACCESS_ONCE(rw->lock);
                if (old)
                        continue;
                local_irq_disable();
                if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
                        return;
                local_irq_restore(flags);
        }
}
EXPORT_SYMBOL(_raw_write_lock_wait_flags);
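
/*
 * Bounded write-trylock: poll up to spin_retry times for the lock
 * word to drop to zero before giving up.
 */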
int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
        unsigned int old;
        int count = spin_retry;

        while (count-- > 0) {
                old = ACCESS_ONCE(rw->lock);
                if (old)
                        continue;
                if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);