qrwlock.c

/*
 * Queue read/write lock
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/qrwlock.h>
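
/*
 * Layout of lock->cnts, per the _QW_*/_QR_* definitions in the matching
 * qrwlock.h header: the low byte (_QW_WMASK) holds the writer state,
 * either _QW_WAITING or _QW_LOCKED, and the bits above it hold the reader
 * count, so each active reader contributes _QR_BIAS (1 << _QR_SHIFT).
 */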

/**
 * rspin_until_writer_unlock - inc reader count & spin until writer is gone
 * @lock : Pointer to queue rwlock structure
 * @cnts : Current queue rwlock lock count value
 *
 * In interrupt context or at the head of the queue, the reader will just
 * increment the reader count & wait until the writer releases the lock.
 */
static __always_inline void
rspin_until_writer_unlock(struct qrwlock *lock, u32 cnts)
{
        while ((cnts & _QW_WMASK) == _QW_LOCKED) {
                cpu_relax_lowlatency();
                cnts = smp_load_acquire((u32 *)&lock->cnts);
        }
}

/**
 * queue_read_lock_slowpath - acquire read lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 */
void queue_read_lock_slowpath(struct qrwlock *lock)
{
        u32 cnts;

        /*
         * Readers come here when they cannot get the lock without waiting
         */
        if (unlikely(in_interrupt())) {
                /*
                 * Readers in interrupt context will spin until the lock is
                 * available without waiting in the queue.
                 */
                cnts = smp_load_acquire((u32 *)&lock->cnts);
                rspin_until_writer_unlock(lock, cnts);
                return;
        }
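
        /*
         * The read-lock fast path in qrwlock.h has already added _QR_BIAS
         * speculatively and found the writer byte set, so back that
         * increment out before joining the wait queue.
         */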
        atomic_sub(_QR_BIAS, &lock->cnts);

        /*
         * Put the reader into the wait queue
         */
        arch_spin_lock(&lock->lock);

        /*
         * At the head of the wait queue now, wait until the writer state
         * goes to 0 and then try to increment the reader count and get
         * the lock. It is possible that an incoming writer may steal the
         * lock in the interim, so it is necessary to check the writer byte
         * to make sure that the write lock isn't taken.
         */
        while (atomic_read(&lock->cnts) & _QW_WMASK)
                cpu_relax_lowlatency();
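
        /*
         * atomic_add_return() hands back the value after our increment;
         * subtracting our own _QR_BIAS yields the value as it stood just
         * before we got our reader count, so rspin_until_writer_unlock()
         * below sees any writer that stole the lock in the interim.
         */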
        cnts = atomic_add_return(_QR_BIAS, &lock->cnts) - _QR_BIAS;
        rspin_until_writer_unlock(lock, cnts);

        /*
         * Signal the next one in queue to become queue head
         */
        arch_spin_unlock(&lock->lock);
}
EXPORT_SYMBOL(queue_read_lock_slowpath);
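
/*
 * For reference, the read-lock fast path in the matching qrwlock.h looks
 * roughly like this (a sketch, not part of this file):
 *
 *	cnts = atomic_add_return(_QR_BIAS, &lock->cnts);
 *	if (likely(!(cnts & _QW_WMASK)))
 *		return;
 *	queue_read_lock_slowpath(lock);
 *
 * On failure, the slowpath above backs the _QR_BIAS increment out before
 * queuing.
 */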

/**
 * queue_write_lock_slowpath - acquire write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 */
void queue_write_lock_slowpath(struct qrwlock *lock)
{
        u32 cnts;

        /* Put the writer into the wait queue */
        arch_spin_lock(&lock->lock);

        /* Try to acquire the lock directly if no reader is present */
        if (!atomic_read(&lock->cnts) &&
            (atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0))
                goto unlock;

        /*
         * Set the waiting flag to notify readers that a writer is pending,
         * or wait for a previous writer to go away.
         */
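        /*
         * _QW_WAITING lies within _QW_WMASK, so setting it makes the reader
         * fast path fail and diverts new process-context readers into the
         * wait queue; interrupt-context readers may still get the lock
         * ahead of a merely waiting writer, since they never queue.
         */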
        for (;;) {
                cnts = atomic_read(&lock->cnts);
                if (!(cnts & _QW_WMASK) &&
                    (atomic_cmpxchg(&lock->cnts, cnts,
                                    cnts | _QW_WAITING) == cnts))
                        break;

                cpu_relax_lowlatency();
        }

        /* When no more readers, set the locked flag */
        for (;;) {
                cnts = atomic_read(&lock->cnts);
                if ((cnts == _QW_WAITING) &&
                    (atomic_cmpxchg(&lock->cnts, _QW_WAITING,
                                    _QW_LOCKED) == _QW_WAITING))
                        break;

                cpu_relax_lowlatency();
        }
unlock:
        arch_spin_unlock(&lock->lock);
}
EXPORT_SYMBOL(queue_write_lock_slowpath);
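
/*
 * The unlock paths live in the matching qrwlock.h and are sketched here
 * only for orientation (details vary by kernel version, roughly):
 *
 *	queue_read_unlock():  atomically subtracts _QR_BIAS from lock->cnts
 *	                      with release ordering;
 *	queue_write_unlock(): clears the writer byte with a release store.
 */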