qrwlock.c

/*
 * Queued read/write locks
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <asm/qrwlock.h>

/**
 * queued_read_lock_slowpath - acquire read lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 */
void queued_read_lock_slowpath(struct qrwlock *lock)
{
        /*
         * Readers come here when they cannot get the lock without waiting
         */
        if (unlikely(in_interrupt())) {
                /*
                 * Readers in interrupt context will get the lock immediately
                 * if the writer is just waiting (not holding the lock yet),
                 * so spin with ACQUIRE semantics until the lock is available
                 * without waiting in the queue.
                 */
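                /*
                 * Note: atomic_cond_read_acquire() rebinds VAL to the
                 * freshly reloaded value of lock->cnts on every iteration
                 * of the spin.
                 */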
                atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
                return;
        }
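        /*
         * The fastpath has already speculatively added _QR_BIAS to the
         * count; undo that here before queuing, so that a waiting writer
         * is not held up by readers that are themselves still waiting.
         */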
        atomic_sub(_QR_BIAS, &lock->cnts);

        /*
         * Put the reader into the wait queue
         */
        arch_spin_lock(&lock->wait_lock);
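        /*
         * Holding wait_lock means this reader is now at the head of the
         * queue; restore the reader bias before spinning for any writer
         * that still holds the lock.
         */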
        atomic_add(_QR_BIAS, &lock->cnts);

        /*
         * The ACQUIRE semantics of the following spinning code ensure
         * that accesses can't leak upwards out of our subsequent critical
         * section in the case that the lock is currently held for write.
         */
        atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));

        /*
         * Signal the next one in queue to become queue head
         */
        arch_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL(queued_read_lock_slowpath);

/**
 * queued_write_lock_slowpath - acquire write lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 */
void queued_write_lock_slowpath(struct qrwlock *lock)
{
        /* Put the writer into the wait queue */
        arch_spin_lock(&lock->wait_lock);

        /* Try to acquire the lock directly if no reader is present */
        if (!atomic_read(&lock->cnts) &&
            (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0))
                goto unlock;

        /* Set the waiting flag to notify readers that a writer is pending */
        atomic_add(_QW_WAITING, &lock->cnts);
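
        /*
         * _QW_WAITING is part of _QW_WMASK, so once it is set the reader
         * fastpath fails and new readers queue up behind us; only readers
         * in interrupt context (which test _QW_LOCKED alone) may still
         * slip past a waiting writer.
         */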
        /* When no more readers or writers, set the locked flag */
        do {
                atomic_cond_read_acquire(&lock->cnts, VAL == _QW_WAITING);
        } while (atomic_cmpxchg_relaxed(&lock->cnts, _QW_WAITING,
                                        _QW_LOCKED) != _QW_WAITING);
unlock:
        arch_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL(queued_write_lock_slowpath);
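
/*
 * For context: these slowpaths are reached only from the inline fastpaths
 * defined in include/asm-generic/qrwlock.h. A minimal sketch of those
 * callers is shown below; the exact code differs across kernel versions,
 * so treat it as illustrative rather than authoritative.
 *
 *      static inline void queued_read_lock(struct qrwlock *lock)
 *      {
 *              u32 cnts;
 *
 *              cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
 *              if (likely(!(cnts & _QW_WMASK)))
 *                      return;
 *
 *              queued_read_lock_slowpath(lock);
 *      }
 *
 *      static inline void queued_write_lock(struct qrwlock *lock)
 *      {
 *              if (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0)
 *                      return;
 *
 *              queued_write_lock_slowpath(lock);
 *      }
 */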