@@ -79,6 +79,9 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
 		write_lock(&eb->lock);
 		WARN_ON(atomic_read(&eb->spinning_writers));
 		atomic_inc(&eb->spinning_writers);
+		/*
+		 * atomic_dec_and_test implies a barrier for waitqueue_active
+		 */
 		if (atomic_dec_and_test(&eb->blocking_writers) &&
 		    waitqueue_active(&eb->write_lock_wq))
 			wake_up(&eb->write_lock_wq);
@@ -86,6 +89,9 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
 		BUG_ON(atomic_read(&eb->blocking_readers) == 0);
 		read_lock(&eb->lock);
 		atomic_inc(&eb->spinning_readers);
+		/*
+		 * atomic_dec_and_test implies a barrier for waitqueue_active
+		 */
 		if (atomic_dec_and_test(&eb->blocking_readers) &&
 		    waitqueue_active(&eb->read_lock_wq))
 			wake_up(&eb->read_lock_wq);
@@ -229,6 +235,9 @@ void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
 	}
 	btrfs_assert_tree_read_locked(eb);
 	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
+	/*
+	 * atomic_dec_and_test implies a barrier for waitqueue_active
+	 */
 	if (atomic_dec_and_test(&eb->blocking_readers) &&
 	    waitqueue_active(&eb->read_lock_wq))
 		wake_up(&eb->read_lock_wq);
@@ -280,6 +289,9 @@ void btrfs_tree_unlock(struct extent_buffer *eb)
 	if (blockers) {
 		WARN_ON(atomic_read(&eb->spinning_writers));
 		atomic_dec(&eb->blocking_writers);
+		/*
+		 * Make sure counter is updated before we wake up waiters.
+		 */
 		smp_mb();
 		if (waitqueue_active(&eb->write_lock_wq))
 			wake_up(&eb->write_lock_wq);
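
For illustration only, here is a minimal userspace sketch of the pattern the
hunks above document, using C11 seq_cst atomics and a pthread condition
variable in place of the kernel's atomic_dec_and_test()/waitqueue_active()/
wake_up() machinery. The names blockers, waiters, sleeper and waker are
hypothetical stand-ins, not from the patch. The point is the same in both
worlds: a full barrier between the counter update and the "any waiters?"
check is what makes the lockless check safe; atomic_dec_and_test() implies
that barrier, while a plain atomic_dec() needs an explicit smp_mb(), as in
the last hunk.

	#include <stdatomic.h>
	#include <pthread.h>
	#include <stdio.h>

	static atomic_int blockers = 1;	/* like eb->blocking_writers   */
	static atomic_int waiters  = 0;	/* like waitqueue_active()     */
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;

	static void *sleeper(void *arg)
	{
		(void)arg;
		/* Register as a waiter first (seq_cst RMW)... */
		atomic_fetch_add(&waiters, 1);
		pthread_mutex_lock(&lock);
		/* ...then re-check the condition before sleeping. */
		while (atomic_load(&blockers) != 0)
			pthread_cond_wait(&cond, &lock);
		pthread_mutex_unlock(&lock);
		atomic_fetch_sub(&waiters, 1);
		return NULL;
	}

	static void waker(void)
	{
		/*
		 * The seq_cst read-modify-write acts as a full barrier,
		 * so the waiters load below cannot be observed before
		 * the decrement. This mirrors atomic_dec_and_test()
		 * followed by waitqueue_active(): if no waiter is
		 * registered yet, the sleeper is guaranteed to see
		 * blockers == 0 when it re-checks, so skipping the
		 * broadcast cannot lose a wakeup.
		 */
		if (atomic_fetch_sub(&blockers, 1) == 1 &&
		    atomic_load(&waiters) > 0) {
			pthread_mutex_lock(&lock);
			pthread_cond_broadcast(&cond);
			pthread_mutex_unlock(&lock);
		}
	}

	int main(void)
	{
		pthread_t t;
		pthread_create(&t, NULL, sleeper, NULL);
		waker();
		pthread_join(&t, NULL);
		puts("sleeper woken");
		return 0;
	}

Build with e.g. cc -std=c11 -pthread. The mutex/condvar pair stands in for
the wait queue, and the sleeper's register-then-recheck sequence plays the
role that prepare_to_wait()/finish_wait() play on the kernel side.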