@@ -80,13 +80,6 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
 {
	int i;

-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	/* lockdep really cares that we take all of these spinlocks
-	 * in the right order. If any of the locks in the path are not
-	 * currently blocking, it is going to complain. So, make really
-	 * really sure by forcing the path to blocking before we clear
-	 * the path blocking.
-	 */
	if (held) {
		btrfs_set_lock_blocking_rw(held, held_rw);
		if (held_rw == BTRFS_WRITE_LOCK)
@@ -95,7 +88,6 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
			held_rw = BTRFS_READ_LOCK_BLOCKING;
	}
	btrfs_set_path_blocking(p);
-#endif

	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
		if (p->nodes[i] && p->locks[i]) {
@@ -107,10 +99,8 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
		}
	}

-#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (held)
		btrfs_clear_lock_blocking_rw(held, held_rw);
-#endif
 }

 /* this also releases the path */
@@ -2893,7 +2883,7 @@ cow_done:
			}
			p->locks[level] = BTRFS_WRITE_LOCK;
		} else {
-			err = btrfs_try_tree_read_lock(b);
+			err = btrfs_tree_read_lock_atomic(b);
			if (!err) {
				btrfs_set_path_blocking(p);
				btrfs_tree_read_lock(b);
@@ -3025,7 +3015,7 @@ again:
		}

		level = btrfs_header_level(b);
-		err = btrfs_try_tree_read_lock(b);
+		err = btrfs_tree_read_lock_atomic(b);
		if (!err) {
			btrfs_set_path_blocking(p);
			btrfs_tree_read_lock(b);
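
Not shown in these hunks: the helper the two call sites switch to, btrfs_tree_read_lock_atomic(), which is presumably introduced alongside the other tree-lock helpers in fs/btrfs/locking.c. A minimal sketch of such a helper, assuming the extent_buffer lock fields (lock, blocking_writers, read_locks, spinning_readers) used by the existing btrfs_try_tree_read_lock(); the exact body added by the patch may differ:

/*
 * Sketch only: take a spinning read lock on the extent buffer without
 * ever waiting for a blocking writer.  Returns 1 if the read lock was
 * taken, 0 if a writer has gone blocking, in which case the callers
 * above fall back to btrfs_set_path_blocking() + btrfs_tree_read_lock().
 */
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers)) {
		/* a writer went blocking while we spun on the rwlock */
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}

The difference from btrfs_try_tree_read_lock(), as the call-site change suggests, is that the rwlock is taken with read_lock() rather than read_trylock(), so ordinary contention no longer pushes btrfs_search_slot() and btrfs_search_old_slot() into the blocking-lock fallback; only a writer that has actually gone blocking does.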