@@ -2522,11 +2522,11 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 		if (ref && ref->seq &&
 		    btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
 			spin_unlock(&locked_ref->lock);
-			btrfs_delayed_ref_unlock(locked_ref);
 			spin_lock(&delayed_refs->lock);
 			locked_ref->processing = 0;
 			delayed_refs->num_heads_ready++;
 			spin_unlock(&delayed_refs->lock);
+			btrfs_delayed_ref_unlock(locked_ref);
 			locked_ref = NULL;
 			cond_resched();
 			count++;
@@ -2572,7 +2572,10 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 				 */
 				if (must_insert_reserved)
 					locked_ref->must_insert_reserved = 1;
+				spin_lock(&delayed_refs->lock);
 				locked_ref->processing = 0;
+				delayed_refs->num_heads_ready++;
+				spin_unlock(&delayed_refs->lock);
 				btrfs_debug(fs_info,
 					    "run_delayed_extent_op returned %d",
 					    ret);
@@ -7384,7 +7387,8 @@ btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,

 	spin_unlock(&cluster->refill_lock);

-	down_read(&used_bg->data_rwsem);
+	/* We should only have one-level nested. */
+	down_read_nested(&used_bg->data_rwsem, SINGLE_DEPTH_NESTING);

 	spin_lock(&cluster->refill_lock);
 	if (used_bg == cluster->block_group)