@@ -550,13 +550,14 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
                      struct btrfs_delayed_ref_node *ref,
                      struct btrfs_qgroup_extent_record *qrecord,
                      u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
-                     int action, int is_data)
+                     int action, int is_data, int *qrecord_inserted_ret)
 {
         struct btrfs_delayed_ref_head *existing;
         struct btrfs_delayed_ref_head *head_ref = NULL;
         struct btrfs_delayed_ref_root *delayed_refs;
         int count_mod = 1;
         int must_insert_reserved = 0;
+        int qrecord_inserted = 0;
 
         /* If reserved is provided, it must be a data extent. */
         BUG_ON(!is_data && reserved);
@@ -623,6 +624,8 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
                 if(btrfs_qgroup_trace_extent_nolock(fs_info,
                                         delayed_refs, qrecord))
                         kfree(qrecord);
+                else
+                        qrecord_inserted = 1;
         }
 
         spin_lock_init(&head_ref->lock);
@@ -650,6 +653,8 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
                 atomic_inc(&delayed_refs->num_entries);
                 trans->delayed_ref_updates++;
         }
+        if (qrecord_inserted_ret)
+                *qrecord_inserted_ret = qrecord_inserted;
         return head_ref;
 }
 
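The three hunks above carry the core of the change: add_delayed_ref_head() now tracks whether the qgroup extent record actually made it into the delayed-refs tree. A nonzero return from btrfs_qgroup_trace_extent_nolock() means the record was not taken, so the local copy is freed; otherwise qrecord_inserted is set. The result is written through the new qrecord_inserted_ret out-parameter only when the caller supplied one, so passing NULL opts out. A self-contained userspace sketch of that insert-or-free plus guarded-report convention, with purely illustrative names that are not part of the patch:

    #include <stdlib.h>

    struct qrecord { long bytenr; };

    static struct qrecord *slot;        /* one-slot "tree", enough for a sketch */

    /* Toy stand-in for btrfs_qgroup_trace_extent_nolock(): nonzero means
     * the record was not inserted and the caller still owns it.
     */
    static int trace_extent_nolock(struct qrecord *rec)
    {
            if (slot && slot->bytenr == rec->bytenr)
                    return 1;           /* duplicate: reject */
            slot = rec;                 /* inserted: the tree owns it now */
            return 0;
    }

    /* Mirrors the add_delayed_ref_head() logic above: free rejected
     * duplicates and report the outcome only to callers that asked.
     */
    static void add_head(struct qrecord *rec, int *inserted_ret)
    {
            int inserted = 0;

            if (rec) {
                    if (trace_extent_nolock(rec))
                            free(rec);  /* not inserted: drop our copy */
                    else
                            inserted = 1;
            }
            if (inserted_ret)           /* NULL means "don't care" */
                    *inserted_ret = inserted;
    }

    int main(void)
    {
            struct qrecord *rec = malloc(sizeof(*rec));
            int inserted;

            if (!rec)
                    return 1;
            rec->bytenr = 4096;
            add_head(rec, &inserted);   /* first insert: inserted == 1 */
            add_head(NULL, NULL);       /* extent_op-style caller opts out */
            return !inserted;
    }

The hunks below update the three callers accordingly.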
@@ -779,6 +784,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
         struct btrfs_delayed_ref_head *head_ref;
         struct btrfs_delayed_ref_root *delayed_refs;
         struct btrfs_qgroup_extent_record *record = NULL;
+        int qrecord_inserted;
 
         BUG_ON(extent_op && extent_op->is_data);
         ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
@@ -806,12 +812,15 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
          * the spin lock
          */
         head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
-                                        bytenr, num_bytes, 0, 0, action, 0);
+                                        bytenr, num_bytes, 0, 0, action, 0,
+                                        &qrecord_inserted);
 
         add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
                              num_bytes, parent, ref_root, level, action);
         spin_unlock(&delayed_refs->lock);
 
+        if (qrecord_inserted)
+                return btrfs_qgroup_trace_extent_post(fs_info, record);
         return 0;
 
 free_head_ref:
@@ -835,6 +844,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
         struct btrfs_delayed_ref_head *head_ref;
         struct btrfs_delayed_ref_root *delayed_refs;
         struct btrfs_qgroup_extent_record *record = NULL;
+        int qrecord_inserted;
 
         ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
         if (!ref)
@@ -868,13 +878,15 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
          */
         head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
                                         bytenr, num_bytes, ref_root, reserved,
-                                        action, 1);
+                                        action, 1, &qrecord_inserted);
 
         add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
                              num_bytes, parent, ref_root, owner, offset,
                              action);
         spin_unlock(&delayed_refs->lock);
 
+        if (qrecord_inserted)
+                return btrfs_qgroup_trace_extent_post(fs_info, record);
         return 0;
 }
 
@@ -897,7 +909,7 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
 
         add_delayed_ref_head(fs_info, trans, &head_ref->node, NULL, bytenr,
                              num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
-                             extent_op->is_data);
+                             extent_op->is_data, NULL);
 
         spin_unlock(&delayed_refs->lock);
         return 0;
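Seen together, the caller hunks enforce one shape: the qgroup record is inserted while delayed_refs->lock is held, but btrfs_qgroup_trace_extent_post() runs only after the lock is dropped, and only when the record was freshly inserted; btrfs_add_delayed_extent_op() passes NULL for qrecord_inserted_ret since it never supplies a record. A self-contained sketch of that lock-scope pattern, with a pthread mutex standing in for the delayed-refs spinlock and every name illustrative rather than taken from btrfs:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
    static int tree[16];                 /* toy record tree */

    /* Insert under the lock and report whether the insert stuck;
     * mirrors the add_delayed_ref_head() + &qrecord_inserted calls above.
     */
    static void insert_record(int key, int *inserted_ret)
    {
            int inserted = 0;

            pthread_mutex_lock(&tree_lock);
            if (!tree[key % 16]) {       /* no duplicate yet */
                    tree[key % 16] = 1;
                    inserted = 1;
            }
            pthread_mutex_unlock(&tree_lock);

            if (inserted_ret)
                    *inserted_ret = inserted;
    }

    /* Stands in for btrfs_qgroup_trace_extent_post(); the property that
     * matters here is that it is only ever called with no lock held.
     */
    static void post_process(int key)
    {
            printf("post-processing %d outside the lock\n", key);
    }

    int main(void)
    {
            int inserted;

            insert_record(42, &inserted);
            if (inserted)                /* fresh insert: do the heavy step */
                    post_process(42);

            insert_record(42, &inserted);
            if (inserted)                /* duplicate: skipped this time */
                    post_process(42);
            return 0;
    }

Reporting through an out-parameter rather than the return value keeps add_delayed_ref_head()'s existing return (the head ref) intact while still letting each caller defer the heavier qgroup step until after the spinlock is released.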