|
@@ -197,6 +197,119 @@ static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
|
|
trans->delayed_ref_updates--;
|
|
trans->delayed_ref_updates--;
|
|
}
|
|
}
|
|
|
|
|
|
|
|
/*
 * Try to merge the delayed ref @ref with later entries on @head's ref_list
 * that act on the same extent, so fewer delayed refs need to be run.
 *
 * Refs with seq >= @seq (when @seq is non-zero) are pinned by the tree mod
 * log and are skipped.  Refs are only candidates when they have the same
 * type and no_quota setting and compare equal via comp_tree_refs() /
 * comp_data_refs() for their kind.
 *
 * Returns true when @ref itself was dropped (merged away or cancelled to a
 * zero ref_mod), meaning the caller's iterator is stale and it must restart
 * its walk; returns false when @ref survived the pass.
 *
 * Caller must hold head->lock (see btrfs_merge_delayed_refs()).
 */
static bool merge_ref(struct btrfs_trans_handle *trans,
		      struct btrfs_delayed_ref_root *delayed_refs,
		      struct btrfs_delayed_ref_head *head,
		      struct btrfs_delayed_ref_node *ref,
		      u64 seq)
{
	struct btrfs_delayed_ref_node *next;
	bool done = false;

	next = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
				list);
	while (!done && &next->list != &head->ref_list) {
		int mod;
		struct btrfs_delayed_ref_node *next2;

		/* Grab the successor now: @next may be freed further down. */
		next2 = list_next_entry(next, list);

		/* Don't try to merge @ref with itself. */
		if (next == ref)
			goto next;

		/* Refs at or past the oldest tree-mod-log seq must be kept. */
		if (seq && next->seq >= seq)
			goto next;

		if (next->type != ref->type || next->no_quota != ref->no_quota)
			goto next;

		/* Mergeable only if the type-specific comparators say equal. */
		if ((ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
		     ref->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
		    comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref),
				   btrfs_delayed_node_to_tree_ref(next),
				   ref->type))
			goto next;
		if ((ref->type == BTRFS_EXTENT_DATA_REF_KEY ||
		     ref->type == BTRFS_SHARED_DATA_REF_KEY) &&
		    comp_data_refs(btrfs_delayed_node_to_data_ref(ref),
				   btrfs_delayed_node_to_data_ref(next)))
			goto next;

		if (ref->action == next->action) {
			/* Same action: the counts simply add up. */
			mod = next->ref_mod;
		} else {
			/*
			 * Opposite actions cancel.  Keep whichever node has
			 * the larger ref_mod as the survivor; if @next wins,
			 * swap the pointers so @ref names the survivor and
			 * mark done, since the caller's original @ref is the
			 * one being dropped below.
			 */
			if (ref->ref_mod < next->ref_mod) {
				swap(ref, next);
				done = true;
			}
			mod = -next->ref_mod;
		}

		drop_delayed_ref(trans, delayed_refs, head, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			/* Fully cancelled out: drop the survivor too. */
			drop_delayed_ref(trans, delayed_refs, head, ref);
			done = true;
		} else {
			/*
			 * Can't have multiples of the same ref on a tree block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
next:
		next = next2;
	}

	return done;
}
|
|
|
|
+
|
|
|
|
/*
 * Merge mergeable delayed refs queued on @head before they are run.
 *
 * Data heads are skipped (few refs to merge there).  The oldest active
 * tree-mod-log sequence number is sampled under tree_mod_seq_lock and
 * passed to merge_ref() so that refs still needed by tree-mod-log users
 * are never merged away.
 *
 * Whenever merge_ref() reports that the current ref was dropped, the walk
 * restarts from the head of the list, since the iterator is stale.
 *
 * Caller must hold head->lock (asserted below).
 */
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_node *ref;
	u64 seq = 0;

	assert_spin_locked(&head->lock);

	if (list_empty(&head->ref_list))
		return;

	/* We don't have too many refs to merge for data. */
	if (head->is_data)
		return;

	/* Sample the oldest tree-mod-log seq still in use, if any. */
	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		struct seq_list *elem;

		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		seq = elem->seq;
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
			       list);
	while (&ref->list != &head->ref_list) {
		/* Refs pinned by the tree mod log can't be merged. */
		if (seq && ref->seq >= seq)
			goto next;

		if (merge_ref(trans, delayed_refs, head, ref, seq)) {
			/* @ref was dropped; restart from the list head. */
			if (list_empty(&head->ref_list))
				break;
			ref = list_first_entry(&head->ref_list,
					       struct btrfs_delayed_ref_node,
					       list);
			continue;
		}
next:
		ref = list_next_entry(ref, list);
	}
}
|
|
|
|
+
|
|
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
|
|
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
|
|
struct btrfs_delayed_ref_root *delayed_refs,
|
|
struct btrfs_delayed_ref_root *delayed_refs,
|
|
u64 seq)
|
|
u64 seq)
|