
Btrfs: delayed-refs: use rb_first_cached for ref_tree

rb_first_cached() trades an extra "leftmost" pointer for doing the same
job as rb_first(), but in O(1) instead of O(log n).

Functions manipulating href->ref_tree need to get the first entry, so
this patch converts href->ref_tree to use rb_first_cached().

For more details about the optimization see patch "Btrfs: delayed-refs:
use rb_first_cached for href_root".
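
For illustration (not code from the patch): the generic rb_root_cached
pattern this commit adopts looks roughly like the sketch below, where
struct item, item_insert() and item_first() are hypothetical names. The
insert tracks whether the new node ever descends right; if it never
does, it becomes the new cached leftmost.

/* Sketch only: generic rb_root_cached usage, not btrfs code. */
#include <linux/rbtree.h>
#include <linux/types.h>

struct item {
	u64 key;
	struct rb_node node;
};

static struct rb_root_cached tree = RB_ROOT_CACHED;

static void item_insert(struct item *ins)
{
	struct rb_node **p = &tree.rb_root.rb_node;
	struct rb_node *parent = NULL;
	bool leftmost = true;	/* cleared if we ever descend right */

	while (*p) {
		struct item *entry = rb_entry(*p, struct item, node);

		parent = *p;
		if (ins->key < entry->key) {
			p = &(*p)->rb_left;
		} else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&ins->node, parent, p);
	/* also updates the cached leftmost pointer when leftmost is true */
	rb_insert_color_cached(&ins->node, &tree, leftmost);
}

static struct item *item_first(void)
{
	/* O(1): reads the cached pointer, no O(log n) left-spine walk */
	struct rb_node *n = rb_first_cached(&tree);

	return n ? rb_entry(n, struct item, node) : NULL;
}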

Tested-by: Holger Hoffstätte <holger@applied-asynchrony.com>
Signed-off-by: Liu Bo <bo.liu@linux.alibaba.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Liu Bo · commit e3d0396563

 fs/btrfs/backref.c     |  2 +-
 fs/btrfs/delayed-ref.c | 24 ++++++++++++++----------
 fs/btrfs/delayed-ref.h |  2 +-
 fs/btrfs/disk-io.c     |  4 ++--
 fs/btrfs/extent-tree.c | 13 +++++++------
 5 files changed, 25 insertions(+), 20 deletions(-)

fs/btrfs/backref.c (+1 -1)

@@ -769,7 +769,7 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
 		btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key);
 
 	spin_lock(&head->lock);
-	for (n = rb_first(&head->ref_tree); n; n = rb_next(n)) {
+	for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
 		node = rb_entry(n, struct btrfs_delayed_ref_node,
 				ref_node);
 		if (node->seq > seq)

fs/btrfs/delayed-ref.c (+14 -10)

@@ -133,13 +133,14 @@ static struct btrfs_delayed_ref_head *htree_insert(struct rb_root_cached *root,
 	return NULL;
 }
 
-static struct btrfs_delayed_ref_node* tree_insert(struct rb_root *root,
+static struct btrfs_delayed_ref_node* tree_insert(struct rb_root_cached *root,
 		struct btrfs_delayed_ref_node *ins)
 {
-	struct rb_node **p = &root->rb_node;
+	struct rb_node **p = &root->rb_root.rb_node;
 	struct rb_node *node = &ins->ref_node;
 	struct rb_node *parent_node = NULL;
 	struct btrfs_delayed_ref_node *entry;
+	bool leftmost = true;
 
 	while (*p) {
 		int comp;
@@ -148,16 +149,18 @@ static struct btrfs_delayed_ref_node* tree_insert(struct rb_root *root,
 		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
 				 ref_node);
 		comp = comp_refs(ins, entry, true);
-		if (comp < 0)
+		if (comp < 0) {
 			p = &(*p)->rb_left;
-		else if (comp > 0)
+		} else if (comp > 0) {
 			p = &(*p)->rb_right;
-		else
+			leftmost = false;
+		} else {
 			return entry;
+		}
 	}
 
 	rb_link_node(node, parent_node, p);
-	rb_insert_color(node, root);
+	rb_insert_color_cached(node, root, leftmost);
 	return NULL;
 }
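
Note the one subtlety in tree_insert() above: leftmost starts out true
and is cleared the first time the descent steps right, since a node
that ever goes right cannot become the new leftmost;
rb_insert_color_cached() then updates the cached pointer only when
leftmost is still true.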
 
 
@@ -231,7 +234,7 @@ static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
 				    struct btrfs_delayed_ref_node *ref)
 {
 	lockdep_assert_held(&head->lock);
-	rb_erase(&ref->ref_node, &head->ref_tree);
+	rb_erase_cached(&ref->ref_node, &head->ref_tree);
 	RB_CLEAR_NODE(&ref->ref_node);
 	if (!list_empty(&ref->add_list))
 		list_del(&ref->add_list);
@@ -300,7 +303,7 @@ void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
 
 
 	lockdep_assert_held(&head->lock);
 
-	if (RB_EMPTY_ROOT(&head->ref_tree))
+	if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
 		return;
 
 	/* We don't have too many refs to merge for data. */
@@ -318,7 +321,8 @@ void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
 	spin_unlock(&fs_info->tree_mod_seq_lock);
 
 again:
-	for (node = rb_first(&head->ref_tree); node; node = rb_next(node)) {
+	for (node = rb_first_cached(&head->ref_tree); node;
+	     node = rb_next(node)) {
 		ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
 		if (seq && ref->seq >= seq)
 			continue;
@@ -573,7 +577,7 @@ static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
 	head_ref->must_insert_reserved = must_insert_reserved;
 	head_ref->is_data = is_data;
 	head_ref->is_system = is_system;
-	head_ref->ref_tree = RB_ROOT;
+	head_ref->ref_tree = RB_ROOT_CACHED;
 	INIT_LIST_HEAD(&head_ref->ref_add_list);
 	RB_CLEAR_NODE(&head_ref->href_node);
 	head_ref->processing = 0;

fs/btrfs/delayed-ref.h (+1 -1)

@@ -79,7 +79,7 @@ struct btrfs_delayed_ref_head {
 	struct mutex mutex;
 
 	spinlock_t lock;
-	struct rb_root ref_tree;
+	struct rb_root_cached ref_tree;
 	/* accumulate add BTRFS_ADD_DELAYED_REF nodes to this ref_add_list. */
 	struct list_head ref_add_list;
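
For context, the new field type is a plain rb_root plus the cached
pointer (this is its definition in include/linux/rbtree.h), which is
why the RB_EMPTY_ROOT() call sites now pass &head->ref_tree.rb_root:

struct rb_root_cached {
	struct rb_root rb_root;
	struct rb_node *rb_leftmost;
};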
 
 

fs/btrfs/disk-io.c (+2 -2)

@@ -4221,11 +4221,11 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
 			continue;
 		}
 		spin_lock(&head->lock);
-		while ((n = rb_first(&head->ref_tree)) != NULL) {
+		while ((n = rb_first_cached(&head->ref_tree)) != NULL) {
 			ref = rb_entry(n, struct btrfs_delayed_ref_node,
 				       ref_node);
 			ref->in_tree = 0;
-			rb_erase(&ref->ref_node, &head->ref_tree);
+			rb_erase_cached(&ref->ref_node, &head->ref_tree);
 			RB_CLEAR_NODE(&ref->ref_node);
 			if (!list_empty(&ref->add_list))
 				list_del(&ref->add_list);

fs/btrfs/extent-tree.c (+7 -6)

@@ -2374,7 +2374,7 @@ select_delayed_ref(struct btrfs_delayed_ref_head *head)
 {
 	struct btrfs_delayed_ref_node *ref;
 
-	if (RB_EMPTY_ROOT(&head->ref_tree))
+	if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
 		return NULL;
 
 	/*
@@ -2387,7 +2387,7 @@ select_delayed_ref(struct btrfs_delayed_ref_head *head)
 		return list_first_entry(&head->ref_add_list,
 				struct btrfs_delayed_ref_node, add_list);
 
-	ref = rb_entry(rb_first(&head->ref_tree),
+	ref = rb_entry(rb_first_cached(&head->ref_tree),
 		       struct btrfs_delayed_ref_node, ref_node);
 	ASSERT(list_empty(&ref->add_list));
 	return ref;
@@ -2448,7 +2448,7 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
 	spin_unlock(&head->lock);
 	spin_lock(&delayed_refs->lock);
 	spin_lock(&head->lock);
-	if (!RB_EMPTY_ROOT(&head->ref_tree) || head->extent_op) {
+	if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root) || head->extent_op) {
 		spin_unlock(&head->lock);
 		spin_unlock(&delayed_refs->lock);
 		return 1;
@@ -2597,7 +2597,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 
 
 		actual_count++;
 		ref->in_tree = 0;
-		rb_erase(&ref->ref_node, &locked_ref->ref_tree);
+		rb_erase_cached(&ref->ref_node, &locked_ref->ref_tree);
 		RB_CLEAR_NODE(&ref->ref_node);
 		if (!list_empty(&ref->add_list))
 			list_del(&ref->add_list);
@@ -3040,7 +3040,8 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
 	 * XXX: We should replace this with a proper search function in the
 	 * future.
 	 */
-	for (node = rb_first(&head->ref_tree); node; node = rb_next(node)) {
+	for (node = rb_first_cached(&head->ref_tree); node;
+	     node = rb_next(node)) {
 		ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
 		/* If it's a shared ref we know a cross reference exists */
 		if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
@@ -6908,7 +6909,7 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
 		goto out_delayed_unlock;
 
 	spin_lock(&head->lock);
-	if (!RB_EMPTY_ROOT(&head->ref_tree))
+	if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root))
 		goto out;
 
 	if (head->extent_op) {
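
Taken together, the conversion is mechanical: rb_first() becomes
rb_first_cached(), rb_erase() becomes rb_erase_cached(), RB_ROOT
becomes RB_ROOT_CACHED, and RB_EMPTY_ROOT() now checks the embedded
.rb_root; only tree_insert() needed new logic to track leftmost.
Iteration via rb_next() is unchanged, so only the first-entry lookup
gets the O(1) win.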