@@ -161,35 +161,61 @@ static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
return NULL;
}
 
+/* insert a new ref into the head ref rbtree */
+static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
+ struct rb_node *node)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent_node = NULL;
+ struct btrfs_delayed_ref_head *entry;
+ struct btrfs_delayed_ref_head *ins;
+ u64 bytenr;
+
+ ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
+ bytenr = ins->node.bytenr;
+ while (*p) {
+ parent_node = *p;
+ entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
+ href_node);
+
+ if (bytenr < entry->node.bytenr)
+ p = &(*p)->rb_left;
+ else if (bytenr > entry->node.bytenr)
+ p = &(*p)->rb_right;
+ else
+ return entry;
+ }
+
+ rb_link_node(node, parent_node, p);
+ rb_insert_color(node, root);
+ return NULL;
+}
+
/*
* find an head entry based on bytenr. This returns the delayed ref
* head if it was able to find one, or NULL if nothing was in that spot.
* If return_bigger is given, the next bigger entry is returned if no exact
* match is found.
*/
-static struct btrfs_delayed_ref_node *find_ref_head(struct rb_root *root,
- u64 bytenr,
- struct btrfs_delayed_ref_node **last,
- int return_bigger)
+static struct btrfs_delayed_ref_head *
+find_ref_head(struct rb_root *root, u64 bytenr,
+ struct btrfs_delayed_ref_head **last, int return_bigger)
{
struct rb_node *n;
- struct btrfs_delayed_ref_node *entry;
+ struct btrfs_delayed_ref_head *entry;
int cmp = 0;
 
again:
n = root->rb_node;
entry = NULL;
while (n) {
- entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
- WARN_ON(!entry->in_tree);
+ entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);
if (last)
*last = entry;
 
- if (bytenr < entry->bytenr)
+ if (bytenr < entry->node.bytenr)
cmp = -1;
- else if (bytenr > entry->bytenr)
- cmp = 1;
- else if (!btrfs_delayed_ref_is_head(entry))
+ else if (bytenr > entry->node.bytenr)
cmp = 1;
else
cmp = 0;
@@ -203,12 +229,12 @@ again:
}
if (entry && return_bigger) {
if (cmp > 0) {
- n = rb_next(&entry->rb_node);
+ n = rb_next(&entry->href_node);
if (!n)
n = rb_first(root);
- entry = rb_entry(n, struct btrfs_delayed_ref_node,
- rb_node);
- bytenr = entry->bytenr;
+ entry = rb_entry(n, struct btrfs_delayed_ref_head,
+ href_node);
+ bytenr = entry->node.bytenr;
return_bigger = 0;
goto again;
}
@@ -246,6 +272,12 @@ static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_node *ref)
{
rb_erase(&ref->rb_node, &delayed_refs->root);
+ if (btrfs_delayed_ref_is_head(ref)) {
+ struct btrfs_delayed_ref_head *head;
+
+ head = btrfs_delayed_node_to_head(ref);
+ rb_erase(&head->href_node, &delayed_refs->href_root);
+ }
ref->in_tree = 0;
btrfs_put_delayed_ref(ref);
delayed_refs->num_entries--;
@@ -379,42 +411,35 @@ int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
int count = 0;
struct btrfs_delayed_ref_root *delayed_refs;
struct rb_node *node;
- struct btrfs_delayed_ref_node *ref;
- struct btrfs_delayed_ref_head *head;
+ struct btrfs_delayed_ref_head *head = NULL;
 
delayed_refs = &trans->transaction->delayed_refs;
- if (start == 0) {
- node = rb_first(&delayed_refs->root);
- } else {
- ref = NULL;
- find_ref_head(&delayed_refs->root, start + 1, &ref, 1);
- if (ref) {
- node = &ref->rb_node;
- } else
- node = rb_first(&delayed_refs->root);
+ node = rb_first(&delayed_refs->href_root);
+
+ if (start) {
+ find_ref_head(&delayed_refs->href_root, start + 1, &head, 1);
+ if (head)
+ node = &head->href_node;
}
again:
while (node && count < 32) {
- ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
- if (btrfs_delayed_ref_is_head(ref)) {
- head = btrfs_delayed_node_to_head(ref);
- if (list_empty(&head->cluster)) {
- list_add_tail(&head->cluster, cluster);
- delayed_refs->run_delayed_start =
- head->node.bytenr;
- count++;
-
- WARN_ON(delayed_refs->num_heads_ready == 0);
- delayed_refs->num_heads_ready--;
- } else if (count) {
- /* the goal of the clustering is to find extents
- * that are likely to end up in the same extent
- * leaf on disk. So, we don't want them spread
- * all over the tree. Stop now if we've hit
- * a head that was already in use
- */
- break;
- }
+ head = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
+ if (list_empty(&head->cluster)) {
+ list_add_tail(&head->cluster, cluster);
+ delayed_refs->run_delayed_start =
+ head->node.bytenr;
+ count++;
+
+ WARN_ON(delayed_refs->num_heads_ready == 0);
+ delayed_refs->num_heads_ready--;
+ } else if (count) {
+ /* the goal of the clustering is to find extents
+ * that are likely to end up in the same extent
+ * leaf on disk. So, we don't want them spread
+ * all over the tree. Stop now if we've hit
+ * a head that was already in use
+ */
+ break;
}
node = rb_next(node);
}
@@ -426,7 +451,7 @@ again:
* clusters. start from the beginning and try again
*/
start = 0;
- node = rb_first(&delayed_refs->root);
+ node = rb_first(&delayed_refs->href_root);
goto again;
}
return 1;
@@ -612,6 +637,7 @@ static noinline void add_delayed_ref_head(struct btrfs_fs_info *fs_info,
*/
kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
} else {
+ htree_insert(&delayed_refs->href_root, &head_ref->href_node);
delayed_refs->num_heads++;
delayed_refs->num_heads_ready++;
delayed_refs->num_entries++;
@@ -869,14 +895,10 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
- struct btrfs_delayed_ref_node *ref;
struct btrfs_delayed_ref_root *delayed_refs;
 
delayed_refs = &trans->transaction->delayed_refs;
- ref = find_ref_head(&delayed_refs->root, bytenr, NULL, 0);
- if (ref)
- return btrfs_delayed_node_to_head(ref);
- return NULL;
+ return find_ref_head(&delayed_refs->href_root, bytenr, NULL, 0);
}
 
void btrfs_delayed_ref_exit(void)