@@ -33,6 +33,7 @@
 #include "raid56.h"
 #include "locking.h"
 #include "free-space-cache.h"
+#include "free-space-tree.h"
 #include "math.h"
 #include "sysfs.h"
 #include "qgroup.h"
@@ -518,7 +519,10 @@ static noinline void caching_thread(struct btrfs_work *work)
 	mutex_lock(&caching_ctl->mutex);
 	down_read(&fs_info->commit_root_sem);
 
-	ret = load_extent_tree_free(caching_ctl);
+	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
+		ret = load_free_space_tree(caching_ctl);
+	else
+		ret = load_extent_tree_free(caching_ctl);
 
 	spin_lock(&block_group->lock);
 	block_group->caching_ctl = NULL;
@@ -624,8 +628,8 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
 		}
 	} else {
 		/*
-		 * We are not going to do the fast caching, set cached to the
-		 * appropriate value and wakeup any waiters.
+		 * We're either using the free space tree or no caching at all.
+		 * Set cached to the appropriate value and wakeup any waiters.
 		 */
 		spin_lock(&cache->lock);
 		if (load_cache_only) {
@@ -6479,6 +6483,13 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
 			}
 		}
 
+		ret = add_to_free_space_tree(trans, root->fs_info, bytenr,
+					     num_bytes);
+		if (ret) {
+			btrfs_abort_transaction(trans, extent_root, ret);
+			goto out;
+		}
+
 		ret = update_block_group(trans, root, bytenr, num_bytes, 0);
 		if (ret) {
 			btrfs_abort_transaction(trans, extent_root, ret);
@@ -7422,6 +7433,11 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
 	btrfs_mark_buffer_dirty(path->nodes[0]);
 	btrfs_free_path(path);
 
+	ret = remove_from_free_space_tree(trans, fs_info, ins->objectid,
+					  ins->offset);
+	if (ret)
+		return ret;
+
 	ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
 	if (ret) { /* -ENOENT, logic error */
 		btrfs_err(fs_info, "update block group failed for %llu %llu",
@@ -7503,6 +7519,11 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
 	btrfs_mark_buffer_dirty(leaf);
 	btrfs_free_path(path);
 
+	ret = remove_from_free_space_tree(trans, fs_info, ins->objectid,
+					  num_bytes);
+	if (ret)
+		return ret;
+
 	ret = update_block_group(trans, root, ins->objectid, root->nodesize,
 				 1);
 	if (ret) { /* -ENOENT, logic error */
@@ -9370,6 +9391,8 @@ btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
 	cache->full_stripe_len = btrfs_full_stripe_len(root,
 					       &root->fs_info->mapping_tree,
 					       start);
+	set_free_space_tree_thresholds(cache);
+
 	atomic_set(&cache->count, 1);
 	spin_lock_init(&cache->lock);
 	init_rwsem(&cache->data_rwsem);
@@ -9592,6 +9615,8 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
 					       key.objectid, key.offset);
 		if (ret)
 			btrfs_abort_transaction(trans, extent_root, ret);
+		add_block_group_free_space(trans, root->fs_info, block_group);
+		/* already aborted the transaction if it failed. */
 next:
 		list_del_init(&block_group->bg_list);
 	}
@@ -9622,6 +9647,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 	cache->flags = type;
 	cache->last_byte_to_unpin = (u64)-1;
 	cache->cached = BTRFS_CACHE_FINISHED;
+	cache->needs_free_space = 1;
 	ret = exclude_super_stripes(root, cache);
 	if (ret) {
 		/*
@@ -9984,6 +10010,10 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 
 	unlock_chunks(root);
 
+	ret = remove_block_group_free_space(trans, root->fs_info, block_group);
+	if (ret)
+		goto out;
+
 	btrfs_put_block_group(block_group);
 	btrfs_put_block_group(block_group);
 