@@ -6921,6 +6921,7 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
 	bool failed_alloc = false;
 	bool use_cluster = true;
 	bool have_caching_bg = false;
+	bool full_search = false;
 
 	WARN_ON(num_bytes < root->sectorsize);
 	ins->type = BTRFS_EXTENT_ITEM_KEY;
@@ -7023,6 +7024,8 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
 	}
 search:
 	have_caching_bg = false;
+	if (index == 0 || index == __get_raid_index(flags))
+		full_search = true;
 	down_read(&space_info->groups_sem);
 	list_for_each_entry(block_group, &space_info->block_groups[index],
 			    list) {
@@ -7056,6 +7059,7 @@ search:
 have_block_group:
 		cached = block_group_cache_done(block_group);
 		if (unlikely(!cached)) {
+			have_caching_bg = true;
 			ret = cache_block_group(block_group, 0);
 			BUG_ON(ret < 0);
 			ret = 0;
@@ -7228,8 +7232,6 @@ unclustered_alloc:
 			failed_alloc = true;
 			goto have_block_group;
 		} else if (!offset) {
-			if (!cached)
-				have_caching_bg = true;
 			goto loop;
 		}
 checks:
@@ -7286,7 +7288,20 @@ loop:
 	 */
 	if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
 		index = 0;
-		loop++;
+		if (loop == LOOP_CACHING_NOWAIT) {
+			/*
+			 * We want to skip the LOOP_CACHING_WAIT step if we
+			 * don't have any uncached bgs and we've already done a
+			 * full search through.
+			 */
+			if (have_caching_bg || !full_search)
+				loop = LOOP_CACHING_WAIT;
+			else
+				loop = LOOP_ALLOC_CHUNK;
+		} else {
+			loop++;
+		}
+
 		if (loop == LOOP_ALLOC_CHUNK) {
 			struct btrfs_trans_handle *trans;
 			int exist = 0;
@@ -7304,6 +7319,15 @@ loop:
 
 			ret = do_chunk_alloc(trans, root, flags,
 					     CHUNK_ALLOC_FORCE);
+
+			/*
+			 * If we can't allocate a new chunk we've already looped
+			 * through at least once, move on to the NO_EMPTY_SIZE
+			 * case.
+			 */
+			if (ret == -ENOSPC)
+				loop = LOOP_NO_EMPTY_SIZE;
+
 			/*
 			 * Do not bail out on ENOSPC since we
 			 * can do more things.
@@ -7320,6 +7344,15 @@ loop:
 		}
 
 		if (loop == LOOP_NO_EMPTY_SIZE) {
+			/*
+			 * Don't loop again if we already have no empty_size and
+			 * no empty_cluster.
+			 */
+			if (empty_size == 0 &&
+			    empty_cluster == 0) {
+				ret = -ENOSPC;
+				goto out;
+			}
 			empty_size = 0;
 			empty_cluster = 0;
 		}
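
Taken together, these hunks rework the retry loop at the bottom of find_free_extent(): have_caching_bg is now set as soon as an uncached block group is seen, full_search records whether every raid index was covered from the first pass, LOOP_CACHING_WAIT is skipped when nothing is left to cache, a failed forced chunk allocation jumps straight to LOOP_NO_EMPTY_SIZE, and that final stage returns -ENOSPC immediately when empty_size and empty_cluster are already zero. Below is a minimal standalone sketch of those transition rules, not kernel code: next_pass() and its chunk_alloc_enospc parameter are invented for illustration (in the real function the forced do_chunk_alloc() happens inside the LOOP_ALLOC_CHUNK pass itself).

#include <stdbool.h>
#include <errno.h>

/* Mirrors the LOOP_* stages used by find_free_extent(). */
enum { LOOP_CACHING_NOWAIT, LOOP_CACHING_WAIT, LOOP_ALLOC_CHUNK,
       LOOP_NO_EMPTY_SIZE };

/*
 * Hypothetical helper, for illustration only: given the state the patch
 * tracks, pick the next pass of the retry loop.  A non-negative return is
 * the next LOOP_* stage; -ENOSPC means another pass cannot help.
 */
static int next_pass(int loop, bool have_caching_bg, bool full_search,
		     bool chunk_alloc_enospc,
		     unsigned long long empty_size,
		     unsigned long long empty_cluster)
{
	if (loop == LOOP_CACHING_NOWAIT) {
		/*
		 * Skip LOOP_CACHING_WAIT when no block group is still
		 * caching and every raid index was already searched.
		 */
		if (have_caching_bg || !full_search)
			loop = LOOP_CACHING_WAIT;
		else
			loop = LOOP_ALLOC_CHUNK;
	} else {
		loop++;
	}

	/* A failed forced chunk allocation makes the next pass pointless. */
	if (loop == LOOP_ALLOC_CHUNK && chunk_alloc_enospc)
		loop = LOOP_NO_EMPTY_SIZE;

	/*
	 * The last stage only helps if it still has something to relax;
	 * with empty_size and empty_cluster both zero, give up now.
	 */
	if (loop == LOOP_NO_EMPTY_SIZE && empty_size == 0 && empty_cluster == 0)
		return -ENOSPC;

	return loop;
}

The intent, as far as the hunks show, is that each extra pass is only taken when it can still change the outcome, so a full or badly fragmented filesystem reaches -ENOSPC quickly instead of re-walking every block group several more times.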