|
@@ -3059,16 +3059,19 @@ static void update_balance_args(struct btrfs_balance_control *bctl)
|
|
|
* (albeit full) chunks.
|
|
|
*/
|
|
|
if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
|
|
|
+ !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
|
|
|
!(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
|
|
|
bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
|
|
|
bctl->data.usage = 90;
|
|
|
}
|
|
|
if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
|
|
|
+ !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
|
|
|
!(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
|
|
|
bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
|
|
|
bctl->sys.usage = 90;
|
|
|
}
|
|
|
if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
|
|
|
+ !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
|
|
|
!(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
|
|
|
bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
|
|
|
bctl->meta.usage = 90;
|
|
@@ -3122,6 +3125,39 @@ static int chunk_profiles_filter(u64 chunk_type,
|
|
|
|
|
|
+static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info,
|
|
|
+		u64 chunk_offset, struct btrfs_balance_args *bargs)
|
|
|
+{
|
|
|
+ struct btrfs_block_group_cache *cache;
|
|
|
+ u64 chunk_used;
|
|
|
+ u64 user_thresh_min;
|
|
|
+ u64 user_thresh_max;
|
|
|
+ int ret = 1;
|
|
|
+
|
|
|
+ cache = btrfs_lookup_block_group(fs_info, chunk_offset);
|
|
|
+ chunk_used = btrfs_block_group_used(&cache->item);
|
|
|
+
|
|
|
+ if (bargs->usage_min == 0)
|
|
|
+ user_thresh_min = 0;
|
|
|
+ else
|
|
|
+ user_thresh_min = div_factor_fine(cache->key.offset,
|
|
|
+ bargs->usage_min);
|
|
|
+
|
|
|
+ if (bargs->usage_max == 0)
|
|
|
+ user_thresh_max = 1;
|
|
|
+ else if (bargs->usage_max > 100)
|
|
|
+ user_thresh_max = cache->key.offset;
|
|
|
+ else
|
|
|
+ user_thresh_max = div_factor_fine(cache->key.offset,
|
|
|
+ bargs->usage_max);
|
|
|
+
|
|
|
+ if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
|
|
|
+ ret = 0;
|
|
|
+
|
|
|
+ btrfs_put_block_group(cache);
|
|
|
+ return ret;
|
|
|
+}
|
|
|
+
|
|
|
static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
|
|
|
			      struct btrfs_balance_args *bargs)
|
|
|
{
|
|
|
struct btrfs_block_group_cache *cache;
|
|
|
u64 chunk_used, user_thresh;
|
|
@@ -3130,7 +3166,7 @@ static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
|
|
|
cache = btrfs_lookup_block_group(fs_info, chunk_offset);
|
|
|
chunk_used = btrfs_block_group_used(&cache->item);
|
|
|
|
|
|
- if (bargs->usage == 0)
|
|
|
+ if (bargs->usage_min == 0)
|
|
|
user_thresh = 1;
|
|
|
else if (bargs->usage > 100)
|
|
|
user_thresh = cache->key.offset;
|
|
@@ -3279,6 +3315,9 @@ static int should_balance_chunk(struct btrfs_root *root,
|
|
|
if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
|
|
|
chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
|
|
|
return 0;
|
|
|
+ } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
|
|
|
+ chunk_usage_range_filter(bctl->fs_info, chunk_offset, bargs)) {
|
|
|
+ return 0;
|
|
|
}
|
|
|
|
|
|
/* devid filter */
|