@@ -9687,6 +9687,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
 	u64 cur_offset = start;
 	u64 i_size;
 	u64 cur_bytes;
+	u64 last_alloc = (u64)-1;
 	int ret = 0;
 	bool own_trans = true;
 
@@ -9703,6 +9704,13 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
 
 		cur_bytes = min(num_bytes, 256ULL * 1024 * 1024);
 		cur_bytes = max(cur_bytes, min_size);
+		/*
+		 * If we are severely fragmented we could end up with really
+		 * small allocations, so if the allocator is returning small
+		 * chunks lets make its job easier by only searching for those
+		 * sized chunks.
+		 */
+		cur_bytes = min(cur_bytes, last_alloc);
 		ret = btrfs_reserve_extent(root, cur_bytes, min_size, 0,
 					   *alloc_hint, &ins, 1, 0);
 		if (ret) {
@@ -9711,6 +9719,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
 			break;
 		}
 
+		last_alloc = ins.offset;
 		ret = insert_reserved_file_extent(trans, inode,
 						  cur_offset, ins.objectid,
 						  ins.offset, ins.offset,