@@ -4204,6 +4204,104 @@ static int flush_space(struct btrfs_root *root,
 	return ret;
 }
 
+
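+/*
+ * Work out how much metadata space needs to be flushed before new
+ * reservations can succeed again.  Returns 0 if a modest reservation
+ * (min(num_online_cpus() * 1M, 16M)) would still fit via overcommit;
+ * otherwise returns the amount by which usage exceeds ~90-95% of the
+ * total space, clamped to what is actually reclaimable.
+ */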
+static inline u64
+btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
+				 struct btrfs_space_info *space_info)
+{
+	u64 used;
+	u64 expected;
+	u64 to_reclaim;
+
+	to_reclaim = min_t(u64, num_online_cpus() * 1024 * 1024,
+				16 * 1024 * 1024);
+	spin_lock(&space_info->lock);
+	if (can_overcommit(root, space_info, to_reclaim,
+			   BTRFS_RESERVE_FLUSH_ALL)) {
+		to_reclaim = 0;
+		goto out;
+	}
+
+	used = space_info->bytes_used + space_info->bytes_reserved +
+	       space_info->bytes_pinned + space_info->bytes_readonly +
+	       space_info->bytes_may_use;
+	if (can_overcommit(root, space_info, 1024 * 1024,
+			   BTRFS_RESERVE_FLUSH_ALL))
+		expected = div_factor_fine(space_info->total_bytes, 95);
+	else
+		expected = div_factor_fine(space_info->total_bytes, 90);
+
+	if (used > expected)
+		to_reclaim = used - expected;
+	else
+		to_reclaim = 0;
+	to_reclaim = min(to_reclaim, space_info->bytes_may_use +
+				     space_info->bytes_reserved);
+out:
+	spin_unlock(&space_info->lock);
+
+	return to_reclaim;
+}
+
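+/*
+ * Background reclaim is only worth starting once usage crosses 98% of
+ * the total metadata space, and never while the filesystem is being
+ * unmounted or remounted.
+ */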
+static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
+					struct btrfs_fs_info *fs_info, u64 used)
+{
+	return (used >= div_factor_fine(space_info->total_bytes, 98) &&
+		!btrfs_fs_closing(fs_info) &&
+		!test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
+}
+
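+/*
+ * Locked variant of the check above: recompute the current usage under
+ * space_info->lock before deciding whether reclaim must continue.
+ */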
+static int btrfs_need_do_async_reclaim(struct btrfs_space_info *space_info,
+				       struct btrfs_fs_info *fs_info)
+{
+	u64 used;
+
+	spin_lock(&space_info->lock);
+	used = space_info->bytes_used + space_info->bytes_reserved +
+	       space_info->bytes_pinned + space_info->bytes_readonly +
+	       space_info->bytes_may_use;
+	if (need_do_async_reclaim(space_info, fs_info, used)) {
+		spin_unlock(&space_info->lock);
+		return 1;
+	}
+	spin_unlock(&space_info->lock);
+
+	return 0;
+}
+
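+/*
+ * Worker for fs_info->async_reclaim_work: flush metadata space in the
+ * background until the pressure is gone or every flush state up to a
+ * transaction commit has been tried.
+ */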
+static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
+{
+	struct btrfs_fs_info *fs_info;
+	struct btrfs_space_info *space_info;
+	u64 to_reclaim;
+	int flush_state;
+
+	fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
+	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
+
+	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
+						      space_info);
+	if (!to_reclaim)
+		return;
+
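+	/*
+	 * Walk the flush states from delayed-item flushing up to a full
+	 * transaction commit, bailing out as soon as the pressure is gone.
+	 */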
+	flush_state = FLUSH_DELAYED_ITEMS_NR;
+	do {
+		flush_space(fs_info->fs_root, space_info, to_reclaim,
+			    to_reclaim, flush_state);
+		flush_state++;
+		if (!btrfs_need_do_async_reclaim(space_info, fs_info))
+			return;
+	} while (flush_state <= COMMIT_TRANS);
+
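+	/* Still over the threshold after a full pass: requeue ourselves. */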
+	if (btrfs_need_do_async_reclaim(space_info, fs_info))
+		queue_work(system_unbound_wq, work);
+}
+
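+/* Wire the reclaim worker up to the given work item. */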
+void btrfs_init_async_reclaim_work(struct work_struct *work)
+{
+	INIT_WORK(work, btrfs_async_reclaim_metadata_space);
+}
+
 /**
  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
  * @root - the root we're allocating for
@@ -4311,8 +4409,13 @@ again:
 	if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
 		flushing = true;
 		space_info->flush = 1;
+	} else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
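+		/*
+		 * The reservation succeeded but may have pushed usage over
+		 * the 98% threshold; kick the background reclaimer unless
+		 * it is already running.
+		 */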
+		used += orig_bytes;
+		if (need_do_async_reclaim(space_info, root->fs_info, used) &&
+		    !work_busy(&root->fs_info->async_reclaim_work))
+			queue_work(system_unbound_wq,
+				   &root->fs_info->async_reclaim_work);
 	}
-
 	spin_unlock(&space_info->lock);
 
 	if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
|