@@ -322,6 +322,7 @@ static struct full_stripe_lock *insert_full_stripe_lock(
         struct rb_node *parent = NULL;
         struct full_stripe_lock *entry;
         struct full_stripe_lock *ret;
+        unsigned int nofs_flag;
 
         lockdep_assert_held(&locks_root->lock);
 
@@ -339,8 +340,17 @@ static struct full_stripe_lock *insert_full_stripe_lock(
                 }
         }
 
-        /* Insert new lock */
+        /*
+         * Insert new lock.
+         *
+         * We must use GFP_NOFS because the scrub task might be waiting for a
+         * worker task executing this function and in turn a transaction commit
+         * might be waiting the scrub task to pause (which needs to wait for all
+         * the worker tasks to complete before pausing).
+         */
+        nofs_flag = memalloc_nofs_save();
         ret = kmalloc(sizeof(*ret), GFP_KERNEL);
+        memalloc_nofs_restore(nofs_flag);
         if (!ret)
                 return ERR_PTR(-ENOMEM);
         ret->logical = fstripe_logical;
@@ -1622,8 +1632,19 @@ static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
         mutex_lock(&sctx->wr_lock);
 again:
         if (!sctx->wr_curr_bio) {
+                unsigned int nofs_flag;
+
+                /*
+                 * We must use GFP_NOFS because the scrub task might be waiting
+                 * for a worker task executing this function and in turn a
+                 * transaction commit might be waiting the scrub task to pause
+                 * (which needs to wait for all the worker tasks to complete
+                 * before pausing).
+                 */
+                nofs_flag = memalloc_nofs_save();
                 sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio),
                                             GFP_KERNEL);
+                memalloc_nofs_restore(nofs_flag);
                 if (!sctx->wr_curr_bio) {
                         mutex_unlock(&sctx->wr_lock);
                         return -ENOMEM;
@@ -3775,6 +3796,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
         struct scrub_ctx *sctx;
         int ret;
         struct btrfs_device *dev;
+        unsigned int nofs_flag;
 
         if (btrfs_fs_closing(fs_info))
                 return -EINVAL;
@@ -3878,6 +3900,16 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
         atomic_inc(&fs_info->scrubs_running);
         mutex_unlock(&fs_info->scrub_lock);
 
+        /*
+         * In order to avoid deadlock with reclaim when there is a transaction
+         * trying to pause scrub, make sure we use GFP_NOFS for all the
+         * allocations done at btrfs_scrub_pages() and scrub_pages_for_parity()
+         * invoked by our callees. The pausing request is done when the
+         * transaction commit starts, and it blocks the transaction until scrub
+         * is paused (done at specific points at scrub_stripe() or right above
+         * before incrementing fs_info->scrubs_running).
+         */
+        nofs_flag = memalloc_nofs_save();
         if (!is_dev_replace) {
                 /*
                  * by holding device list mutex, we can
@@ -3890,6 +3922,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
 
         if (!ret)
                 ret = scrub_enumerate_chunks(sctx, dev, start, end);
+        memalloc_nofs_restore(nofs_flag);
 
         wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
         atomic_dec(&fs_info->scrubs_running);
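
For reference, a minimal sketch (not part of the patch) of the scoped NOFS pattern the hunks above rely on: memalloc_nofs_save()/memalloc_nofs_restore(), from <linux/sched/mm.h>, make every allocation inside the scope behave as if it had been requested with GFP_NOFS, so the allocator cannot recurse into filesystem reclaim while a transaction commit is waiting for scrub to pause. The helper name below is hypothetical.

#include <linux/sched/mm.h>
#include <linux/slab.h>

/* Hypothetical helper, for illustration only. */
static void *alloc_under_nofs_scope(size_t size)
{
        unsigned int nofs_flag;
        void *ptr;

        /* Enter the NOFS scope: allocations below cannot trigger FS reclaim. */
        nofs_flag = memalloc_nofs_save();
        ptr = kmalloc(size, GFP_KERNEL);        /* effectively GFP_NOFS here */
        /* Leave the scope, restoring the task's previous allocation context. */
        memalloc_nofs_restore(nofs_flag);

        return ptr;
}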