@@ -1101,8 +1101,10 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	md_write_start(mddev, bio); /* wait on superblock update early */
 
 	if (bio_data_dir(bio) == WRITE &&
-	    bio_end_sector(bio) > mddev->suspend_lo &&
-	    bio->bi_iter.bi_sector < mddev->suspend_hi) {
+	    ((bio_end_sector(bio) > mddev->suspend_lo &&
+	    bio->bi_iter.bi_sector < mddev->suspend_hi) ||
+	    (mddev_is_clustered(mddev) &&
+	     md_cluster_ops->area_resyncing(mddev, bio->bi_iter.bi_sector, bio_end_sector(bio))))) {
 		/* As the suspend_* range is controlled by
 		 * userspace, we want an interruptible
 		 * wait.
@@ -1113,7 +1115,10 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 			prepare_to_wait(&conf->wait_barrier,
 					&w, TASK_INTERRUPTIBLE);
 			if (bio_end_sector(bio) <= mddev->suspend_lo ||
-			    bio->bi_iter.bi_sector >= mddev->suspend_hi)
+			    bio->bi_iter.bi_sector >= mddev->suspend_hi ||
+			    (mddev_is_clustered(mddev) &&
+			     !md_cluster_ops->area_resyncing(mddev,
+				     bio->bi_iter.bi_sector, bio_end_sector(bio))))
 				break;
 			schedule();
 		}
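
For reference, the predicate this diff introduces: a write bio must now wait either when it overlaps the userspace-controlled suspend_lo/suspend_hi window, or, on a clustered array, when md_cluster_ops->area_resyncing() reports that another node is resyncing the affected range. Below is a minimal standalone sketch of that combined check under stated assumptions; struct fake_mddev, write_must_wait() and toy_area_resyncing() are names invented for this illustration, not kernel API, and the outer bio_data_dir(bio) == WRITE test is left out.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

/* Illustrative stand-in for the struct mddev fields used by the hunk. */
struct fake_mddev {
	sector_t suspend_lo;
	sector_t suspend_hi;
	bool clustered;				/* models mddev_is_clustered() */
	/* models md_cluster_ops->area_resyncing() */
	bool (*area_resyncing)(struct fake_mddev *m, sector_t lo, sector_t hi);
};

/* Mirrors the condition added by the patch: block the write if it overlaps
 * the suspend window, or if another cluster node is resyncing the range. */
static bool write_must_wait(struct fake_mddev *m, sector_t bi_sector, sector_t end_sector)
{
	if (end_sector > m->suspend_lo && bi_sector < m->suspend_hi)
		return true;
	if (m->clustered && m->area_resyncing(m, bi_sector, end_sector))
		return true;
	return false;
}

/* Toy resync map: pretend sectors [1000, 2000) are being resynced remotely. */
static bool toy_area_resyncing(struct fake_mddev *m, sector_t lo, sector_t hi)
{
	(void)m;
	return hi > 1000 && lo < 2000;
}

int main(void)
{
	struct fake_mddev m = {
		.suspend_lo = 0, .suspend_hi = 0,
		.clustered = true, .area_resyncing = toy_area_resyncing,
	};

	printf("write [500,600)   must wait: %d\n", write_must_wait(&m, 500, 600));	/* 0 */
	printf("write [1500,1600) must wait: %d\n", write_must_wait(&m, 1500, 1600));	/* 1 */
	return 0;
}

The second hunk uses the negation of the same predicate inside the prepare_to_wait()/schedule() loop, so the writer breaks out of the TASK_INTERRUPTIBLE wait as soon as neither the suspend window nor a remote resync covers the bio's range.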