|
@@ -1478,11 +1478,24 @@ retry_write:
|
|
|
mbio->bi_bdev = (void*)rdev;
|
|
|
|
|
|
atomic_inc(&r10_bio->remaining);
|
|
|
+
|
|
|
+ cb = blk_check_plugged(raid10_unplug, mddev,
|
|
|
+ sizeof(*plug));
|
|
|
+ if (cb)
|
|
|
+ plug = container_of(cb, struct raid10_plug_cb,
|
|
|
+ cb);
|
|
|
+ else
|
|
|
+ plug = NULL;
|
|
|
spin_lock_irqsave(&conf->device_lock, flags);
|
|
|
- bio_list_add(&conf->pending_bio_list, mbio);
|
|
|
- conf->pending_count++;
|
|
|
+ if (plug) {
|
|
|
+ bio_list_add(&plug->pending, mbio);
|
|
|
+ plug->pending_cnt++;
|
|
|
+ } else {
|
|
|
+ bio_list_add(&conf->pending_bio_list, mbio);
|
|
|
+ conf->pending_count++;
|
|
|
+ }
|
|
|
spin_unlock_irqrestore(&conf->device_lock, flags);
|
|
|
- if (!mddev_check_plugged(mddev))
|
|
|
+ if (!plug)
|
|
|
md_wakeup_thread(mddev->thread);
|
|
|
}
|
|
|
}
|
|
@@ -1572,7 +1585,25 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio)
|
|
|
split = bio;
|
|
|
}
|
|
|
|
|
|
+ /*
|
|
|
+	 * If a bio is split, the first part of bio will pass
|
|
|
+ * barrier but the bio is queued in current->bio_list (see
|
|
|
+ * generic_make_request). If there is a raise_barrier() called
|
|
|
+ * here, the second part of bio can't pass barrier. But since
|
|
|
+	 * the first part bio isn't dispatched to underlying disks
|
|
|
+ * yet, the barrier is never released, hence raise_barrier will
|
|
|
+	 * always wait. We have a deadlock.
|
|
|
+ * Note, this only happens in read path. For write path, the
|
|
|
+ * first part of bio is dispatched in a schedule() call
|
|
|
+ * (because of blk plug) or offloaded to raid10d.
|
|
|
+ * Quitting from the function immediately can change the bio
|
|
|
+ * order queued in bio_list and avoid the deadlock.
|
|
|
+ */
|
|
|
__make_request(mddev, split);
|
|
|
+ if (split != bio && bio_data_dir(bio) == READ) {
|
|
|
+ generic_make_request(bio);
|
|
|
+ break;
|
|
|
+ }
|
|
|
} while (split != bio);
|
|
|
|
|
|
/* In case raid10d snuck in to freeze_array */
|
|
@@ -3944,10 +3975,6 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors)
|
|
|
return ret;
|
|
|
}
|
|
|
md_set_array_sectors(mddev, size);
|
|
|
- if (mddev->queue) {
|
|
|
- set_capacity(mddev->gendisk, mddev->array_sectors);
|
|
|
- revalidate_disk(mddev->gendisk);
|
|
|
- }
|
|
|
if (sectors > mddev->dev_sectors &&
|
|
|
mddev->recovery_cp > oldsize) {
|
|
|
mddev->recovery_cp = oldsize;
|