@@ -376,12 +376,6 @@ static int raid0_run(struct mddev *mddev)
 	struct md_rdev *rdev;
 	bool discard_supported = false;
 
-	rdev_for_each(rdev, mddev) {
-		disk_stack_limits(mddev->gendisk, rdev->bdev,
-				  rdev->data_offset << 9);
-		if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
-			discard_supported = true;
-	}
 	blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
 	blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
 	blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
@@ -390,6 +384,12 @@ static int raid0_run(struct mddev *mddev)
 	blk_queue_io_opt(mddev->queue,
 			 (mddev->chunk_sectors << 9) * mddev->raid_disks);
 
+	rdev_for_each(rdev, mddev) {
+		disk_stack_limits(mddev->gendisk, rdev->bdev,
+				  rdev->data_offset << 9);
+		if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
+			discard_supported = true;
+	}
 	if (!discard_supported)
 		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
 	else
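
For reference, a minimal sketch of the queue setup order in raid0_run() after these hunks apply: the per-rdev disk_stack_limits()/discard check now runs after the chunk-based queue limits and I/O hints are set, so the stacked per-device limits are no longer overwritten by the later blk_queue_* calls. The function name below, the omitted code between the two hunks, and the else branch body are placeholders assumed for illustration; only the lines visible in the hunks are taken from the patch.

	/* Sketch only: mirrors the post-patch ordering shown in the hunks above. */
	static void raid0_queue_setup_sketch(struct mddev *mddev)
	{
		struct md_rdev *rdev;
		bool discard_supported = false;

		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);

		/* ... lines between the two hunks omitted ... */

		blk_queue_io_opt(mddev->queue,
				 (mddev->chunk_sectors << 9) * mddev->raid_disks);

		/* Stack each member device's limits after the array-wide limits. */
		rdev_for_each(rdev, mddev) {
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);
			if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
				discard_supported = true;
		}

		if (!discard_supported)
			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
		else
			/* assumed: the else branch is not shown in the hunk above */
			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
	}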