@@ -83,7 +83,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
 	char b[BDEVNAME_SIZE];
 	char b2[BDEVNAME_SIZE];
 	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
-	bool discard_supported = false;
+	unsigned short blksize = 512;
 
 	if (!conf)
 		return -ENOMEM;
@@ -98,6 +98,9 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
 		sector_div(sectors, mddev->chunk_sectors);
 		rdev1->sectors = sectors * mddev->chunk_sectors;
 
+		blksize = max(blksize, queue_logical_block_size(
+				      rdev1->bdev->bd_disk->queue));
+
 		rdev_for_each(rdev2, mddev) {
 			pr_debug("md/raid0:%s: comparing %s(%llu)"
 				 " with %s(%llu)\n",
@@ -134,6 +137,18 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
 	}
 	pr_debug("md/raid0:%s: FINAL %d zones\n",
 		 mdname(mddev), conf->nr_strip_zones);
+	/*
+	 * now since we have the hard sector sizes, we can make sure
+	 * chunk size is a multiple of that sector size
+	 */
+	if ((mddev->chunk_sectors << 9) % blksize) {
+		printk(KERN_ERR "md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
+		       mdname(mddev),
+		       mddev->chunk_sectors << 9, blksize);
+		err = -EINVAL;
+		goto abort;
+	}
+
 	err = -ENOMEM;
 	conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
				conf->nr_strip_zones, GFP_KERNEL);
@@ -188,19 +203,12 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
 		}
 		dev[j] = rdev1;
 
-		if (mddev->queue)
-			disk_stack_limits(mddev->gendisk, rdev1->bdev,
-					  rdev1->data_offset << 9);
-
 		if (rdev1->bdev->bd_disk->queue->merge_bvec_fn)
 			conf->has_merge_bvec = 1;
 
 		if (!smallest || (rdev1->sectors < smallest->sectors))
 			smallest = rdev1;
 		cnt++;
-
-		if (blk_queue_discard(bdev_get_queue(rdev1->bdev)))
-			discard_supported = true;
 	}
 	if (cnt != mddev->raid_disks) {
 		printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - "
@@ -261,28 +269,6 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
 			 (unsigned long long)smallest->sectors);
 	}
 
-	/*
-	 * now since we have the hard sector sizes, we can make sure
-	 * chunk size is a multiple of that sector size
-	 */
-	if ((mddev->chunk_sectors << 9) % queue_logical_block_size(mddev->queue)) {
-		printk(KERN_ERR "md/raid0:%s: chunk_size of %d not valid\n",
-		       mdname(mddev),
-		       mddev->chunk_sectors << 9);
-		goto abort;
-	}
-
-	if (mddev->queue) {
-		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
-		blk_queue_io_opt(mddev->queue,
-				 (mddev->chunk_sectors << 9) * mddev->raid_disks);
-
-		if (!discard_supported)
-			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
-		else
-			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
-	}
-
 	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
 	*private_conf = conf;
 
@@ -433,12 +419,6 @@ static int raid0_run(struct mddev *mddev)
 	if (md_check_no_bitmap(mddev))
 		return -EINVAL;
 
-	if (mddev->queue) {
-		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
-		blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
-		blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
-	}
-
 	/* if private is not null, we are here after takeover */
 	if (mddev->private == NULL) {
 		ret = create_strip_zones(mddev, &conf);
@@ -447,6 +427,29 @@ static int raid0_run(struct mddev *mddev)
 		mddev->private = conf;
 	}
 	conf = mddev->private;
+	if (mddev->queue) {
+		struct md_rdev *rdev;
+		bool discard_supported = false;
+
+		rdev_for_each(rdev, mddev) {
+			disk_stack_limits(mddev->gendisk, rdev->bdev,
+					  rdev->data_offset << 9);
+			if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
+				discard_supported = true;
+		}
+		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
+		blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
+		blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
+
+		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
+		blk_queue_io_opt(mddev->queue,
+				 (mddev->chunk_sectors << 9) * mddev->raid_disks);
+
+		if (!discard_supported)
+			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+		else
+			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+	}
+
 	/* calculate array device size */
 	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));
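
For reference, the validation this patch adds boils down to simple modular arithmetic: blksize starts at 512 and is raised to the largest logical block size reported by any member device, and the chunk size in bytes (chunk_sectors << 9) must be a multiple of it. Below is a minimal stand-alone sketch of that logic; validate_chunk() and the sample member sizes are hypothetical illustrations, not kernel code:

#include <stdio.h>

/* Mirror of the check added to create_strip_zones(): find the largest
 * logical block size across the members (floor of 512 bytes), then
 * reject the configuration when the chunk size in bytes is not a
 * multiple of it.  -22 stands in for the kernel's -EINVAL.
 */
static int validate_chunk(unsigned int chunk_sectors,
			  const unsigned int *lbs, int ndisks)
{
	unsigned int blksize = 512;
	int i;

	for (i = 0; i < ndisks; i++)
		if (lbs[i] > blksize)
			blksize = lbs[i];	/* max() over all members */

	if ((chunk_sectors << 9) % blksize)	/* << 9: sectors to bytes */
		return -22;
	return 0;
}

int main(void)
{
	unsigned int lbs[] = { 512, 4096 };	/* hypothetical members */

	/* 128 sectors = 64KiB, a multiple of 4096: accepted (prints 0) */
	printf("%d\n", validate_chunk(128, lbs, 2));
	/* 2 sectors = 1KiB, not a multiple of 4096: rejected (prints -22) */
	printf("%d\n", validate_chunk(2, lbs, 2));
	return 0;
}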