@@ -4800,7 +4800,7 @@ static int bio_fits_rdev(struct bio *bi)
         return 1;
 }
 
-static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
+static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
 {
         struct r5conf *conf = mddev->private;
         int dd_idx;
@@ -4809,7 +4809,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
         sector_t end_sector;
 
         if (!in_chunk_boundary(mddev, raid_bio)) {
-                pr_debug("chunk_aligned_read : non aligned\n");
+                pr_debug("%s: non aligned\n", __func__);
                 return 0;
         }
         /*
@@ -4886,6 +4886,31 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
         }
 }
 
+static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
+{
+        struct bio *split;
+
+        do {
+                sector_t sector = raid_bio->bi_iter.bi_sector;
+                unsigned chunk_sects = mddev->chunk_sectors;
+                unsigned sectors = chunk_sects - (sector & (chunk_sects-1));
+
+                if (sectors < bio_sectors(raid_bio)) {
+                        split = bio_split(raid_bio, sectors, GFP_NOIO, fs_bio_set);
+                        bio_chain(split, raid_bio);
+                } else
+                        split = raid_bio;
+
+                if (!raid5_read_one_chunk(mddev, split)) {
+                        if (split != raid_bio)
+                                generic_make_request(raid_bio);
+                        return split;
+                }
+        } while (split != raid_bio);
+
+        return NULL;
+}
+
 /* __get_priority_stripe - get the next stripe to process
  *
  * Full stripe writes are allowed to pass preread active stripes up until
@@ -5163,9 +5188,11 @@ static void make_request(struct mddev *mddev, struct bio * bi)
          * data on failed drives.
          */
         if (rw == READ && mddev->degraded == 0 &&
-            mddev->reshape_position == MaxSector &&
-            chunk_aligned_read(mddev,bi))
-                return;
+            mddev->reshape_position == MaxSector) {
+                bi = chunk_aligned_read(mddev, bi);
+                if (!bi)
+                        return;
+        }
 
         if (unlikely(bi->bi_rw & REQ_DISCARD)) {
                 make_discard_request(mddev, bi);