@@ -4781,25 +4781,6 @@ static void raid5_align_endio(struct bio *bi)
 	add_bio_to_retry(raid_bi, conf);
 }
 
-static int bio_fits_rdev(struct bio *bi)
-{
-	struct request_queue *q = bdev_get_queue(bi->bi_bdev);
-
-	if (bio_sectors(bi) > queue_max_sectors(q))
-		return 0;
-	blk_recount_segments(q, bi);
-	if (bi->bi_phys_segments > queue_max_segments(q))
-		return 0;
-
-	if (q->merge_bvec_fn)
-		/* it's too hard to apply the merge_bvec_fn at this stage,
-		 * just just give up
-		 */
-		return 0;
-
-	return 1;
-}
-
 static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
 {
 	struct r5conf *conf = mddev->private;
@@ -4853,11 +4834,9 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
 		align_bi->bi_bdev = rdev->bdev;
 		bio_clear_flag(align_bi, BIO_SEG_VALID);
 
-		if (!bio_fits_rdev(align_bi) ||
-		    is_badblock(rdev, align_bi->bi_iter.bi_sector,
+		if (is_badblock(rdev, align_bi->bi_iter.bi_sector,
 				bio_sectors(align_bi),
 				&first_bad, &bad_sectors)) {
-			/* too big in some way, or has a known bad block */
 			bio_put(align_bi);
 			rdev_dec_pending(rdev, mddev);
 			return 0;