@@ -81,14 +81,7 @@ static void lower_barrier(struct r1conf *conf, sector_t sector_nr);
 #define raid1_log(md, fmt, args...) \
 	do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)
 
-/*
- * 'strct resync_pages' stores actual pages used for doing the resync
- * IO, and it is per-bio, so make .bi_private points to it.
- */
-static inline struct resync_pages *get_resync_pages(struct bio *bio)
-{
-	return bio->bi_private;
-}
+#include "raid1-10.c"
 
 /*
  * for resync bio, r1bio pointer can be retrieved from the per-bio
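The helper removed above is not deleted but relocated: raid1-10.c is a shared source file that both raid1.c and raid10.c now include directly. Presumably it carries the helper over unchanged, along the lines of:

	/* in raid1-10.c, shared by raid1.c and raid10.c */
	static inline struct resync_pages *get_resync_pages(struct bio *bio)
	{
		return bio->bi_private;
	}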
@@ -170,7 +163,6 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
 			resync_get_all_pages(rp);
 		}
 
-		rp->idx = 0;
 		rp->raid_bio = r1_bio;
 		bio->bi_private = rp;
 	}
@@ -492,10 +484,6 @@ static void raid1_end_write_request(struct bio *bio)
 	}
 
 	if (behind) {
-		/* we release behind master bio when all write are done */
-		if (r1_bio->behind_master_bio == bio)
-			to_put = NULL;
-
 		if (test_bit(WriteMostly, &rdev->flags))
 			atomic_dec(&r1_bio->behind_remaining);
 
@@ -802,8 +790,7 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio)
 		bio->bi_next = NULL;
 		bio->bi_bdev = rdev->bdev;
 		if (test_bit(Faulty, &rdev->flags)) {
-			bio->bi_status = BLK_STS_IOERR;
-			bio_endio(bio);
+			bio_io_error(bio);
 		} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
 				    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
 			/* Just ignore it */
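bio_io_error() is the stock block-layer helper for exactly the two lines it replaces; its definition in include/linux/bio.h is simply:

	static inline void bio_io_error(struct bio *bio)
	{
		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
	}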
@@ -1088,7 +1075,7 @@ static void unfreeze_array(struct r1conf *conf)
 	wake_up(&conf->wait_barrier);
 }
 
-static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio,
+static void alloc_behind_master_bio(struct r1bio *r1_bio,
 					   struct bio *bio)
 {
 	int size = bio->bi_iter.bi_size;
@@ -1098,11 +1085,13 @@ static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio,
 
 	behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev);
 	if (!behind_bio)
-		goto fail;
+		return;
 
 	/* discard op, we don't support writezero/writesame yet */
-	if (!bio_has_data(bio))
+	if (!bio_has_data(bio)) {
+		behind_bio->bi_iter.bi_size = size;
 		goto skip_copy;
+	}
 
 	while (i < vcnt && size) {
 		struct page *page;
@@ -1123,14 +1112,13 @@ skip_copy:
 	r1_bio->behind_master_bio = behind_bio;;
 	set_bit(R1BIO_BehindIO, &r1_bio->state);
 
-	return behind_bio;
+	return;
 
 free_pages:
 	pr_debug("%dB behind alloc failed, doing sync I/O\n",
 		 bio->bi_iter.bi_size);
 	bio_free_pages(behind_bio);
-fail:
-	return behind_bio;
+	bio_put(behind_bio);
 }
 
 struct raid1_plug_cb {
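With the switch to a void return, alloc_behind_master_bio() reports success only through r1_bio->behind_master_bio, and on failure it now drops its own reference with bio_put() rather than handing the half-built bio back. The write path in the next two hunks follows that contract, roughly:

	alloc_behind_master_bio(r1_bio, bio);	/* may fail silently */

	if (r1_bio->behind_master_bio)		/* behind pages are ready */
		mbio = bio_clone_fast(r1_bio->behind_master_bio,
				      GFP_NOIO, mddev->bio_set);
	else					/* fall back to the original bio */
		mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);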
@@ -1483,7 +1471,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 		    (atomic_read(&bitmap->behind_writes)
 		     < mddev->bitmap_info.max_write_behind) &&
 		    !waitqueue_active(&bitmap->behind_wait)) {
-			mbio = alloc_behind_master_bio(r1_bio, bio);
+			alloc_behind_master_bio(r1_bio, bio);
 		}
 
 		bitmap_startwrite(bitmap, r1_bio->sector,
@@ -1493,14 +1481,11 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 		first_clone = 0;
 	}
 
-	if (!mbio) {
-		if (r1_bio->behind_master_bio)
-			mbio = bio_clone_fast(r1_bio->behind_master_bio,
-					      GFP_NOIO,
-					      mddev->bio_set);
-		else
-			mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);
-	}
+	if (r1_bio->behind_master_bio)
+		mbio = bio_clone_fast(r1_bio->behind_master_bio,
+				      GFP_NOIO, mddev->bio_set);
+	else
+		mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);
 
 	if (r1_bio->behind_master_bio) {
 		if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
@@ -2086,10 +2071,7 @@ static void process_checks(struct r1bio *r1_bio)
 	/* Fix variable parts of all bios */
 	vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
 	for (i = 0; i < conf->raid_disks * 2; i++) {
-		int j;
-		int size;
 		blk_status_t status;
-		struct bio_vec *bi;
 		struct bio *b = r1_bio->bios[i];
 		struct resync_pages *rp = get_resync_pages(b);
 		if (b->bi_end_io != end_sync_read)
@@ -2098,8 +2080,6 @@ static void process_checks(struct r1bio *r1_bio)
 		status = b->bi_status;
 		bio_reset(b);
 		b->bi_status = status;
-		b->bi_vcnt = vcnt;
-		b->bi_iter.bi_size = r1_bio->sectors << 9;
 		b->bi_iter.bi_sector = r1_bio->sector +
 			conf->mirrors[i].rdev->data_offset;
 		b->bi_bdev = conf->mirrors[i].rdev->bdev;
@@ -2107,15 +2087,8 @@ static void process_checks(struct r1bio *r1_bio)
 		rp->raid_bio = r1_bio;
 		b->bi_private = rp;
 
-		size = b->bi_iter.bi_size;
-		bio_for_each_segment_all(bi, b, j) {
-			bi->bv_offset = 0;
-			if (size > PAGE_SIZE)
-				bi->bv_len = PAGE_SIZE;
-			else
-				bi->bv_len = size;
-			size -= PAGE_SIZE;
-		}
+		/* initialize bvec table again */
+		md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9);
 	}
 	for (primary = 0; primary < conf->raid_disks * 2; primary++)
 		if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
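md_bio_reset_resync_pages() is the new shared md helper that replaces the open-coded bvec fixup removed above. A minimal sketch of what it is expected to do, assuming it rebuilds the bvec table by re-adding each resync page with bio_add_page():

	static void md_bio_reset_resync_pages(struct bio *bio,
					      struct resync_pages *rp, int size)
	{
		int idx = 0;

		/* re-add pages so bi_vcnt and bi_iter.bi_size are rebuilt */
		do {
			struct page *page = resync_fetch_page(rp, idx);
			int len = min_t(int, size, PAGE_SIZE);

			/* won't fail: the vec table covers all RESYNC_PAGES */
			bio_add_page(bio, page, len, 0);
			size -= len;
		} while (idx++ < RESYNC_PAGES && size > 0);
	}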
@@ -2366,8 +2339,6 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
 		wbio = bio_clone_fast(r1_bio->behind_master_bio,
 				      GFP_NOIO,
 				      mddev->bio_set);
-		/* We really need a _all clone */
-		wbio->bi_iter = (struct bvec_iter){ 0 };
 	} else {
 		wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO,
 				      mddev->bio_set);
@@ -2619,6 +2590,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 	int good_sectors = RESYNC_SECTORS;
 	int min_bad = 0; /* number of sectors that are bad in all devices */
 	int idx = sector_to_idx(sector_nr);
+	int page_idx = 0;
 
 	if (!conf->r1buf_pool)
 		if (init_resync(conf))
@@ -2846,7 +2818,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 		bio = r1_bio->bios[i];
 		rp = get_resync_pages(bio);
 		if (bio->bi_end_io) {
-			page = resync_fetch_page(rp, rp->idx++);
+			page = resync_fetch_page(rp, page_idx);
 
 			/*
 			 * won't fail because the vec table is big
@@ -2858,7 +2830,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 		nr_sectors += len>>9;
 		sector_nr += len>>9;
 		sync_blocks -= (len>>9);
-	} while (get_resync_pages(r1_bio->bios[disk]->bi_private)->idx < RESYNC_PAGES);
+	} while (++page_idx < RESYNC_PAGES);
 
 	r1_bio->sectors = nr_sectors;
 
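Together with the r1buf_pool_alloc() hunk near the top, the last two hunks retire the per-bio rp->idx cursor: the resync loop now tracks its position in the on-stack page_idx, so struct resync_pages carries no mutable iteration state. Condensed from the hunks above, the loop shape becomes:

	int page_idx = 0;

	do {
		/* ... compute len for this pass ... */
		page = resync_fetch_page(rp, page_idx);
		bio_add_page(bio, page, len, 0);	/* won't fail, table is big enough */
		/* ... advance nr_sectors / sector_nr / sync_blocks ... */
	} while (++page_idx < RESYNC_PAGES);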