@@ -221,7 +221,6 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
 			resync_get_all_pages(rp);
 		}
 
-		rp->idx = 0;
 		rp->raid_bio = r10_bio;
 		bio->bi_private = rp;
 		if (rbio) {
@@ -2853,6 +2852,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 	sector_t sectors_skipped = 0;
 	int chunks_skipped = 0;
 	sector_t chunk_mask = conf->geo.chunk_mask;
+	int page_idx = 0;
 
 	if (!conf->r10buf_pool)
 		if (init_resync(conf))
@@ -3355,7 +3355,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 				break;
 		for (bio= biolist ; bio ; bio=bio->bi_next) {
 			struct resync_pages *rp = get_resync_pages(bio);
-			page = resync_fetch_page(rp, rp->idx++);
+			page = resync_fetch_page(rp, page_idx);
 			/*
 			 * won't fail because the vec table is big enough
 			 * to hold all these pages
@@ -3364,7 +3364,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 		}
 		nr_sectors += len>>9;
 		sector_nr += len>>9;
-	} while (get_resync_pages(biolist)->idx < RESYNC_PAGES);
+	} while (++page_idx < RESYNC_PAGES);
 	r10_bio->sectors = nr_sectors;
 
 	while (biolist) {
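
Taken together, these hunks remove the per-buffer cursor from struct resync_pages: rp->idx was reset in r10buf_pool_alloc(), post-incremented once per bio per loop iteration, and then only the head of biolist was consulted in the loop's exit test. Since every bio in biolist steps through its pages in lockstep, a single function-local page_idx in raid10_sync_request() is sufficient, and struct resync_pages no longer carries state that has to be re-initialized between uses.

Below is a minimal userspace sketch of the resulting loop shape, using simplified stand-in types and a hypothetical fetch_page() helper in place of resync_fetch_page(); these are assumptions for illustration, not the kernel's actual definitions:

#include <stdio.h>

#define RESYNC_PAGES 16	/* placeholder value for the sketch */

/* Simplified stand-ins for the kernel types (assumptions for
 * illustration; the real definitions live under drivers/md/). */
struct resync_pages {
	void *pages[RESYNC_PAGES];	/* note: no 'idx' member any more */
};

struct bio_stub {
	struct resync_pages *rp;
	struct bio_stub *bi_next;
};

/* Hypothetical helper standing in for resync_fetch_page(): the page
 * index now comes from the caller instead of a cursor inside *rp. */
static void *fetch_page(struct resync_pages *rp, int idx)
{
	return rp->pages[idx];
}

/* Loop shape after the patch: one local page_idx drives every bio in
 * the list, so all bios advance through their pages in lockstep and
 * no per-resync_pages state needs to be initialized or reset. */
static void walk_resync_pages(struct bio_stub *biolist)
{
	int page_idx = 0;

	do {
		struct bio_stub *bio;

		for (bio = biolist; bio; bio = bio->bi_next) {
			void *page = fetch_page(bio->rp, page_idx);

			/* ...attach 'page' to the bio's vec table... */
			(void)page;
		}
	} while (++page_idx < RESYNC_PAGES);
}

int main(void)
{
	/* One bio with one page set, just to exercise the loop. */
	struct resync_pages rp = { { NULL } };
	struct bio_stub bio = { &rp, NULL };

	walk_resync_pages(&bio);
	printf("walked %d pages per bio\n", RESYNC_PAGES);
	return 0;
}

The design point is that the iteration state belongs to the loop, not to the page set: with page_idx local, the old asymmetry of advancing rp->idx on every bio but testing it only on biolist's first entry disappears, along with the need for the rp->idx = 0; reset in the allocation path.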