@@ -221,7 +221,7 @@ static void free_r1bio(struct r1bio *r1_bio)
 	struct r1conf *conf = r1_bio->mddev->private;
 
 	put_all_bios(conf, r1_bio);
-	mempool_free(r1_bio, conf->r1bio_pool);
+	mempool_free(r1_bio, &conf->r1bio_pool);
 }
 
 static void put_buf(struct r1bio *r1_bio)
@@ -236,7 +236,7 @@ static void put_buf(struct r1bio *r1_bio)
 			rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
 	}
 
-	mempool_free(r1_bio, conf->r1buf_pool);
+	mempool_free(r1_bio, &conf->r1buf_pool);
 
 	lower_barrier(conf, sect);
 }
@@ -1178,7 +1178,7 @@ alloc_r1bio(struct mddev *mddev, struct bio *bio)
 	struct r1conf *conf = mddev->private;
 	struct r1bio *r1_bio;
 
-	r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
+	r1_bio = mempool_alloc(&conf->r1bio_pool, GFP_NOIO);
 	/* Ensure no bio records IO_BLOCKED */
 	memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0]));
 	init_r1bio(r1_bio, mddev, bio);
@@ -1268,7 +1268,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
 
 	if (max_sectors < bio_sectors(bio)) {
 		struct bio *split = bio_split(bio, max_sectors,
-					      gfp, conf->bio_split);
+					      gfp, &conf->bio_split);
 		bio_chain(split, bio);
 		generic_make_request(bio);
 		bio = split;
@@ -1278,7 +1278,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
 
 	r1_bio->read_disk = rdisk;
 
-	read_bio = bio_clone_fast(bio, gfp, mddev->bio_set);
+	read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set);
 
 	r1_bio->bios[rdisk] = read_bio;
 
@@ -1439,7 +1439,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 
 	if (max_sectors < bio_sectors(bio)) {
 		struct bio *split = bio_split(bio, max_sectors,
-					      GFP_NOIO, conf->bio_split);
+					      GFP_NOIO, &conf->bio_split);
 		bio_chain(split, bio);
 		generic_make_request(bio);
 		bio = split;
@@ -1479,9 +1479,9 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 
 		if (r1_bio->behind_master_bio)
 			mbio = bio_clone_fast(r1_bio->behind_master_bio,
-					      GFP_NOIO, mddev->bio_set);
+					      GFP_NOIO, &mddev->bio_set);
 		else
-			mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);
+			mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
 
 		if (r1_bio->behind_master_bio) {
 			if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
@@ -1657,8 +1657,7 @@ static void close_sync(struct r1conf *conf)
 		_allow_barrier(conf, idx);
 	}
 
-	mempool_destroy(conf->r1buf_pool);
-	conf->r1buf_pool = NULL;
+	mempool_exit(&conf->r1buf_pool);
 }
 
 static int raid1_spare_active(struct mddev *mddev)
@@ -2348,10 +2347,10 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
 		if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
 			wbio = bio_clone_fast(r1_bio->behind_master_bio,
 					      GFP_NOIO,
-					      mddev->bio_set);
+					      &mddev->bio_set);
 		} else {
 			wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO,
-					      mddev->bio_set);
+					      &mddev->bio_set);
 		}
 
 		bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
@@ -2564,17 +2563,15 @@ static int init_resync(struct r1conf *conf)
 	int buffs;
 
 	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
-	BUG_ON(conf->r1buf_pool);
-	conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free,
-					  conf->poolinfo);
-	if (!conf->r1buf_pool)
-		return -ENOMEM;
-	return 0;
+	BUG_ON(mempool_initialized(&conf->r1buf_pool));
+
+	return mempool_init(&conf->r1buf_pool, buffs, r1buf_pool_alloc,
+			    r1buf_pool_free, conf->poolinfo);
 }
 
 static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
 {
-	struct r1bio *r1bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
+	struct r1bio *r1bio = mempool_alloc(&conf->r1buf_pool, GFP_NOIO);
 	struct resync_pages *rps;
 	struct bio *bio;
 	int i;
@@ -2617,7 +2614,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 	int idx = sector_to_idx(sector_nr);
 	int page_idx = 0;
 
-	if (!conf->r1buf_pool)
+	if (!mempool_initialized(&conf->r1buf_pool))
 		if (init_resync(conf))
 			return 0;
 
@@ -2953,14 +2950,13 @@ static struct r1conf *setup_conf(struct mddev *mddev)
 	if (!conf->poolinfo)
 		goto abort;
 	conf->poolinfo->raid_disks = mddev->raid_disks * 2;
-	conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
-					  r1bio_pool_free,
-					  conf->poolinfo);
-	if (!conf->r1bio_pool)
+	err = mempool_init(&conf->r1bio_pool, NR_RAID1_BIOS, r1bio_pool_alloc,
+			   r1bio_pool_free, conf->poolinfo);
+	if (err)
 		goto abort;
 
-	conf->bio_split = bioset_create(BIO_POOL_SIZE, 0, 0);
-	if (!conf->bio_split)
+	err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
+	if (err)
 		goto abort;
 
 	conf->poolinfo->mddev = mddev;
@@ -3033,7 +3029,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
 
  abort:
 	if (conf) {
-		mempool_exit(&conf->r1bio_pool);
+		mempool_exit(&conf->r1bio_pool);
 		kfree(conf->mirrors);
 		safe_put_page(conf->tmppage);
 		kfree(conf->poolinfo);
@@ -3041,8 +3037,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
 		kfree(conf->nr_waiting);
 		kfree(conf->nr_queued);
 		kfree(conf->barrier);
-		if (conf->bio_split)
-			bioset_free(conf->bio_split);
+		bioset_exit(&conf->bio_split);
 		kfree(conf);
 	}
 	return ERR_PTR(err);
@@ -3144,7 +3139,7 @@ static void raid1_free(struct mddev *mddev, void *priv)
 {
 	struct r1conf *conf = priv;
 
-	mempool_destroy(conf->r1bio_pool);
+	mempool_exit(&conf->r1bio_pool);
 	kfree(conf->mirrors);
 	safe_put_page(conf->tmppage);
 	kfree(conf->poolinfo);
@@ -3152,8 +3147,7 @@ static void raid1_free(struct mddev *mddev, void *priv)
 	kfree(conf->nr_waiting);
 	kfree(conf->nr_queued);
 	kfree(conf->barrier);
-	if (conf->bio_split)
-		bioset_free(conf->bio_split);
+	bioset_exit(&conf->bio_split);
 	kfree(conf);
 }
 
@@ -3199,13 +3193,17 @@ static int raid1_reshape(struct mddev *mddev)
 	 * At the same time, we "pack" the devices so that all the missing
 	 * devices have the higher raid_disk numbers.
	 */
-	mempool_t *newpool, *oldpool;
+	mempool_t newpool, oldpool;
 	struct pool_info *newpoolinfo;
 	struct raid1_info *newmirrors;
 	struct r1conf *conf = mddev->private;
 	int cnt, raid_disks;
 	unsigned long flags;
 	int d, d2;
+	int ret;
+
+	memset(&newpool, 0, sizeof(newpool));
+	memset(&oldpool, 0, sizeof(oldpool));
 
 	/* Cannot change chunk_size, layout, or level */
 	if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
@@ -3237,17 +3235,17 @@ static int raid1_reshape(struct mddev *mddev)
 	newpoolinfo->mddev = mddev;
 	newpoolinfo->raid_disks = raid_disks * 2;
 
-	newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
-				 r1bio_pool_free, newpoolinfo);
-	if (!newpool) {
+	ret = mempool_init(&newpool, NR_RAID1_BIOS, r1bio_pool_alloc,
+			   r1bio_pool_free, newpoolinfo);
+	if (ret) {
 		kfree(newpoolinfo);
-		return -ENOMEM;
+		return ret;
 	}
 	newmirrors = kzalloc(sizeof(struct raid1_info) * raid_disks * 2,
 			     GFP_KERNEL);
 	if (!newmirrors) {
 		kfree(newpoolinfo);
-		mempool_destroy(newpool);
+		mempool_exit(&newpool);
 		return -ENOMEM;
 	}
 
@@ -3287,7 +3285,7 @@ static int raid1_reshape(struct mddev *mddev)
 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 	md_wakeup_thread(mddev->thread);
 
-	mempool_destroy(oldpool);
+	mempool_exit(&oldpool);
 	return 0;
 }
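
For reference, the pattern the patch moves to is the embedded-allocation style of the mempool/bioset APIs: the pool lives inside the owning structure and is set up with mempool_init()/bioset_init() and torn down with mempool_exit()/bioset_exit(), instead of being created and destroyed as a separately allocated object. The sketch below is not part of the patch; struct demo_conf, demo_setup() and demo_teardown() are made-up names used only to illustrate the calling convention, and it uses the mempool_init_page_pool() convenience helper rather than the custom r1bio alloc/free callbacks raid1 uses.

/* Illustrative only -- not from raid1.c; names are hypothetical. */
#include <linux/bio.h>
#include <linux/mempool.h>

struct demo_conf {
	mempool_t	page_pool;	/* embedded, not a pointer */
	struct bio_set	split_set;	/* embedded, not a pointer */
};

static int demo_setup(struct demo_conf *conf)
{
	int err;

	/* Reserve of 16 pages backed by alloc_page()/__free_page(). */
	err = mempool_init_page_pool(&conf->page_pool, 16, 0);
	if (err)
		return err;

	/* Bioset with the usual BIO_POOL_SIZE reserve and no front padding. */
	err = bioset_init(&conf->split_set, BIO_POOL_SIZE, 0, 0);
	if (err) {
		mempool_exit(&conf->page_pool);
		return err;
	}
	return 0;
}

static void demo_teardown(struct demo_conf *conf)
{
	/* Both exit helpers are no-ops on a zeroed, never-initialized pool. */
	bioset_exit(&conf->split_set);
	mempool_exit(&conf->page_pool);
}

Because the exit helpers tolerate an uninitialized (zeroed) pool, teardown paths no longer need NULL checks; that is why the error paths above can drop the "if (conf->bio_split)" guard and why raid1_reshape() can simply memset() its on-stack pools before use.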