@@ -130,6 +130,24 @@ static inline int speed_max(struct mddev *mddev)
 		mddev->sync_speed_max : sysctl_speed_limit_max;
 }
 
+static void * flush_info_alloc(gfp_t gfp_flags, void *data)
+{
+	return kzalloc(sizeof(struct flush_info), gfp_flags);
+}
+static void flush_info_free(void *flush_info, void *data)
+{
+	kfree(flush_info);
+}
+
+static void * flush_bio_alloc(gfp_t gfp_flags, void *data)
+{
+	return kzalloc(sizeof(struct flush_bio), gfp_flags);
+}
+static void flush_bio_free(void *flush_bio, void *data)
+{
+	kfree(flush_bio);
+}
+
 static struct ctl_table_header *raid_table_header;
 
 static struct ctl_table raid_table[] = {
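
The two structures backing these pools, struct flush_info and struct
flush_bio, are declared in md.h; that hunk is not part of this excerpt.
A sketch of their layout, inferred purely from how the fields are used
in the hunks below (take the exact declaration as an assumption, not a
quote from the patch):

	/* one instance per flush request entering the array */
	struct flush_info {
		struct bio		*bio;		/* the original flush bio */
		struct mddev		*mddev;
		struct work_struct	flush_work;	/* runs submit_flushes() */
		atomic_t		flush_pending;	/* member-device flushes in flight */
	};

	/* one instance per member-device flush bio */
	struct flush_bio {
		struct flush_info	*fi;		/* owning flush request */
		struct md_rdev		*rdev;		/* device this flush targets */
	};
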
@@ -412,30 +430,53 @@ static int md_congested(void *data, int bits)
 /*
  * Generic flush handling for md
  */
+static void submit_flushes(struct work_struct *ws)
+{
+	struct flush_info *fi = container_of(ws, struct flush_info, flush_work);
+	struct mddev *mddev = fi->mddev;
+	struct bio *bio = fi->bio;
+
+	bio->bi_opf &= ~REQ_PREFLUSH;
+	md_handle_request(mddev, bio);
+
+	mempool_free(fi, mddev->flush_pool);
+}
 
-static void md_end_flush(struct bio *bio)
+static void md_end_flush(struct bio *fbio)
 {
-	struct md_rdev *rdev = bio->bi_private;
-	struct mddev *mddev = rdev->mddev;
+	struct flush_bio *fb = fbio->bi_private;
+	struct md_rdev *rdev = fb->rdev;
+	struct flush_info *fi = fb->fi;
+	struct bio *bio = fi->bio;
+	struct mddev *mddev = fi->mddev;
 
 	rdev_dec_pending(rdev, mddev);
 
-	if (atomic_dec_and_test(&mddev->flush_pending)) {
-		/* The pre-request flush has finished */
-		queue_work(md_wq, &mddev->flush_work);
+	if (atomic_dec_and_test(&fi->flush_pending)) {
+		if (bio->bi_iter.bi_size == 0)
+			/* an empty barrier - all done */
+			bio_endio(bio);
+		else {
+			INIT_WORK(&fi->flush_work, submit_flushes);
+			queue_work(md_wq, &fi->flush_work);
+		}
 	}
-	bio_put(bio);
-}
 
-static void md_submit_flush_data(struct work_struct *ws);
+	mempool_free(fb, mddev->flush_bio_pool);
+	bio_put(fbio);
+}
 
-static void submit_flushes(struct work_struct *ws)
+void md_flush_request(struct mddev *mddev, struct bio *bio)
 {
-	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
 	struct md_rdev *rdev;
+	struct flush_info *fi;
+
+	fi = mempool_alloc(mddev->flush_pool, GFP_NOIO);
+
+	fi->bio = bio;
+	fi->mddev = mddev;
+	atomic_set(&fi->flush_pending, 1);
 
-	INIT_WORK(&mddev->flush_work, md_submit_flush_data);
-	atomic_set(&mddev->flush_pending, 1);
 	rcu_read_lock();
 	rdev_for_each_rcu(rdev, mddev)
 		if (rdev->raid_disk >= 0 &&
@@ -445,59 +486,39 @@ static void submit_flushes(struct work_struct *ws)
 			 * we reclaim rcu_read_lock
 			 */
 			struct bio *bi;
+			struct flush_bio *fb;
 			atomic_inc(&rdev->nr_pending);
 			atomic_inc(&rdev->nr_pending);
 			rcu_read_unlock();
+
+			fb = mempool_alloc(mddev->flush_bio_pool, GFP_NOIO);
+			fb->fi = fi;
+			fb->rdev = rdev;
+
 			bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
-			bi->bi_end_io = md_end_flush;
-			bi->bi_private = rdev;
 			bio_set_dev(bi, rdev->bdev);
+			bi->bi_end_io = md_end_flush;
+			bi->bi_private = fb;
 			bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
-			atomic_inc(&mddev->flush_pending);
+
+			atomic_inc(&fi->flush_pending);
 			submit_bio(bi);
+
 			rcu_read_lock();
 			rdev_dec_pending(rdev, mddev);
 		}
 	rcu_read_unlock();
-	if (atomic_dec_and_test(&mddev->flush_pending))
-		queue_work(md_wq, &mddev->flush_work);
-}
-
-static void md_submit_flush_data(struct work_struct *ws)
-{
-	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
-	struct bio *bio = mddev->flush_bio;
 
-	/*
-	 * must reset flush_bio before calling into md_handle_request to avoid a
-	 * deadlock, because other bios passed md_handle_request suspend check
-	 * could wait for this and below md_handle_request could wait for those
-	 * bios because of suspend check
-	 */
-	mddev->flush_bio = NULL;
-	wake_up(&mddev->sb_wait);
-
-	if (bio->bi_iter.bi_size == 0)
-		/* an empty barrier - all done */
-		bio_endio(bio);
-	else {
-		bio->bi_opf &= ~REQ_PREFLUSH;
-		md_handle_request(mddev, bio);
+	if (atomic_dec_and_test(&fi->flush_pending)) {
+		if (bio->bi_iter.bi_size == 0)
+			/* an empty barrier - all done */
+			bio_endio(bio);
+		else {
+			INIT_WORK(&fi->flush_work, submit_flushes);
+			queue_work(md_wq, &fi->flush_work);
+		}
 	}
 }
-
-void md_flush_request(struct mddev *mddev, struct bio *bio)
-{
-	spin_lock_irq(&mddev->lock);
-	wait_event_lock_irq(mddev->sb_wait,
-			    !mddev->flush_bio,
-			    mddev->lock);
-	mddev->flush_bio = bio;
-	spin_unlock_irq(&mddev->lock);
-
-	INIT_WORK(&mddev->flush_work, submit_flushes);
-	queue_work(md_wq, &mddev->flush_work);
-}
 EXPORT_SYMBOL(md_flush_request);
 
 static inline struct mddev *mddev_get(struct mddev *mddev)
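
The structural change here: the old code funneled every flush through a
single mddev->flush_bio slot, so a second flush had to sleep on sb_wait
until the first completed; the new code gives each flush its own
flush_info, so multiple flushes can be in flight concurrently.
Completion is tracked with the usual "start the count at one" idiom:
flush_pending begins at 1 (the submitter's own reference), is
incremented once per member-device flush issued, and whoever drops the
count to zero, either the last md_end_flush() or md_flush_request()
itself when nothing was issued, finishes the original bio.

A minimal userspace illustration of that counting idiom (C11, all names
hypothetical, not kernel code):

	#include <stdatomic.h>
	#include <stdio.h>

	struct fanout {
		atomic_int pending;
	};

	static void complete(struct fanout *f)
	{
		printf("all children done\n");
	}

	/* analogous to md_end_flush(): the last dropper completes */
	static void child_done(struct fanout *f)
	{
		if (atomic_fetch_sub(&f->pending, 1) == 1)
			complete(f);
	}

	/* analogous to md_flush_request() issuing per-device flushes */
	static void issue(struct fanout *f, int nchildren)
	{
		atomic_init(&f->pending, 1);	/* submitter's own reference */
		for (int i = 0; i < nchildren; i++) {
			atomic_fetch_add(&f->pending, 1);
			/* real children complete asynchronously; inline here */
			child_done(f);
		}
		child_done(f);	/* drop our reference; may complete now */
	}

	int main(void)
	{
		struct fanout f;
		issue(&f, 3);	/* nchildren == 0 also completes correctly */
		return 0;
	}

Because the count starts at 1, it can never reach zero while the
submitter is still walking the device list, which is why
md_flush_request() can drop rcu_read_lock() and sleep in mempool_alloc()
mid-loop without racing against its own completion.
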
@@ -555,7 +576,6 @@ void mddev_init(struct mddev *mddev)
 	atomic_set(&mddev->openers, 0);
 	atomic_set(&mddev->active_io, 0);
 	spin_lock_init(&mddev->lock);
-	atomic_set(&mddev->flush_pending, 0);
 	init_waitqueue_head(&mddev->sb_wait);
 	init_waitqueue_head(&mddev->recovery_wait);
 	mddev->reshape_position = MaxSector;
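
With flush state now carried per-request, the singleton fields can leave
struct mddev entirely, which is why this initialization goes away. The
matching md.h hunk is not shown in this excerpt, but to be consistent
with the code above it would drop flush_bio, flush_pending and
flush_work from struct mddev and add the two pool pointers, roughly (an
inferred sketch, not quoted from the patch):

	struct mddev {
		...
		mempool_t	*flush_pool;		/* struct flush_info objects */
		mempool_t	*flush_bio_pool;	/* struct flush_bio objects */
		...
	};
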
@@ -5510,6 +5530,22 @@ int md_run(struct mddev *mddev)
 			goto abort;
 		}
 	}
+	if (mddev->flush_pool == NULL) {
+		mddev->flush_pool = mempool_create(NR_FLUSH_INFOS, flush_info_alloc,
+						   flush_info_free, mddev);
+		if (!mddev->flush_pool) {
+			err = -ENOMEM;
+			goto abort;
+		}
+	}
+	if (mddev->flush_bio_pool == NULL) {
+		mddev->flush_bio_pool = mempool_create(NR_FLUSH_BIOS, flush_bio_alloc,
+						       flush_bio_free, mddev);
+		if (!mddev->flush_bio_pool) {
+			err = -ENOMEM;
+			goto abort;
+		}
+	}
 
 	spin_lock(&pers_lock);
 	pers = find_pers(mddev->level, mddev->clevel);
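
mempool_create(min_nr, alloc_fn, free_fn, pool_data) preallocates min_nr
elements so that mempool_alloc(..., GFP_NOIO) can always make forward
progress under memory pressure, which matters here because flushes are
issued on the I/O path. NR_FLUSH_INFOS and NR_FLUSH_BIOS are defined
elsewhere in the patch and are not visible in this excerpt. Since the
callbacks above just kzalloc()/kfree() a fixed-size object, the stock
kmalloc-backed helper would be a near equivalent, minus the zeroing
(hypothetical alternative, not what the patch does):

	mddev->flush_pool = mempool_create_kmalloc_pool(NR_FLUSH_INFOS,
					sizeof(struct flush_info));
	/* objects come back non-zeroed; callers would have to clear them */
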
@@ -5669,6 +5705,14 @@ int md_run(struct mddev *mddev)
 	return 0;
 
 abort:
+	if (mddev->flush_bio_pool) {
+		mempool_destroy(mddev->flush_bio_pool);
+		mddev->flush_bio_pool = NULL;
+	}
+	if (mddev->flush_pool) {
+		mempool_destroy(mddev->flush_pool);
+		mddev->flush_pool = NULL;
+	}
 	if (mddev->bio_set) {
 		bioset_free(mddev->bio_set);
 		mddev->bio_set = NULL;
@@ -5889,6 +5933,14 @@ void md_stop(struct mddev *mddev)
 	 * This is called from dm-raid
 	 */
 	__md_stop(mddev);
+	if (mddev->flush_bio_pool) {
+		mempool_destroy(mddev->flush_bio_pool);
+		mddev->flush_bio_pool = NULL;
+	}
+	if (mddev->flush_pool) {
+		mempool_destroy(mddev->flush_pool);
+		mddev->flush_pool = NULL;
+	}
 	if (mddev->bio_set) {
 		bioset_free(mddev->bio_set);
 		mddev->bio_set = NULL;