@@ -787,6 +787,30 @@ static int raid1_congested(struct mddev *mddev, int bits)
 	return ret;
 }
 
+static void flush_bio_list(struct r1conf *conf, struct bio *bio)
+{
+	/* flush any pending bitmap writes to disk before proceeding w/ I/O */
+	bitmap_unplug(conf->mddev->bitmap);
+	wake_up(&conf->wait_barrier);
+
+	while (bio) { /* submit pending writes */
+		struct bio *next = bio->bi_next;
+		struct md_rdev *rdev = (void*)bio->bi_bdev;
+		bio->bi_next = NULL;
+		bio->bi_bdev = rdev->bdev;
+		if (test_bit(Faulty, &rdev->flags)) {
+			bio->bi_error = -EIO;
+			bio_endio(bio);
+		} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
+				    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+			/* Just ignore it */
+			bio_endio(bio);
+		else
+			generic_make_request(bio);
+		bio = next;
+	}
+}
+
 static void flush_pending_writes(struct r1conf *conf)
 {
 	/* Any writes that have been queued but are awaiting
@@ -799,27 +823,7 @@ static void flush_pending_writes(struct r1conf *conf)
 		bio = bio_list_get(&conf->pending_bio_list);
 		conf->pending_count = 0;
 		spin_unlock_irq(&conf->device_lock);
-		/* flush any pending bitmap writes to
-		 * disk before proceeding w/ I/O */
-		bitmap_unplug(conf->mddev->bitmap);
-		wake_up(&conf->wait_barrier);
-
-		while (bio) { /* submit pending writes */
-			struct bio *next = bio->bi_next;
-			struct md_rdev *rdev = (void*)bio->bi_bdev;
-			bio->bi_next = NULL;
-			bio->bi_bdev = rdev->bdev;
-			if (test_bit(Faulty, &rdev->flags)) {
-				bio->bi_error = -EIO;
-				bio_endio(bio);
-			} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
-					    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
-				/* Just ignore it */
-				bio_endio(bio);
-			else
-				generic_make_request(bio);
-			bio = next;
-		}
+		flush_bio_list(conf, bio);
 	} else
 		spin_unlock_irq(&conf->device_lock);
 }
@@ -1152,25 +1156,7 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
 
 	/* we aren't scheduling, so we can do the write-out directly. */
 	bio = bio_list_get(&plug->pending);
-	bitmap_unplug(mddev->bitmap);
-	wake_up(&conf->wait_barrier);
-
-	while (bio) { /* submit pending writes */
-		struct bio *next = bio->bi_next;
-		struct md_rdev *rdev = (void*)bio->bi_bdev;
-		bio->bi_next = NULL;
-		bio->bi_bdev = rdev->bdev;
-		if (test_bit(Faulty, &rdev->flags)) {
-			bio->bi_error = -EIO;
-			bio_endio(bio);
-		} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
-			    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
-			/* Just ignore it */
-			bio_endio(bio);
-		else
-			generic_make_request(bio);
-		bio = next;
-	}
+	flush_bio_list(conf, bio);
 	kfree(plug);
 }
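
Note on the bi_bdev trick in the factored-out helper: bios queued on conf->pending_bio_list (or on the per-plug list) do not yet point at a real block device. The write path stashes the target struct md_rdev * in bi_bdev, and flush_bio_list() decodes it, tests Faulty, and restores the real device (rdev->bdev) before submission. A minimal sketch of the producer side, assuming the queueing path in raid1_write_request() for this kernel version:

	struct md_rdev *rdev = conf->mirrors[i].rdev;

	/* Stash the rdev in bi_bdev so the flush path can later test
	 * Faulty and recover the real device; flush_bio_list() undoes
	 * this before calling generic_make_request().
	 */
	mbio->bi_bdev = (void *)rdev;

	spin_lock_irqsave(&conf->device_lock, flags);
	bio_list_add(&conf->pending_bio_list, mbio);
	conf->pending_count++;
	spin_unlock_irqrestore(&conf->device_lock, flags);

The bitmap_unplug() call at the top of the helper preserves the original ordering guarantee: the dirty-bitmap update must reach disk before any of the queued data writes it describes are issued.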