@@ -1508,6 +1508,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
 	 */
 	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
 	set_bit(MD_CHANGE_DEVS, &mddev->flags);
+	set_bit(MD_CHANGE_PENDING, &mddev->flags);
 	printk(KERN_ALERT
 	       "md/raid1:%s: Disk failure on %s, disabling device.\n"
 	       "md/raid1:%s: Operation continuing on %d devices.\n",
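
Setting MD_CHANGE_PENDING next to MD_CHANGE_DEVS marks that a superblock write recording this failure is still outstanding; md clears the bit again once that write completes. The raid1d() hunk further down tests exactly this bit before completing deferred writes, so a failed write is never acknowledged to the caller before the disk failure itself is on stable storage.
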
@@ -2289,6 +2290,7 @@ static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio
 static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
 {
 	int m;
+	bool fail = false;
 	for (m = 0; m < conf->raid_disks * 2 ; m++)
 		if (r1_bio->bios[m] == IO_MADE_GOOD) {
 			struct md_rdev *rdev = conf->mirrors[m].rdev;
@@ -2301,6 +2303,7 @@ static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
 			 * narrow down and record precise write
 			 * errors.
 			 */
+			fail = true;
 			if (!narrow_write_error(r1_bio, m)) {
 				md_error(conf->mddev,
 					 conf->mirrors[m].rdev);
@@ -2312,7 +2315,13 @@ static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
 		}
 	if (test_bit(R1BIO_WriteError, &r1_bio->state))
 		close_write(r1_bio);
-	raid_end_bio_io(r1_bio);
+	if (fail) {
+		spin_lock_irq(&conf->device_lock);
+		list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
+		spin_unlock_irq(&conf->device_lock);
+		md_wakeup_thread(conf->mddev->thread);
+	} else
+		raid_end_bio_io(r1_bio);
 }
 
 static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
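
With this change a write r1_bio that saw any device failure (fail set above) is no longer completed inline: it is parked on conf->bio_end_io_list under device_lock and the md thread is woken to finish it later, once the pending superblock update has been written out. Only fully successful writes still take the immediate raid_end_bio_io() path.
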
@@ -2418,6 +2427,23 @@ static void raid1d(struct md_thread *thread)
 
 	md_check_recovery(mddev);
 
+	if (!list_empty_careful(&conf->bio_end_io_list) &&
+	    !test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+		LIST_HEAD(tmp);
+		spin_lock_irqsave(&conf->device_lock, flags);
+		if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+			list_add(&tmp, &conf->bio_end_io_list);
+			list_del_init(&conf->bio_end_io_list);
+		}
+		spin_unlock_irqrestore(&conf->device_lock, flags);
+		while (!list_empty(&tmp)) {
+			r1_bio = list_first_entry(&tmp, struct r1bio,
+						  retry_list);
+			list_del(&r1_bio->retry_list);
+			raid_end_bio_io(r1_bio);
+		}
+	}
+
 	blk_start_plug(&plug);
 	for (;;) {
 
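
Two details of the raid1d() hunk deserve a note. MD_CHANGE_PENDING is re-tested under device_lock, so queued bios are never completed while a superblock write is still in flight. And the list_add()/list_del_init() pair is a constant-time splice: it hangs every queued entry off the on-stack tmp head, so the lock can be dropped before the bios are finished. Below is a minimal user-space sketch of that splice idiom, assuming nothing from the kernel: the list helpers are hand-inlined stand-ins for <linux/list.h>, and struct item, pending and the ids are illustrative only.

#include <stdio.h>
#include <stddef.h>

/* Inlined stand-ins for the kernel's <linux/list.h> helpers. */
struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD(name) struct list_head name = { &(name), &(name) }

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *entry, struct list_head *head)
{
	entry->next = head->next;
	entry->prev = head;
	head->next->prev = entry;
	head->next = entry;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

static void list_del_init(struct list_head *entry)
{
	list_del(entry);
	INIT_LIST_HEAD(entry);
}

static int list_empty(const struct list_head *h) { return h->next == h; }

#define list_first_entry(head, type, member) \
	((type *)((char *)(head)->next - offsetof(type, member)))

struct item { int id; struct list_head node; };

int main(void)
{
	LIST_HEAD(pending);			/* plays conf->bio_end_io_list */
	LIST_HEAD(tmp);
	struct item a = { .id = 1 }, b = { .id = 2 };

	list_add(&a.node, &pending);
	list_add(&b.node, &pending);

	/* The splice from raid1d(): hook tmp in right after the old head,
	 * then detach the old head; every entry now hangs off tmp. */
	list_add(&tmp, &pending);
	list_del_init(&pending);

	while (!list_empty(&tmp)) {
		struct item *it = list_first_entry(&tmp, struct item, node);
		list_del(&it->node);
		printf("completing %d\n", it->id);	/* ~ raid_end_bio_io() */
	}
	return 0;
}

Run as-is this prints "completing 2" then "completing 1": list_add() inserts at the head, so the deferred entries drain in LIFO order, which is harmless here because each bio is completed independently.
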
@@ -2819,6 +2845,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
 	conf->raid_disks = mddev->raid_disks;
 	conf->mddev = mddev;
 	INIT_LIST_HEAD(&conf->retry_list);
+	INIT_LIST_HEAD(&conf->bio_end_io_list);
 
 	spin_lock_init(&conf->resync_lock);
 	init_waitqueue_head(&conf->wait_barrier);
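
Not shown in this excerpt: for these hunks to build, struct r1conf is assumed to also gain a matching struct list_head bio_end_io_list member next to retry_list (in raid1.h), which the INIT_LIST_HEAD() call above then initializes at setup time.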