@@ -1476,6 +1476,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
 {
 	char b[BDEVNAME_SIZE];
 	struct r1conf *conf = mddev->private;
+	unsigned long flags;
 
 	/*
 	 * If it is not operational, then we have already marked it as dead
@@ -1495,14 +1496,13 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
 		return;
 	}
 	set_bit(Blocked, &rdev->flags);
+	spin_lock_irqsave(&conf->device_lock, flags);
 	if (test_and_clear_bit(In_sync, &rdev->flags)) {
-		unsigned long flags;
-		spin_lock_irqsave(&conf->device_lock, flags);
 		mddev->degraded++;
 		set_bit(Faulty, &rdev->flags);
-		spin_unlock_irqrestore(&conf->device_lock, flags);
 	} else
 		set_bit(Faulty, &rdev->flags);
+	spin_unlock_irqrestore(&conf->device_lock, flags);
 	/*
 	 * if recovery is running, make sure it aborts.
 	 */
@@ -1568,7 +1568,10 @@ static int raid1_spare_active(struct mddev *mddev)
 	 * Find all failed disks within the RAID1 configuration
 	 * and mark them readable.
 	 * Called under mddev lock, so rcu protection not needed.
+	 * device_lock used to avoid races with raid1_end_read_request
+	 * which expects 'In_sync' flags and ->degraded to be consistent.
 	 */
+	spin_lock_irqsave(&conf->device_lock, flags);
 	for (i = 0; i < conf->raid_disks; i++) {
 		struct md_rdev *rdev = conf->mirrors[i].rdev;
 		struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
@@ -1599,7 +1602,6 @@ static int raid1_spare_active(struct mddev *mddev)
 			sysfs_notify_dirent_safe(rdev->sysfs_state);
 		}
 	}
-	spin_lock_irqsave(&conf->device_lock, flags);
 	mddev->degraded -= count;
 	spin_unlock_irqrestore(&conf->device_lock, flags);
 