@@ -8180,15 +8180,34 @@ static int remove_and_add_spares(struct mddev *mddev,
 	struct md_rdev *rdev;
 	int spares = 0;
 	int removed = 0;
+	bool remove_some = false;
 
-	rdev_for_each(rdev, mddev)
+	rdev_for_each(rdev, mddev) {
+		if ((this == NULL || rdev == this) &&
+		    rdev->raid_disk >= 0 &&
+		    !test_bit(Blocked, &rdev->flags) &&
+		    test_bit(Faulty, &rdev->flags) &&
+		    atomic_read(&rdev->nr_pending)==0) {
+			/* Faulty non-Blocked devices with nr_pending == 0
+			 * never get nr_pending incremented,
+			 * never get Faulty cleared, and never get Blocked set.
+			 * So we can synchronize_rcu now rather than once per device
+			 */
+			remove_some = true;
+			set_bit(RemoveSynchronized, &rdev->flags);
+		}
+	}
+
+	if (remove_some)
+		synchronize_rcu();
+	rdev_for_each(rdev, mddev) {
 		if ((this == NULL || rdev == this) &&
 		    rdev->raid_disk >= 0 &&
 		    !test_bit(Blocked, &rdev->flags) &&
-		    (test_bit(Faulty, &rdev->flags) ||
+		    ((test_bit(RemoveSynchronized, &rdev->flags) ||
 		     (!test_bit(In_sync, &rdev->flags) &&
 		      !test_bit(Journal, &rdev->flags))) &&
-		    atomic_read(&rdev->nr_pending)==0) {
+		    atomic_read(&rdev->nr_pending)==0)) {
 			if (mddev->pers->hot_remove_disk(
 				    mddev, rdev) == 0) {
 				sysfs_unlink_rdev(mddev, rdev);
@@ -8196,6 +8215,10 @@ static int remove_and_add_spares(struct mddev *mddev,
 			removed++;
 		}
 	}
+		if (remove_some && test_bit(RemoveSynchronized, &rdev->flags))
+			clear_bit(RemoveSynchronized, &rdev->flags);
+	}
+
 	if (removed && mddev->kobj.sd)
 		sysfs_notify(&mddev->kobj, NULL, "degraded");
 
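The point of the change is the two-pass structure: the first rdev_for_each() pass only marks Faulty, non-Blocked devices with no pending I/O by setting RemoveSynchronized, a single synchronize_rcu() then covers all of them (as the in-code comment notes, such devices can never become busy again), and the second pass does the actual hot_remove_disk() work. Below is a minimal userspace sketch of that batching pattern, not kernel code: struct dev, remove_failed() and wait_for_grace_period() are invented for the illustration, with wait_for_grace_period() standing in for synchronize_rcu().

```c
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in; the real code uses struct md_rdev and
 * set_bit()/test_bit() on rdev->flags. */
struct dev {
	const char *name;
	bool faulty;
	bool remove_synchronized;
	int nr_pending;
};

/* Stand-in for synchronize_rcu(): wait until no reader can still hold
 * a reference obtained before the marking pass completed. */
static void wait_for_grace_period(void)
{
	printf("one grace period for all marked devices\n");
}

static void remove_failed(struct dev *devs, int n)
{
	bool remove_some = false;
	int i;

	/* Pass 1: mark every device that is already failed and idle.
	 * Such a device cannot become busy again, so one grace period
	 * later is enough for all of them at once. */
	for (i = 0; i < n; i++)
		if (devs[i].faulty && devs[i].nr_pending == 0) {
			devs[i].remove_synchronized = true;
			remove_some = true;
		}

	if (remove_some)
		wait_for_grace_period();	/* once, not per device */

	/* Pass 2: actually detach the marked devices, clearing the mark. */
	for (i = 0; i < n; i++) {
		if (devs[i].remove_synchronized)
			printf("removing %s\n", devs[i].name);
		devs[i].remove_synchronized = false;
	}
}

int main(void)
{
	struct dev devs[] = {
		{ "sda", true,  false, 0 },
		{ "sdb", false, false, 0 },
		{ "sdc", true,  false, 0 },
	};

	remove_failed(devs, 3);
	return 0;
}
```

With this shape the cost is one grace period however many array members have failed, rather than one per removed device, which is what the comment in the first pass is getting at.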