@@ -2695,7 +2695,8 @@ static ssize_t new_offset_store(struct md_rdev *rdev,
 	if (kstrtoull(buf, 10, &new_offset) < 0)
 		return -EINVAL;
 
-	if (mddev->sync_thread)
+	if (mddev->sync_thread ||
+	    test_bit(MD_RECOVERY_RUNNING,&mddev->recovery))
 		return -EBUSY;
 	if (new_offset == rdev->data_offset)
 		/* reset is always permitted */
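
new_offset_store() previously refused only while mddev->sync_thread existed. MD_RECOVERY_RUNNING can be set before the thread is created and can remain set after the thread pointer is cleared, so testing the bit as well closes both windows. The same two-part test recurs in several hunks below; purely as a reading aid, it behaves like this hypothetical helper (not part of the patch, and no such helper exists in md.c):

/* Hypothetical reading aid only; not in md.c. */
static inline bool md_resync_active(struct mddev *mddev)
{
	return mddev->sync_thread != NULL ||
	       test_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
}
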
@@ -3272,6 +3273,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
 	 */
 
 	if (mddev->sync_thread ||
+	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
 	    mddev->reshape_position != MaxSector ||
 	    mddev->sysfs_active)
 		return -EBUSY;
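
Same hardening for level_store(): a personality change is refused whenever resync/recovery is scheduled or still winding down, not just while the sync thread exists.
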
@@ -4026,6 +4028,7 @@ action_store(struct mddev *mddev, const char *page, size_t len)
 		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
 
 	if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
+		flush_workqueue(md_misc_wq);
 		if (mddev->sync_thread) {
 			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
 			md_reap_sync_thread(mddev);
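
md_start_sync() runs from a work item on md_misc_wq (see its hunk below), so a resync can be pending without mddev->sync_thread being set yet. Flushing the workqueue first presumably ensures any queued work has run, so the sync_thread test that follows sees the settled state.
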
@@ -5044,6 +5047,7 @@ static void md_clean(struct mddev *mddev)
 static void __md_stop_writes(struct mddev *mddev)
 {
 	set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+	flush_workqueue(md_misc_wq);
 	if (mddev->sync_thread) {
 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
 		md_reap_sync_thread(mddev);
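
The same flush is added to __md_stop_writes(), for the same apparent reason: don't miss a sync thread that a still-queued work item is about to create.
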
@@ -5104,19 +5108,22 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
 		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
 		md_wakeup_thread(mddev->thread);
 	}
-	if (mddev->sync_thread) {
+	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+	if (mddev->sync_thread)
 		/* Thread might be blocked waiting for metadata update
 		 * which will now never happen */
 		wake_up_process(mddev->sync_thread->tsk);
-	}
+
 	mddev_unlock(mddev);
-	wait_event(resync_wait, mddev->sync_thread == NULL);
+	wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
+					  &mddev->recovery));
 	mddev_lock_nointr(mddev);
 
 	mutex_lock(&mddev->open_mutex);
 	if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
 	    mddev->sync_thread ||
+	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
 	    (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
 		printk("md: %s still in use.\n",mdname(mddev));
 		if (did_freeze) {
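
Three related changes in md_set_readonly(): MD_RECOVERY_INTR is now set whenever recovery is running, even if the thread has not been created yet; the process wakeup is issued only if the thread actually exists; and the subsequent wait is for MD_RECOVERY_RUNNING to clear rather than for mddev->sync_thread to become NULL, i.e. for recovery to be fully torn down rather than merely for the thread pointer to go away. The still-in-use check gains the matching RUNNING test.
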
@@ -5162,20 +5169,24 @@ static int do_md_stop(struct mddev *mddev, int mode,
 		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
 		md_wakeup_thread(mddev->thread);
 	}
-	if (mddev->sync_thread) {
+	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+	if (mddev->sync_thread)
 		/* Thread might be blocked waiting for metadata update
 		 * which will now never happen */
 		wake_up_process(mddev->sync_thread->tsk);
-	}
+
 	mddev_unlock(mddev);
-	wait_event(resync_wait, mddev->sync_thread == NULL);
+	wait_event(resync_wait, (mddev->sync_thread == NULL &&
+				 !test_bit(MD_RECOVERY_RUNNING,
+					   &mddev->recovery)));
 	mddev_lock_nointr(mddev);
 
 	mutex_lock(&mddev->open_mutex);
 	if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
 	    mddev->sysfs_active ||
 	    mddev->sync_thread ||
+	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
 	    (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
 		printk("md: %s still in use.\n",mdname(mddev));
 		mutex_unlock(&mddev->open_mutex);
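
do_md_stop() gets the same treatment, except that its wait_event() requires both conditions: the thread gone and the RUNNING bit clear. The pairing relied on throughout is the standard flag-plus-waitqueue completion pattern: whoever clears the flag must wake the queue. A minimal userspace analogy, with pthreads standing in for wait_event()/wake_up() (none of this is md code):

/* build with: cc -pthread example.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t resync_wait = PTHREAD_COND_INITIALIZER;
static bool recovery_running = true;	/* stands in for MD_RECOVERY_RUNNING */

static void *sync_worker(void *arg)
{
	/* ... resync work happens here ... */
	pthread_mutex_lock(&lock);
	recovery_running = false;		/* clear_bit(MD_RECOVERY_RUNNING, ...) */
	pthread_cond_broadcast(&resync_wait);	/* wake_up(&resync_wait) */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, sync_worker, NULL);

	/* wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING, ...)) */
	pthread_mutex_lock(&lock);
	while (recovery_running)
		pthread_cond_wait(&resync_wait, &lock);
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	printf("recovery finished\n");
	return 0;
}

If any path clears the flag without broadcasting, the waiter sleeps forever; that is exactly the bug class the wake_up() additions below guard against.
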
@@ -5950,7 +5961,8 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)
 	 * of each device. If num_sectors is zero, we find the largest size
 	 * that fits.
 	 */
-	if (mddev->sync_thread)
+	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
+	    mddev->sync_thread)
 		return -EBUSY;
 	if (mddev->ro)
 		return -EROFS;
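
update_size() likewise refuses to resize while recovery may be running; the RUNNING test catches the window before the thread exists.
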
@@ -5981,7 +5993,9 @@ static int update_raid_disks(struct mddev *mddev, int raid_disks)
 	if (raid_disks <= 0 ||
 	    (mddev->max_disks && raid_disks >= mddev->max_disks))
 		return -EINVAL;
-	if (mddev->sync_thread || mddev->reshape_position != MaxSector)
+	if (mddev->sync_thread ||
+	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
+	    mddev->reshape_position != MaxSector)
 		return -EBUSY;
 
 	rdev_for_each(rdev, mddev) {
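
And the same guard for update_raid_disks().
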
@@ -7593,6 +7607,7 @@ static void md_start_sync(struct work_struct *ws)
 		clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
 		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
 		clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
+		wake_up(&resync_wait);
 		if (test_and_clear_bit(MD_RECOVERY_RECOVER,
 				       &mddev->recovery))
 			if (mddev->sysfs_action)
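
From here on, the other half of the contract: now that waiters key on MD_RECOVERY_RUNNING, every place that clears the bit must also wake resync_wait. This is md_start_sync()'s path for when the sync thread could not be started.
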
@@ -7761,6 +7776,7 @@ void md_check_recovery(struct mddev *mddev)
 	not_running:
 		if (!mddev->sync_thread) {
 			clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
+			wake_up(&resync_wait);
 			if (test_and_clear_bit(MD_RECOVERY_RECOVER,
 					       &mddev->recovery))
 				if (mddev->sysfs_action)
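
Same pairing in md_check_recovery()'s not_running path.
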
@@ -7779,7 +7795,6 @@ void md_reap_sync_thread(struct mddev *mddev)
 
 	/* resync has finished, collect result */
 	md_unregister_thread(&mddev->sync_thread);
-	wake_up(&resync_wait);
 	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
 	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
 		/* success...*/
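
The early wake_up() in md_reap_sync_thread() is dropped: at this point MD_RECOVERY_RUNNING is still set, so under the new wait conditions the wakeup would be premature and waiters would simply go back to sleep.
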
@@ -7807,6 +7822,7 @@ void md_reap_sync_thread(struct mddev *mddev)
 	clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
 	clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
 	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+	wake_up(&resync_wait);
 	/* flag recovery needed just to double check */
 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 	sysfs_notify_dirent_safe(mddev->sysfs_action);
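
The wakeup is re-issued here, once the recovery flags have been cleared (MD_RECOVERY_RUNNING itself is presumably cleared just above this context), so waiters re-evaluate their conditions against the fully updated state.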