@@ -5560,11 +5560,6 @@ int md_run(struct mddev *mddev)
 	if (start_readonly && mddev->ro == 0)
 		mddev->ro = 2; /* read-only, but switch on first write */
 
-	/*
-	 * NOTE: some pers->run(), for example r5l_recovery_log(), wakes
-	 * up mddev->thread. It is important to initialize critical
-	 * resources for mddev->thread BEFORE calling pers->run().
-	 */
 	err = pers->run(mddev);
 	if (err)
 		pr_warn("md: pers->run() failed ...\n");
@@ -5678,6 +5673,9 @@ static int do_md_run(struct mddev *mddev)
 	if (mddev_is_clustered(mddev))
 		md_allow_write(mddev);
 
+	/* run start up tasks that require md_thread */
+	md_start(mddev);
+
 	md_wakeup_thread(mddev->thread);
 	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
 
@@ -5689,6 +5687,21 @@ out:
 	return err;
 }
 
+int md_start(struct mddev *mddev)
+{
+	int ret = 0;
+
+	if (mddev->pers->start) {
+		set_bit(MD_RECOVERY_WAIT, &mddev->recovery);
+		md_wakeup_thread(mddev->thread);
+		ret = mddev->pers->start(mddev);
+		clear_bit(MD_RECOVERY_WAIT, &mddev->recovery);
+		md_wakeup_thread(mddev->sync_thread);
+	}
+	return ret;
+}
+EXPORT_SYMBOL_GPL(md_start);
+
 static int restart_array(struct mddev *mddev)
 {
 	struct gendisk *disk = mddev->gendisk;
@@ -8169,7 +8182,8 @@ void md_do_sync(struct md_thread *thread)
 	int ret;
 
 	/* just incase thread restarts... */
-	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
+	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
+	    test_bit(MD_RECOVERY_WAIT, &mddev->recovery))
 		return;
 	if (mddev->ro) {/* never try to sync a read-only array */
 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);