@@ -8022,18 +8022,15 @@ EXPORT_SYMBOL(md_write_end);
  * may proceed without blocking. It is important to call this before
  * attempting a GFP_KERNEL allocation while holding the mddev lock.
  * Must be called with mddev_lock held.
- *
- * In the ->external case MD_SB_CHANGE_PENDING can not be cleared until mddev->lock
- * is dropped, so return -EAGAIN after notifying userspace.
  */
-int md_allow_write(struct mddev *mddev)
+void md_allow_write(struct mddev *mddev)
 {
 	if (!mddev->pers)
-		return 0;
+		return;
 	if (mddev->ro)
-		return 0;
+		return;
 	if (!mddev->pers->sync_request)
-		return 0;
+		return;
 
 	spin_lock(&mddev->lock);
 	if (mddev->in_sync) {
@@ -8046,13 +8043,12 @@ int md_allow_write(struct mddev *mddev)
 		spin_unlock(&mddev->lock);
 		md_update_sb(mddev, 0);
 		sysfs_notify_dirent_safe(mddev->sysfs_state);
+		/* wait for the dirty state to be recorded in the metadata */
+		wait_event(mddev->sb_wait,
+			   !test_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags) &&
+			   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
 	} else
 		spin_unlock(&mddev->lock);
-
-	if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
-		return -EAGAIN;
-	else
-		return 0;
 }
 EXPORT_SYMBOL_GPL(md_allow_write);
 
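Illustration (not part of the patch above): a minimal sketch of how a call site is expected to simplify once md_allow_write() returns void. The helper names prepare_resize_old()/prepare_resize_new() and their bodies are hypothetical; md_allow_write(), struct mddev and the "GFP_KERNEL allocation while holding the mddev lock" pattern come from the code and comment above.

/*
 * Hypothetical caller before this patch: md_allow_write() returned int,
 * and the -EAGAIN it could return for external metadata had to be
 * handled or propagated by the caller.
 */
static int prepare_resize_old(struct mddev *mddev)
{
	int err = md_allow_write(mddev);

	if (err)
		return err;
	/* ... GFP_KERNEL allocation while holding the mddev lock ... */
	return 0;
}

/*
 * Hypothetical caller after this patch: md_allow_write() is void and,
 * when it marks the array dirty, waits for the dirty state to be
 * recorded in the metadata (the wait_event() added above), so there is
 * no error code left to handle here.
 */
static int prepare_resize_new(struct mddev *mddev)
{
	md_allow_write(mddev);
	/* ... GFP_KERNEL allocation while holding the mddev lock ... */
	return 0;
}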