@@ -266,6 +266,37 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
  * call has finished, the bio has been linked into some internal structure
  * and so is visible to ->quiesce(), so we don't need the refcount any more.
  */
+void md_handle_request(struct mddev *mddev, struct bio *bio)
+{
+check_suspended:
+	rcu_read_lock();
+	if (mddev->suspended) {
+		DEFINE_WAIT(__wait);
+		for (;;) {
+			prepare_to_wait(&mddev->sb_wait, &__wait,
+					TASK_UNINTERRUPTIBLE);
+			if (!mddev->suspended)
+				break;
+			rcu_read_unlock();
+			schedule();
+			rcu_read_lock();
+		}
+		finish_wait(&mddev->sb_wait, &__wait);
+	}
+	atomic_inc(&mddev->active_io);
+	rcu_read_unlock();
+
+	if (!mddev->pers->make_request(mddev, bio)) {
+		atomic_dec(&mddev->active_io);
+		wake_up(&mddev->sb_wait);
+		goto check_suspended;
+	}
+
+	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
+		wake_up(&mddev->sb_wait);
+}
+EXPORT_SYMBOL(md_handle_request);
+
 static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
 {
 	const int rw = bio_data_dir(bio);
@@ -285,23 +316,6 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
 		bio_endio(bio);
 		return BLK_QC_T_NONE;
 	}
-check_suspended:
-	rcu_read_lock();
-	if (mddev->suspended) {
-		DEFINE_WAIT(__wait);
-		for (;;) {
-			prepare_to_wait(&mddev->sb_wait, &__wait,
-					TASK_UNINTERRUPTIBLE);
-			if (!mddev->suspended)
-				break;
-			rcu_read_unlock();
-			schedule();
-			rcu_read_lock();
-		}
-		finish_wait(&mddev->sb_wait, &__wait);
-	}
-	atomic_inc(&mddev->active_io);
-	rcu_read_unlock();
 
 	/*
 	 * save the sectors now since our bio can
@@ -310,20 +324,14 @@ check_suspended:
 	sectors = bio_sectors(bio);
 	/* bio could be mergeable after passing to underlayer */
 	bio->bi_opf &= ~REQ_NOMERGE;
-	if (!mddev->pers->make_request(mddev, bio)) {
-		atomic_dec(&mddev->active_io);
-		wake_up(&mddev->sb_wait);
-		goto check_suspended;
-	}
+
+	md_handle_request(mddev, bio);
 
 	cpu = part_stat_lock();
 	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
 	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
 	part_stat_unlock();
 
-	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
-		wake_up(&mddev->sb_wait);
-
 	return BLK_QC_T_NONE;
 }
 
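The new md_handle_request() above is the generic entry point: wait while the array is suspended, take an active_io reference, dispatch to the personality, and retry if the personality refused the bio because it raced with a suspend. For readers who want the handshake in isolation, here is a minimal userspace analogue built on pthreads. It is a sketch, not kernel code: struct dev, dev_submit(), dev_suspend() and dev_resume() are hypothetical names, and a mutex/condvar pair stands in for the RCU and sb_wait machinery the kernel uses.

#include <pthread.h>

struct dev {
	pthread_mutex_t lock;
	pthread_cond_t  wait;		/* plays the role of mddev->sb_wait */
	int             suspended;	/* mddev->suspended */
	int             active_io;	/* mddev->active_io */
};

static struct dev dev = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.wait = PTHREAD_COND_INITIALIZER,
};

/* Submission side: mirrors the shape of md_handle_request(). */
static void dev_submit(struct dev *d)
{
	pthread_mutex_lock(&d->lock);
	while (d->suspended)		/* block new I/O while suspended */
		pthread_cond_wait(&d->wait, &d->lock);
	d->active_io++;			/* in flight, visible to suspend */
	pthread_mutex_unlock(&d->lock);

	/* ... hand the request to the personality here ... */

	pthread_mutex_lock(&d->lock);
	if (--d->active_io == 0 && d->suspended)
		pthread_cond_broadcast(&d->wait);	/* last I/O: wake suspender */
	pthread_mutex_unlock(&d->lock);
}

/* Suspend side: the counterpart that waits for active_io to drain. */
static void dev_suspend(struct dev *d)
{
	pthread_mutex_lock(&d->lock);
	d->suspended = 1;
	while (d->active_io > 0)	/* drain in-flight I/O */
		pthread_cond_wait(&d->wait, &d->lock);
	pthread_mutex_unlock(&d->lock);
}

static void dev_resume(struct dev *d)
{
	pthread_mutex_lock(&d->lock);
	d->suspended = 0;
	pthread_cond_broadcast(&d->wait);	/* release blocked submitters */
	pthread_mutex_unlock(&d->lock);
}

int main(void)
{
	dev_submit(&dev);	/* not suspended: passes straight through */
	dev_suspend(&dev);	/* nothing in flight: returns immediately */
	dev_resume(&dev);
	return 0;
}

The retry loop in the kernel version (goto check_suspended when ->make_request() returns false) has no analogue in this sketch; it exists because a personality can refuse a bio that raced with a suspend, in which case the submitter must drop its active_io reference and wait again.
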
@@ -439,16 +447,22 @@ static void md_submit_flush_data(struct work_struct *ws)
 	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
 	struct bio *bio = mddev->flush_bio;
 
+	/*
+	 * flush_bio must be reset before calling into md_handle_request to
+	 * avoid a deadlock: bios that already passed the suspend check could
+	 * be waiting for this flush, while the md_handle_request call below
+	 * could wait for those bios because of the suspend check.
+	 */
+	mddev->flush_bio = NULL;
+	wake_up(&mddev->sb_wait);
+
 	if (bio->bi_iter.bi_size == 0)
 		/* an empty barrier - all done */
 		bio_endio(bio);
 	else {
 		bio->bi_opf &= ~REQ_PREFLUSH;
-		mddev->pers->make_request(mddev, bio);
+		md_handle_request(mddev, bio);
 	}
-
-	mddev->flush_bio = NULL;
-	wake_up(&mddev->sb_wait);
 }
 
 void md_flush_request(struct mddev *mddev, struct bio *bio)
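
The comment added to md_submit_flush_data() captures a general ordering rule: a completion handler must release the resource that other submitters block on (here mddev->flush_bio) before re-entering a path that can itself block (md_handle_request() with its suspend check). Below is a sketch of the same rule in the userspace style of the earlier example; struct fdev, fdev_start_flush() and fdev_flush_done() are again hypothetical names, not kernel APIs.

#include <pthread.h>

struct fdev {
	pthread_mutex_t lock;
	pthread_cond_t  wait;
	int             flush_busy;	/* plays the role of mddev->flush_bio */
};

/* Issue side: only one flush may be outstanding, as in md_flush_request(). */
static void fdev_start_flush(struct fdev *d)
{
	pthread_mutex_lock(&d->lock);
	while (d->flush_busy)		/* wait for the previous flush */
		pthread_cond_wait(&d->wait, &d->lock);
	d->flush_busy = 1;
	pthread_mutex_unlock(&d->lock);

	/* ... issue the pre-flush; completion calls fdev_flush_done() ... */
}

/* Completion side: mirrors the ordering in md_submit_flush_data(). */
static void fdev_flush_done(struct fdev *d)
{
	/*
	 * Release the slot BEFORE dispatching the data part. Dispatching
	 * can block (like the suspend check in md_handle_request()); if we
	 * held the slot across it while the bios we depend on were in turn
	 * waiting for the slot, the two sides would deadlock.
	 */
	pthread_mutex_lock(&d->lock);
	d->flush_busy = 0;
	pthread_cond_broadcast(&d->wait);
	pthread_mutex_unlock(&d->lock);

	/* ... now dispatch the data portion of the request ... */
}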