@@ -342,6 +342,34 @@ static void flush_data_end_io(struct request *rq, int error)
 	struct request_queue *q = rq->q;
 	struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
 
+	/*
+	 * Update q->in_flight[] here to make this tag usable
+	 * early, because in blk_queue_start_tag()
+	 * q->in_flight[BLK_RW_ASYNC] is used to limit async I/O and
+	 * reserve tags for sync I/O.
+	 *
+	 * More importantly, this avoids the following I/O
+	 * deadlock:
+	 *
+	 * - suppose there are 40 FUA requests coming to the flush
+	 *   queue and the queue depth is 31
+	 * - 30 rqs are scheduled, then blk_queue_start_tag() can't
+	 *   allocate a tag for async I/O any more
+	 * - all 30 rqs are completed before FLUSH_PENDING_TIMEOUT
+	 *   and flush_data_end_io() is called
+	 * - without updating q->in_flight[BLK_RW_ASYNC] here, the
+	 *   remaining rqs can't go ahead; meanwhile they are
+	 *   held in the flush data queue and make no progress
+	 *   on handling the post-flush rq
+	 * - only after the post-flush rq is handled can all
+	 *   these rqs be completed
+	 */
+
+	elv_completed_request(q, rq);
+
+	/* avoid double accounting */
+	rq->cmd_flags &= ~REQ_STARTED;
+
 	/*
 	 * After populating an empty queue, kick it to avoid stall. Read
 	 * the comment in flush_end_io().
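
For reference, the accounting argument in the new comment can be checked with a small userspace model. This is only an illustrative sketch, not kernel code: DEPTH, ASYNC_LIMIT, NR_FUA_RQS, in_flight_async, data_done and early_decrement are made-up names standing in for the tag-map depth, the async limit enforced by blk_queue_start_tag(), the FUA workload from the comment, q->in_flight[BLK_RW_ASYNC], data-phase completion in flush_data_end_io(), and the effect of the elv_completed_request() call added above. The real flush state machine is more involved; the sketch only shows why releasing the accounting slot at data completion, rather than after the post-flush rq, unblocks dispatch.

/*
 * Illustrative userspace model of the deadlock described in the
 * patch comment. Hypothetical names only; not kernel code.
 */
#include <stdio.h>
#include <stdbool.h>

#define DEPTH		31		/* queue depth from the scenario */
#define ASYNC_LIMIT	(DEPTH - 1)	/* async I/O may not take every tag */
#define NR_FUA_RQS	40		/* FUA rqs arriving at the flush queue */

int main(void)
{
	int in_flight_async = 0;	/* stands in for q->in_flight[BLK_RW_ASYNC] */
	int dispatched = 0;
	int data_done = 0;		/* rqs whose data phase has ended */
	bool early_decrement = true;	/* the fix; set false to see the stall */

	while (data_done < NR_FUA_RQS) {
		/*
		 * Dispatch while the async accounting allows it
		 * (models the limit checked in blk_queue_start_tag()).
		 */
		while (dispatched < NR_FUA_RQS &&
		       in_flight_async < ASYNC_LIMIT) {
			in_flight_async++;
			dispatched++;
		}

		/* Nothing in flight and nothing dispatchable: stuck. */
		if (data_done == dispatched) {
			printf("deadlock: %d rqs never dispatched, "
			       "in_flight_async=%d\n",
			       NR_FUA_RQS - dispatched, in_flight_async);
			return 1;
		}

		/*
		 * One rq finishes its data phase (flush_data_end_io()).
		 * Without the fix, its accounting slot is freed only after
		 * the post-flush rq completes -- which never happens here.
		 */
		data_done++;
		if (early_decrement)
			in_flight_async--;	/* elv_completed_request() effect */
	}

	printf("all %d rqs made progress\n", NR_FUA_RQS);
	return 0;
}

With early_decrement = true every data-phase completion frees an accounting slot, so all 40 rqs get dispatched. With it set to false, dispatch stops after 30 rqs and the model reports 10 rqs stuck with in_flight_async pinned at 30, mirroring the scenario in the comment.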