@@ -378,7 +378,15 @@ static void blk_mq_start_request(struct request *rq, bool last)
 	 * REQ_ATOMIC_STARTED is seen.
 	 */
 	rq->deadline = jiffies + q->rq_timeout;
+
+	/*
+	 * Mark us as started and clear complete. Complete might have been
+	 * set if requeue raced with timeout, which then marked it as
+	 * complete. So be sure to clear complete again when we start
+	 * the request, otherwise we'll ignore the completion event.
+	 */
 	set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
 
 	if (q->dma_drain_size && blk_rq_bytes(rq)) {
 		/*
@@ -485,6 +493,28 @@ static void blk_mq_hw_ctx_check_timeout(struct blk_mq_hw_ctx *hctx,
 	blk_mq_tag_busy_iter(hctx->tags, blk_mq_timeout_check, &data);
 }
 
+static enum blk_eh_timer_return blk_mq_rq_timed_out(struct request *rq)
+{
+	struct request_queue *q = rq->q;
+
+	/*
+	 * We know that complete is set at this point. If STARTED isn't set
+	 * anymore, then the request isn't active and the "timeout" should
+	 * just be ignored. This can happen due to the bitflag ordering.
+	 * Timeout first checks if STARTED is set, and if it is, assumes
+	 * the request is active. But if we race with completion, then
+	 * both flags will get cleared. So check here again, and ignore
+	 * a timeout event with a request that isn't active.
+	 */
+	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+		return BLK_EH_NOT_HANDLED;
+
+	if (!q->mq_ops->timeout)
+		return BLK_EH_RESET_TIMER;
+
+	return q->mq_ops->timeout(rq);
+}
+
 static void blk_mq_rq_timer(unsigned long data)
 {
 	struct request_queue *q = (struct request_queue *) data;
@@ -538,11 +568,6 @@ static bool blk_mq_attempt_merge(struct request_queue *q,
 	return false;
 }
 
-void blk_mq_add_timer(struct request *rq)
-{
-	__blk_add_timer(rq, NULL);
-}
-
 /*
  * Run this hardware queue, pulling any software queues mapped to it in.
  * Note that this function currently has various problems around ordering
@@ -799,7 +824,7 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
 	/*
 	 * We do this early, to ensure we are on the right CPU.
 	 */
-	blk_mq_add_timer(rq);
+	blk_add_timer(rq);
 }
 
 void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
@@ -1400,7 +1425,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 	q->sg_reserved_size = INT_MAX;
 
 	blk_queue_make_request(q, blk_mq_make_request);
-	blk_queue_rq_timed_out(q, set->ops->timeout);
+	blk_queue_rq_timed_out(q, blk_mq_rq_timed_out);
 	if (set->timeout)
 		blk_queue_rq_timeout(q, set->timeout);
 
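
For reference, the following is a minimal, single-threaded user-space sketch of the STARTED/COMPLETE bit discipline the hunks above rely on: start_request() mirrors the new clear of the complete bit in blk_mq_start_request(), and timed_out() mirrors the re-check of the started bit in blk_mq_rq_timed_out(). The type and helper names (model_request, start_request, complete_request, timed_out) are illustrative stand-ins, and the real atomics and locking are deliberately omitted.

#include <stdbool.h>
#include <stdio.h>

enum eh_verdict { EH_NOT_HANDLED, EH_HANDLED, EH_RESET_TIMER };

struct model_request {
	bool started;		/* stands in for REQ_ATOM_STARTED */
	bool complete;		/* stands in for REQ_ATOM_COMPLETE */
};

/* Start (or restart after a requeue): mark started and clear any stale
 * complete bit that a racing timeout may have left behind. */
static void start_request(struct model_request *rq)
{
	rq->started = true;
	rq->complete = false;
}

/* Normal completion: claim the complete bit and retire the request,
 * which drops started as well. */
static void complete_request(struct model_request *rq)
{
	if (!rq->complete) {
		rq->complete = true;
		rq->started = false;
	}
}

/* Timeout handler: if a completion already cleared started, the request
 * is no longer active and the timeout must be ignored. */
static enum eh_verdict timed_out(struct model_request *rq)
{
	if (!rq->started)
		return EH_NOT_HANDLED;
	return EH_RESET_TIMER;	/* no driver ->timeout in this model */
}

int main(void)
{
	struct model_request rq = { false, false };

	start_request(&rq);
	complete_request(&rq);	/* completion beats the timeout handler */
	printf("%s\n", timed_out(&rq) == EH_NOT_HANDLED ?
	       "timeout ignored" : "timeout handled");
	return 0;
}

Running it prints "timeout ignored", i.e. the BLK_EH_NOT_HANDLED path the patch adds for a timeout that loses the race against a completion.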
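With blk_queue_rq_timed_out() now pointing at blk_mq_rq_timed_out(), a driver only has to fill in the optional timeout member of its struct blk_mq_ops; when it is left unset, the wrapper simply re-arms the timer via BLK_EH_RESET_TIMER. Below is a hedged sketch of such a callback: mydrv_cmd, mydrv_try_abort() and the per-request pdu usage are hypothetical, only the request argument and the enum blk_eh_timer_return return type come from the interface used in the patch.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Hypothetical per-request driver data, carried in the request pdu. */
struct mydrv_cmd {
	bool abort_worked;
};

/* Hypothetical hardware abort; a real driver would poke its controller. */
static bool mydrv_try_abort(struct mydrv_cmd *cmd)
{
	return cmd->abort_worked;
}

/* Reached through blk_mq_rq_timed_out() -> q->mq_ops->timeout(rq). */
static enum blk_eh_timer_return mydrv_timeout(struct request *rq)
{
	struct mydrv_cmd *cmd = blk_mq_rq_to_pdu(rq);

	if (mydrv_try_abort(cmd))
		return BLK_EH_HANDLED;		/* request has been finished */

	return BLK_EH_RESET_TIMER;		/* still in flight, keep waiting */
}

A real driver would hook this up as .timeout = mydrv_timeout alongside its other blk_mq_ops callbacks.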