@@ -1401,6 +1401,22 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 	blk_mq_hctx_mark_pending(hctx, ctx);
 }
 
+/*
+ * Should only be used carefully, when the caller knows we want to
+ * bypass a potential IO scheduler on the target device.
+ */
+void blk_mq_request_bypass_insert(struct request *rq)
+{
+	struct blk_mq_ctx *ctx = rq->mq_ctx;
+	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);
+
+	spin_lock(&hctx->lock);
+	list_add_tail(&rq->queuelist, &hctx->dispatch);
+	spin_unlock(&hctx->lock);
+
+	blk_mq_run_hw_queue(hctx, false);
+}
+
 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
 			    struct list_head *list)
 {
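
The new helper takes hctx->lock and appends the request to the hardware context's dispatch list, which blk-mq services ahead of any attached elevator, then runs the hardware queue so the bypassed request is dispatched promptly rather than waiting for an unrelated trigger. Below is a minimal caller-side sketch of how the block core might use it for a request-stacking (e.g. dm-multipath) clone that was already scheduled on the top-level queue; the function name insert_cloned_rq and the legacy-path fallback are illustrative assumptions, not part of the patch.

	#include <linux/blkdev.h>
	#include <linux/blk-mq.h>

	/* Hypothetical caller, loosely modeled on blk_insert_cloned_request(). */
	static blk_status_t insert_cloned_rq(struct request_queue *q,
					     struct request *rq)
	{
		if (q->mq_ops) {
			/*
			 * The clone was already merged/sorted by the
			 * top-level queue's scheduler; running it through a
			 * second elevator on the bottom device would be
			 * redundant, so insert straight onto the dispatch
			 * list.
			 */
			blk_mq_request_bypass_insert(rq);
			return BLK_STS_OK;
		}

		/* Legacy (non-mq) path elided in this sketch. */
		return BLK_STS_NOTSUPP;
	}

Note that passing false as the async argument to blk_mq_run_hw_queue() requests a synchronous queue run where the context allows it, so the request on the dispatch list is pushed to the driver immediately.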