@@ -267,6 +267,45 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
 }
 EXPORT_SYMBOL(blk_mq_alloc_request);
 
+struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
+		unsigned int flags, unsigned int hctx_idx)
+{
+	struct blk_mq_hw_ctx *hctx;
+	struct blk_mq_ctx *ctx;
+	struct request *rq;
+	struct blk_mq_alloc_data alloc_data;
+	int ret;
+
+	/*
+	 * If the tag allocator sleeps we could get an allocation for a
+	 * different hardware context. No need to complicate the low level
+	 * allocator for this for the rare use case of a command tied to
+	 * a specific queue.
+	 */
+	if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
+		return ERR_PTR(-EINVAL);
+
+	if (hctx_idx >= q->nr_hw_queues)
+		return ERR_PTR(-EIO);
+
+	ret = blk_queue_enter(q, true);
+	if (ret)
+		return ERR_PTR(ret);
+
+	hctx = q->queue_hw_ctx[hctx_idx];
+	ctx = __blk_mq_get_ctx(q, cpumask_first(hctx->cpumask));
+
+	blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
+	rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
+	if (!rq) {
+		blk_queue_exit(q);
+		return ERR_PTR(-EWOULDBLOCK);
+	}
+
+	return rq;
+}
+EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
+
 static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
 		struct blk_mq_ctx *ctx, struct request *rq)
 {
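
For context, a minimal caller sketch for the new export (not part of the patch; the function name, the qid parameter, and the surrounding setup are assumptions for illustration). The point it shows: BLK_MQ_REQ_NOWAIT is mandatory, and failures come back as ERR_PTR values rather than NULL.

	/*
	 * Hypothetical caller: allocate a request tied to hardware queue 'qid',
	 * e.g. for a per-queue setup command. Everything except the exported
	 * blk_mq_* helpers is made up for this sketch.
	 */
	static struct request *example_alloc_on_hctx(struct request_queue *q,
			unsigned int qid)
	{
		struct request *rq;

		/* NOWAIT is required; the helper rejects sleeping allocations */
		rq = blk_mq_alloc_request_hctx(q, READ, BLK_MQ_REQ_NOWAIT, qid);
		if (IS_ERR(rq))
			return rq;	/* -EINVAL, -EIO or -EWOULDBLOCK */

		/* fill in the command payload, then execute or free the request */
		return rq;
	}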