@@ -273,10 +273,9 @@ EXPORT_SYMBOL(blk_mq_alloc_request);
 struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
 		unsigned int flags, unsigned int hctx_idx)
 {
-	struct blk_mq_hw_ctx *hctx;
-	struct blk_mq_ctx *ctx;
+	struct blk_mq_alloc_data alloc_data = { .flags = flags };
 	struct request *rq;
-	struct blk_mq_alloc_data alloc_data;
+	unsigned int cpu;
 	int ret;
 
 	/*
@@ -299,25 +298,23 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
 	 * Check if the hardware context is actually mapped to anything.
 	 * If not tell the caller that it should skip this queue.
 	 */
-	hctx = q->queue_hw_ctx[hctx_idx];
-	if (!blk_mq_hw_queue_mapped(hctx)) {
-		ret = -EXDEV;
-		goto out_queue_exit;
+	alloc_data.hctx = q->queue_hw_ctx[hctx_idx];
+	if (!blk_mq_hw_queue_mapped(alloc_data.hctx)) {
+		blk_queue_exit(q);
+		return ERR_PTR(-EXDEV);
 	}
-	ctx = __blk_mq_get_ctx(q, cpumask_first(hctx->cpumask));
+	cpu = cpumask_first(alloc_data.hctx->cpumask);
+	alloc_data.ctx = __blk_mq_get_ctx(q, cpu);
 
-	blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
-	rq = __blk_mq_alloc_request(&alloc_data, rw);
-	if (!rq) {
-		ret = -EWOULDBLOCK;
-		goto out_queue_exit;
-	}
-
-	return rq;
+	rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data);
 
-out_queue_exit:
+	blk_mq_put_ctx(alloc_data.ctx);
 	blk_queue_exit(q);
-	return ERR_PTR(ret);
+
+	if (!rq)
+		return ERR_PTR(-EWOULDBLOCK);
+
+	return rq;
 }
 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
 
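For context, a minimal caller sketch, not part of the patch itself: the function name example_alloc_on_hctx and the choice of WRITE for rw are illustrative assumptions, while blk_mq_alloc_request_hctx, BLK_MQ_REQ_NOWAIT, and IS_ERR are the real kernel symbols. It shows how a caller targets one specific hardware queue and handles the ERR_PTR-style returns this function produces:

/*
 * Hypothetical caller (illustration only): allocate a request on one
 * specific hardware queue. BLK_MQ_REQ_NOWAIT is assumed because this
 * path must not sleep on tag allocation, which could migrate the
 * allocation to a different hardware context.
 */
static struct request *example_alloc_on_hctx(struct request_queue *q,
					     unsigned int hctx_idx)
{
	struct request *rq;

	rq = blk_mq_alloc_request_hctx(q, WRITE, BLK_MQ_REQ_NOWAIT,
				       hctx_idx);
	if (IS_ERR(rq))
		return rq; /* -EXDEV: hctx unmapped; -EWOULDBLOCK: no tag */

	/* ... set up the driver-specific command here ... */
	return rq;
}

Note that failure is reported only through the ERR_PTR return value, never as NULL, so callers must check with IS_ERR()/PTR_ERR().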