@@ -1848,17 +1848,10 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)

 	if (set->ops->complete)
 		blk_queue_softirq_done(q, set->ops->complete);

-	blk_mq_init_flush(q);
 	blk_mq_init_cpu_queues(q, set->nr_hw_queues);

-	q->flush_rq = kzalloc(round_up(sizeof(struct request) +
-				set->cmd_size, cache_line_size()),
-				GFP_KERNEL);
-	if (!q->flush_rq)
-		goto err_hw;
-
 	if (blk_mq_init_hw_queues(q, set))
-		goto err_flush_rq;
+		goto err_hw;

 	mutex_lock(&all_q_mutex);
 	list_add_tail(&q->all_q_node, &all_q_list);
@@ -1866,12 +1859,15 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)

 	blk_mq_add_queue_tag_set(set, q);

+	if (blk_mq_init_flush(q))
+		goto err_hw_queues;
+
 	blk_mq_map_swqueue(q);

 	return q;

-err_flush_rq:
-	kfree(q->flush_rq);
+err_hw_queues:
+	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
 err_hw:
 	blk_cleanup_queue(q);
 err_hctxs: