@@ -1364,18 +1364,14 @@ static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
 static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
                                 int qid, int depth)
 {
-        if (qid && dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
-                unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth),
-                                                      dev->ctrl.page_size);
-                nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
-                nvmeq->sq_cmds_io = dev->cmb + offset;
-        } else {
-                nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
-                                        &nvmeq->sq_dma_addr, GFP_KERNEL);
-                if (!nvmeq->sq_cmds)
-                        return -ENOMEM;
-        }
+        /* CMB SQEs will be mapped before creation */
+        if (qid && dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS))
+                return 0;
 
+        nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
+                                        &nvmeq->sq_dma_addr, GFP_KERNEL);
+        if (!nvmeq->sq_cmds)
+                return -ENOMEM;
         return 0;
 }
 
@@ -1449,6 +1445,13 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
         struct nvme_dev *dev = nvmeq->dev;
         int result;
 
+        if (dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
+                unsigned offset = (qid - 1) * roundup(SQ_SIZE(nvmeq->q_depth),
+                                                      dev->ctrl.page_size);
+                nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
+                nvmeq->sq_cmds_io = dev->cmb + offset;
+        }
+
         nvmeq->cq_vector = qid - 1;
         result = adapter_alloc_cq(dev, qid, nvmeq);
         if (result < 0)