@@ -448,12 +448,31 @@ static void **nvme_pci_iod_list(struct request *req)
 	return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
 }
 
+static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
+{
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+	unsigned int avg_seg_size;
+
+	avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req),
+			blk_rq_nr_phys_segments(req));
+
+	if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
+		return false;
+	if (!iod->nvmeq->qid)
+		return false;
+	if (!sgl_threshold || avg_seg_size < sgl_threshold)
+		return false;
+	return true;
+}
+
 static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
 	int nseg = blk_rq_nr_phys_segments(rq);
 	unsigned int size = blk_rq_payload_bytes(rq);
 
+	iod->use_sgl = nvme_pci_use_sgls(dev, rq);
+
 	if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
 		size_t alloc_size = nvme_pci_iod_alloc_size(dev, size, nseg,
 				iod->use_sgl);
@@ -604,8 +623,6 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
 	dma_addr_t prp_dma;
 	int nprps, i;
 
-	iod->use_sgl = false;
-
 	length -= (page_size - offset);
 	if (length <= 0) {
 		iod->first_dma = 0;
@@ -715,8 +732,6 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
 	int entries = iod->nents, i = 0;
 	dma_addr_t sgl_dma;
 
-	iod->use_sgl = true;
-
 	/* setting the transfer type as SGL */
 	cmd->flags = NVME_CMD_SGL_METABUF;
 
@@ -770,23 +785,6 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
 	return BLK_STS_OK;
 }
 
-static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
-{
-	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-	unsigned int avg_seg_size;
-
-	avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req),
-			blk_rq_nr_phys_segments(req));
-
-	if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
-		return false;
-	if (!iod->nvmeq->qid)
-		return false;
-	if (!sgl_threshold || avg_seg_size < sgl_threshold)
-		return false;
-	return true;
-}
-
 static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 		struct nvme_command *cmnd)
 {
@@ -806,7 +804,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 			DMA_ATTR_NO_WARN))
 		goto out;
 
-	if (nvme_pci_use_sgls(dev, req))
+	if (iod->use_sgl)
 		ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw);
 	else
 		ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
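
The net effect of the patch is that the SGL-vs-PRP decision is made once, up front, in nvme_init_iod(): iod->use_sgl is then already valid when nvme_pci_iod_alloc_size() consumes it, instead of being set as a side effect inside the setup helpers and re-derived again at map time. For illustration only, below is a minimal standalone sketch of the same heuristic; the struct, the function name and the constants in main() are hypothetical stand-ins, and only the three checks mirror nvme_pci_use_sgls() above.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for dev->ctrl: only the SGL support word matters here. */
struct ctrl_caps {
	unsigned int sgls;	/* mirrors the dev->ctrl.sgls identify field */
};

/*
 * Same three checks as nvme_pci_use_sgls() in the patch: controller SGL
 * support, I/O queue (qid != 0), and the average-segment-size threshold.
 */
static bool use_sgls(const struct ctrl_caps *ctrl, unsigned int qid,
		     unsigned int payload_bytes, unsigned int nr_segs,
		     unsigned int sgl_threshold)
{
	/* open-coded DIV_ROUND_UP(payload_bytes, nr_segs) */
	unsigned int avg_seg_size = (payload_bytes + nr_segs - 1) / nr_segs;

	if (!(ctrl->sgls & ((1 << 0) | (1 << 1))))
		return false;	/* controller reports no SGL support */
	if (qid == 0)
		return false;	/* admin queue: stick with PRPs */
	if (sgl_threshold == 0 || avg_seg_size < sgl_threshold)
		return false;	/* segments too small for SGLs to pay off */
	return true;
}

int main(void)
{
	struct ctrl_caps ctrl = { .sgls = 1 << 0 };	/* pretend SGLs are supported */

	/* 1 MiB in 4 segments: 256 KiB average, well above a 32 KiB threshold */
	printf("%d\n", use_sgls(&ctrl, 1, 1 << 20, 4, 32768));	/* prints 1 */
	return 0;
}

The qid check keeps admin-queue commands on PRPs, and the threshold check reflects the trade-off that for small segments PRP lists are cheaper to build than SGL descriptors; a request only switches to SGLs when its average segment size reaches the sgl_threshold module parameter.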