@@ -896,19 +896,28 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 			goto retry_cmd;
 		}
 		if (blk_integrity_rq(req)) {
-			if (blk_rq_count_integrity_sg(req->q, req->bio) != 1)
+			if (blk_rq_count_integrity_sg(req->q, req->bio) != 1) {
+				dma_unmap_sg(dev->dev, iod->sg, iod->nents,
+						dma_dir);
 				goto error_cmd;
+			}
 
 			sg_init_table(iod->meta_sg, 1);
 			if (blk_rq_map_integrity_sg(
-					req->q, req->bio, iod->meta_sg) != 1)
+					req->q, req->bio, iod->meta_sg) != 1) {
+				dma_unmap_sg(dev->dev, iod->sg, iod->nents,
+						dma_dir);
 				goto error_cmd;
+			}
 
 			if (rq_data_dir(req))
 				nvme_dif_remap(req, nvme_dif_prep);
 
-			if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir))
+			if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir)) {
+				dma_unmap_sg(dev->dev, iod->sg, iod->nents,
+						dma_dir);
 				goto error_cmd;
+			}
 		}
 	}
 
@@ -1728,9 +1737,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 	u32 aqa;
 	u64 cap = lo_hi_readq(&dev->bar->cap);
 	struct nvme_queue *nvmeq;
-	unsigned page_shift = PAGE_SHIFT;
+	/*
+	 * default to a 4K page size, with the intention to update this
+	 * path in the future to accommodate architectures with differing
+	 * kernel and IO page sizes.
+	 */
+	unsigned page_shift = 12;
 	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12;
-	unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12;
 
 	if (page_shift < dev_page_min) {
 		dev_err(dev->dev,
@@ -1739,13 +1752,6 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 				1 << page_shift);
 		return -ENODEV;
 	}
-	if (page_shift > dev_page_max) {
-		dev_info(dev->dev,
-			"Device maximum page size (%u) smaller than "
-			"host (%u); enabling work-around\n",
-			1 << dev_page_max, 1 << page_shift);
-		page_shift = dev_page_max;
-	}
 
 	dev->subsystem = readl(&dev->bar->vs) >= NVME_VS(1, 1) ?
 					NVME_CAP_NSSRC(cap) : 0;
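
Note on the first hunk: all three added dma_unmap_sg() calls apply the same unwind-on-error idiom. Once dma_map_sg() has succeeded for the data scatterlist, any later failure on the same setup path must unmap that scatterlist before taking the error exit, or the DMA mapping is leaked. The sketch below illustrates the idiom only; the demo_* types and the demo_setup_metadata() helper are hypothetical stand-ins, not driver code.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

struct demo_iod {			/* simplified stand-in for struct nvme_iod */
	struct scatterlist *sg;
	int nents;
};

static int demo_setup_metadata(struct demo_iod *iod);	/* hypothetical helper */

static int demo_map(struct device *dmadev, struct demo_iod *iod,
		    enum dma_data_direction dma_dir)
{
	/* failure before the mapping succeeds needs no unwind */
	if (!dma_map_sg(dmadev, iod->sg, iod->nents, dma_dir))
		return -ENOMEM;

	if (demo_setup_metadata(iod) != 0) {
		/*
		 * the data scatterlist is already mapped: release it before
		 * failing, as the added dma_unmap_sg() calls above do
		 */
		dma_unmap_sg(dmadev, iod->sg, iod->nents, dma_dir);
		return -EIO;
	}
	return 0;
}

The rule of thumb the patch enforces: a failure before dma_map_sg() succeeds may return directly, while every failure after it must be paired with dma_unmap_sg().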