@@ -896,19 +896,28 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 			goto retry_cmd;
 		}
 		if (blk_integrity_rq(req)) {
-			if (blk_rq_count_integrity_sg(req->q, req->bio) != 1)
+			if (blk_rq_count_integrity_sg(req->q, req->bio) != 1) {
+				dma_unmap_sg(dev->dev, iod->sg, iod->nents,
+						dma_dir);
 				goto error_cmd;
+			}
 
 			sg_init_table(iod->meta_sg, 1);
 			if (blk_rq_map_integrity_sg(
-					req->q, req->bio, iod->meta_sg) != 1)
+					req->q, req->bio, iod->meta_sg) != 1) {
+				dma_unmap_sg(dev->dev, iod->sg, iod->nents,
+						dma_dir);
 				goto error_cmd;
+			}
 
 			if (rq_data_dir(req))
 				nvme_dif_remap(req, nvme_dif_prep);
 
-			if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir))
+			if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir)) {
+				dma_unmap_sg(dev->dev, iod->sg, iod->nents,
+						dma_dir);
 				goto error_cmd;
+			}
 		}
 	}
 