@@ -537,73 +537,6 @@ static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
 	mempool_free(iod->sg, dev->iod_mempool);
 }
 
-#ifdef CONFIG_BLK_DEV_INTEGRITY
-static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
-{
-	if (be32_to_cpu(pi->ref_tag) == v)
-		pi->ref_tag = cpu_to_be32(p);
-}
-
-static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
-{
-	if (be32_to_cpu(pi->ref_tag) == p)
-		pi->ref_tag = cpu_to_be32(v);
-}
-
-/**
- * nvme_dif_remap - remaps ref tags to bip seed and physical lba
- *
- * The virtual start sector is the one that was originally submitted by the
- * block layer. Due to partitioning, MD/DM cloning, etc. the actual physical
- * start sector may be different. Remap protection information to match the
- * physical LBA on writes, and back to the original seed on reads.
- *
- * Type 0 and 3 do not have a ref tag, so no remapping required.
- */
-static void nvme_dif_remap(struct request *req,
-			void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
-{
-	struct nvme_ns *ns = req->rq_disk->private_data;
-	struct bio_integrity_payload *bip;
-	struct t10_pi_tuple *pi;
-	void *p, *pmap;
-	u32 i, nlb, ts, phys, virt;
-
-	if (!ns->pi_type || ns->pi_type == NVME_NS_DPS_PI_TYPE3)
-		return;
-
-	bip = bio_integrity(req->bio);
-	if (!bip)
-		return;
-
-	pmap = kmap_atomic(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset;
-
-	p = pmap;
-	virt = bip_get_seed(bip);
-	phys = nvme_block_nr(ns, blk_rq_pos(req));
-	nlb = (blk_rq_bytes(req) >> ns->lba_shift);
-	ts = ns->disk->queue->integrity.tuple_size;
-
-	for (i = 0; i < nlb; i++, virt++, phys++) {
-		pi = (struct t10_pi_tuple *)p;
-		dif_swap(phys, virt, pi);
-		p += ts;
-	}
-	kunmap_atomic(pmap);
-}
-#else /* CONFIG_BLK_DEV_INTEGRITY */
-static void nvme_dif_remap(struct request *req,
-			void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
-{
-}
-static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
-{
-}
-static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
-{
-}
-#endif
-
 static void nvme_print_sgl(struct scatterlist *sgl, int nents)
 {
 	int i;
@@ -829,9 +762,6 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 		if (blk_rq_map_integrity_sg(q, req->bio, &iod->meta_sg) != 1)
 			goto out_unmap;
 
-		if (req_op(req) == REQ_OP_WRITE)
-			nvme_dif_remap(req, nvme_dif_prep);
-
 		if (!dma_map_sg(dev->dev, &iod->meta_sg, 1, dma_dir))
 			goto out_unmap;
 	}
@@ -854,11 +784,8 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
 
 	if (iod->nents) {
 		dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
-		if (blk_integrity_rq(req)) {
-			if (req_op(req) == REQ_OP_READ)
-				nvme_dif_remap(req, nvme_dif_complete);
+		if (blk_integrity_rq(req))
 			dma_unmap_sg(dev->dev, &iod->meta_sg, 1, dma_dir);
-		}
 	}
 
 	nvme_cleanup_cmd(req);
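
For readers without the tree handy, below is a minimal, self-contained
restatement in user-space C of the ref-tag remapping the deleted helpers
performed. It is a sketch, not kernel code: struct pi_tuple, dif_prep(),
dif_complete() and the sample virt/phys values are illustrative stand-ins
for struct t10_pi_tuple, nvme_dif_prep(), nvme_dif_complete(),
bip_get_seed() and nvme_block_nr(), and htonl()/ntohl() stand in for
cpu_to_be32()/be32_to_cpu().

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>		/* htonl()/ntohl() for the big-endian tags */

struct pi_tuple {		/* 8-byte protection tuple, as in T10 DIF */
	uint16_t guard_tag;	/* CRC of the data block (unused here) */
	uint16_t app_tag;
	uint32_t ref_tag;	/* big-endian reference tag */
};

/* On write submission: seed-relative (virtual) tag -> physical LBA. */
static void dif_prep(uint32_t phys, uint32_t virt, struct pi_tuple *pi)
{
	if (ntohl(pi->ref_tag) == virt)
		pi->ref_tag = htonl(phys);
}

/* On read completion: physical LBA -> seed-relative (virtual) tag. */
static void dif_complete(uint32_t phys, uint32_t virt, struct pi_tuple *pi)
{
	if (ntohl(pi->ref_tag) == phys)
		pi->ref_tag = htonl(virt);
}

int main(void)
{
	struct pi_tuple pi_buf[4];
	uint32_t virt = 0;	/* seed the tags were generated against */
	uint32_t phys = 2048;	/* e.g. a partition offset shifted the LBA */
	uint32_t i;

	/* Tags as generated against the submitted (virtual) sectors. */
	for (i = 0; i < 4; i++)
		pi_buf[i].ref_tag = htonl(virt + i);

	/* Remap each block's tag before the write reaches the device... */
	for (i = 0; i < 4; i++)
		dif_prep(phys + i, virt + i, &pi_buf[i]);
	printf("on the wire:  first ref_tag = %u\n", ntohl(pi_buf[0].ref_tag));

	/* ...and back on completion, as the read path would. */
	for (i = 0; i < 4; i++)
		dif_complete(phys + i, virt + i, &pi_buf[i]);
	printf("back to seed: first ref_tag = %u\n", ntohl(pi_buf[0].ref_tag));
	return 0;
}

As in the removed helpers, only a tuple whose ref tag matches the expected
virtual (or physical) value is rewritten; anything else is left untouched.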