@@ -325,12 +325,21 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
 	return BLK_STS_OK;
 }
 
-static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
-		struct nvme_command *cmnd)
+static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
+		struct request *req, struct nvme_command *cmnd)
 {
 	u16 control = 0;
 	u32 dsmgmt = 0;
 
+	/*
+	 * If formatted with metadata, require the block layer provide a buffer
+	 * unless this namespace is formatted such that the metadata can be
+	 * stripped/generated by the controller with PRACT=1.
+	 */
+	if (ns && ns->ms && (!ns->pi_type || ns->ms != 8) &&
+	    !blk_integrity_rq(req) && !blk_rq_is_passthrough(req))
+		return BLK_STS_NOTSUPP;
+
 	if (req->cmd_flags & REQ_FUA)
 		control |= NVME_RW_FUA;
 	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
@@ -364,6 +373,7 @@ static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
 
 	cmnd->rw.control = cpu_to_le16(control);
 	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
+	return 0;
 }
 
 blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
@@ -392,7 +402,7 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
 		break;
 	case REQ_OP_READ:
 	case REQ_OP_WRITE:
-		nvme_setup_rw(ns, req, cmd);
+		ret = nvme_setup_rw(ns, req, cmd);
 		break;
 	default:
 		WARN_ON_ONCE(1);