@@ -76,7 +76,6 @@ struct nvmet_fc_fcp_iod {
 	dma_addr_t rspdma;
 	struct scatterlist *data_sg;
 	int data_sg_cnt;
-	u32 total_length;
 	u32 offset;
 	enum nvmet_fcp_datadir io_dir;
 	bool active;
@@ -1700,7 +1699,7 @@ nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
 	u32 page_len, length;
 	int i = 0;
 
-	length = fod->total_length;
+	length = fod->req.transfer_len;
 	nent = DIV_ROUND_UP(length, PAGE_SIZE);
 	sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
 	if (!sg)
@@ -1789,7 +1788,7 @@ nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
 	u32 rsn, rspcnt, xfr_length;
 
 	if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
-		xfr_length = fod->total_length;
+		xfr_length = fod->req.transfer_len;
 	else
 		xfr_length = fod->offset;
 
@@ -1815,7 +1814,7 @@ nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
 	rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
 	if (!(rspcnt % fod->queue->ersp_ratio) ||
 	    sqe->opcode == nvme_fabrics_command ||
-	    xfr_length != fod->total_length ||
+	    xfr_length != fod->req.transfer_len ||
 	    (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
 	    (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
 	    queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
@@ -1892,7 +1891,7 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
 	fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
 
 	tlen = min_t(u32, tgtport->max_sg_cnt * PAGE_SIZE,
-			(fod->total_length - fod->offset));
+			(fod->req.transfer_len - fod->offset));
 	fcpreq->transfer_length = tlen;
 	fcpreq->transferred_length = 0;
 	fcpreq->fcp_error = 0;
@@ -1906,7 +1905,7 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
 	 * combined xfr with response.
 	 */
 	if ((op == NVMET_FCOP_READDATA) &&
-	    ((fod->offset + fcpreq->transfer_length) == fod->total_length) &&
+	    ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) &&
 	    (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
 		fcpreq->op = NVMET_FCOP_READDATA_RSP;
 		nvmet_fc_prep_fcp_rsp(tgtport, fod);
@@ -1986,7 +1985,7 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
 		}
 
 		fod->offset += fcpreq->transferred_length;
-		if (fod->offset != fod->total_length) {
+		if (fod->offset != fod->req.transfer_len) {
 			spin_lock_irqsave(&fod->flock, flags);
 			fod->writedataactive = true;
 			spin_unlock_irqrestore(&fod->flock, flags);
@@ -1998,9 +1997,7 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
 		}
 
 		/* data transfer complete, resume with nvmet layer */
-
-		fod->req.execute(&fod->req);
-
+		nvmet_req_execute(&fod->req);
 		break;
 
 	case NVMET_FCOP_READDATA:
@@ -2023,7 +2020,7 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
 		}
 
 		fod->offset += fcpreq->transferred_length;
-		if (fod->offset != fod->total_length) {
+		if (fod->offset != fod->req.transfer_len) {
 			/* transfer the next chunk */
 			nvmet_fc_transfer_fcp_data(tgtport, fod,
 						NVMET_FCOP_READDATA);
@@ -2160,7 +2157,7 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
 
 	fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;
 
-	fod->total_length = be32_to_cpu(cmdiu->data_len);
+	fod->req.transfer_len = be32_to_cpu(cmdiu->data_len);
 	if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
 		fod->io_dir = NVMET_FCP_WRITE;
 		if (!nvme_is_write(&cmdiu->sqe))
@@ -2171,7 +2168,7 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
 			goto transport_error;
 	} else {
 		fod->io_dir = NVMET_FCP_NODATA;
-		if (fod->total_length)
+		if (fod->req.transfer_len)
 			goto transport_error;
 	}
 
@@ -2179,9 +2176,6 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
 	fod->req.rsp = &fod->rspiubuf.cqe;
 	fod->req.port = fod->queue->port;
 
-	/* ensure nvmet handlers will set cmd handler callback */
-	fod->req.execute = NULL;
-
 	/* clear any response payload */
 	memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
 
@@ -2201,7 +2195,7 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
 	/* keep a running counter of tail position */
 	atomic_inc(&fod->queue->sqtail);
 
-	if (fod->total_length) {
+	if (fod->req.transfer_len) {
 		ret = nvmet_fc_alloc_tgt_pgs(fod);
 		if (ret) {
 			nvmet_req_complete(&fod->req, ret);
@@ -2224,9 +2218,7 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
 	 * can invoke the nvmet_layer now. If read data, cmd completion will
 	 * push the data
 	 */
-
-	fod->req.execute(&fod->req);
-
+	nvmet_req_execute(&fod->req);
 	return;
 
 transport_error:
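
For reviewers, a minimal sketch (not part of the patch) of the call pattern the change converts the FC transport to. `my_transport_handle_cmd` and `cmd_len` are hypothetical names invented for illustration; the sketch assumes the nvmet core owns the generic `transfer_len` field and exports `nvmet_req_execute(struct nvmet_req *req)`, as the converted call sites above imply.

#include "nvmet.h"	/* nvmet core internals (drivers/nvme/target) */

/*
 * Hypothetical transport command path, sketch only: rather than caching
 * the wire-derived length in a transport-private field (the removed
 * fod->total_length) and invoking the per-request callback directly
 * (the removed fod->req.execute(&fod->req)), the transport stores the
 * length in the generic req->transfer_len and lets the nvmet core
 * dispatch the command.
 */
static void my_transport_handle_cmd(struct nvmet_req *req, u32 cmd_len)
{
	req->transfer_len = cmd_len;	/* generic field, was fod->total_length */
	nvmet_req_execute(req);		/* core helper, was fod->req.execute(&fod->req) */
}

This is also why the `fod->req.execute = NULL` initialization is dropped above: the transport no longer dereferences the callback itself, so it has no need to guard it.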