
nvmet-fc: simplify sg list handling

The existing nvmet_fc sg list handling has two faults:
a) the request between the LLDD and the transport embeds a static sg
   list that is too large (256 elements), even though a transfer
   sequence is normally capped at 256KB (64 elements with 4K pages).
b) the sg list handling doesn't take advantage of the fact that each
   element is a page.

This patch removes the static sg list from the request and instead
points at the dynamic list already allocated by the nvmet_fc
transport. It also simplifies how the sg list is split across
multiple transfer sequences by taking advantage of the per-page
divisions, as the sketch below illustrates.
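
Because every element of the transport's data_sg list maps exactly one
page, the start of a transfer sequence at byte offset off is simply
&data_sg[off / PAGE_SIZE], and its element count is
DIV_ROUND_UP(len, PAGE_SIZE). A minimal userspace sketch of that
arithmetic (the constants and loop are illustrative; only the index
math mirrors the patch):

    #include <stdio.h>

    #define PAGE_SIZE		4096u
    #define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

    int main(void)
    {
    	unsigned int total_length = 1024 * 1024;	/* 1MB command */
    	unsigned int max_seq = 64 * PAGE_SIZE;		/* 256KB per sequence */
    	unsigned int offset, tlen;

    	for (offset = 0; offset < total_length; offset += tlen) {
    		tlen = total_length - offset;
    		if (tlen > max_seq)
    			tlen = max_seq;
    		/* start element and count for this sequence */
    		printf("seq at %7u: sg = &data_sg[%u], sg_cnt = %u\n",
    		       offset, offset / PAGE_SIZE,
    		       DIV_ROUND_UP(tlen, PAGE_SIZE));
    	}
    	return 0;
    }

With 4K pages, a 1MB command is carved into four 256KB sequences
starting at elements 0, 64, 128 and 192; no per-element copying or
intra-element offset bookkeeping is needed.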

Signed-off-by: James Smart <james.smart@broadcom.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
James Smart, 8 years ago
commit 48fa362b6c

2 changed files with 11 additions and 39 deletions:
  drivers/nvme/target/fc.c       (+10, -38)
  include/linux/nvme-fc-driver.h (+1, -1)

+ 10 - 38
drivers/nvme/target/fc.c

@@ -58,7 +58,8 @@ struct nvmet_fc_ls_iod {
 	struct work_struct		work;
 } __aligned(sizeof(unsigned long long));
 
-#define NVMET_FC_MAX_KB_PER_XFR		256
+#define NVMET_FC_MAX_SEQ_LENGTH		(256 * 1024)
+#define NVMET_FC_MAX_XFR_SGENTS		(NVMET_FC_MAX_SEQ_LENGTH / PAGE_SIZE)
 
 enum nvmet_fcp_datadir {
 	NVMET_FCP_NODATA,
@@ -74,9 +75,7 @@ struct nvmet_fc_fcp_iod {
 	struct nvme_fc_ersp_iu		rspiubuf;
 	dma_addr_t			rspdma;
 	struct scatterlist		*data_sg;
-	struct scatterlist		*next_sg;
 	int				data_sg_cnt;
-	u32				next_sg_offset;
 	u32				total_length;
 	u32				offset;
 	enum nvmet_fcp_datadir		io_dir;
@@ -112,6 +111,7 @@ struct nvmet_fc_tgtport {
 	struct ida			assoc_cnt;
 	struct nvmet_port		*port;
 	struct kref			ref;
+	u32				max_sg_cnt;
 };
 
 struct nvmet_fc_defer_fcp_req {
@@ -994,6 +994,8 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
 	INIT_LIST_HEAD(&newrec->assoc_list);
 	kref_init(&newrec->ref);
 	ida_init(&newrec->assoc_cnt);
+	newrec->max_sg_cnt = min_t(u32, NVMET_FC_MAX_XFR_SGENTS,
+					template->max_sgl_segments);
 
 	ret = nvmet_fc_alloc_ls_iodlist(newrec);
 	if (ret) {
@@ -1866,51 +1868,23 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
 				struct nvmet_fc_fcp_iod *fod, u8 op)
 {
 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
-	struct scatterlist *sg, *datasg;
 	unsigned long flags;
-	u32 tlen, sg_off;
+	u32 tlen;
 	int ret;
 
 	fcpreq->op = op;
 	fcpreq->offset = fod->offset;
 	fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
-	tlen = min_t(u32, (NVMET_FC_MAX_KB_PER_XFR * 1024),
+
+	tlen = min_t(u32, tgtport->max_sg_cnt * PAGE_SIZE,
 			(fod->total_length - fod->offset));
-	tlen = min_t(u32, tlen, NVME_FC_MAX_SEGMENTS * PAGE_SIZE);
-	tlen = min_t(u32, tlen, fod->tgtport->ops->max_sgl_segments
-					* PAGE_SIZE);
 	fcpreq->transfer_length = tlen;
 	fcpreq->transferred_length = 0;
 	fcpreq->fcp_error = 0;
 	fcpreq->rsplen = 0;
 
-	fcpreq->sg_cnt = 0;
-
-	datasg = fod->next_sg;
-	sg_off = fod->next_sg_offset;
-
-	for (sg = fcpreq->sg ; tlen; sg++) {
-		*sg = *datasg;
-		if (sg_off) {
-			sg->offset += sg_off;
-			sg->length -= sg_off;
-			sg->dma_address += sg_off;
-			sg_off = 0;
-		}
-		if (tlen < sg->length) {
-			sg->length = tlen;
-			fod->next_sg = datasg;
-			fod->next_sg_offset += tlen;
-		} else if (tlen == sg->length) {
-			fod->next_sg_offset = 0;
-			fod->next_sg = sg_next(datasg);
-		} else {
-			fod->next_sg_offset = 0;
-			datasg = sg_next(datasg);
-		}
-		tlen -= sg->length;
-		fcpreq->sg_cnt++;
-	}
+	fcpreq->sg = &fod->data_sg[fod->offset / PAGE_SIZE];
+	fcpreq->sg_cnt = DIV_ROUND_UP(tlen, PAGE_SIZE);
 
 	/*
 	 * If the last READDATA request: check if LLDD supports
@@ -2225,8 +2199,6 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
 	fod->req.sg = fod->data_sg;
 	fod->req.sg_cnt = fod->data_sg_cnt;
 	fod->offset = 0;
-	fod->next_sg = fod->data_sg;
-	fod->next_sg_offset = 0;
 
 	if (fod->io_dir == NVMET_FCP_WRITE) {
 		/* pull the data over before invoking nvmet layer */

+ 1 - 1
include/linux/nvme-fc-driver.h

@@ -624,7 +624,7 @@ struct nvmefc_tgt_fcp_req {
 	u32			timeout;
 	u32			transfer_length;
 	struct fc_ba_rjt	ba_rjt;
-	struct scatterlist	sg[NVME_FC_MAX_SEGMENTS];
+	struct scatterlist	*sg;
 	int			sg_cnt;
 	void			*rspaddr;
 	dma_addr_t		rspdma;
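
With the static sg[NVME_FC_MAX_SEGMENTS] array gone, fcpreq->sg is a
pointer into the transport's per-page list, so an LLDD walks it with
the standard scatterlist iterators instead of a private copy. A hedged
sketch of the consuming side (lldd_queue_one() is hypothetical, shown
only to give the loop a body; for_each_sg(), sg_dma_address() and
sg_dma_len() are the stock kernel helpers):

    #include <linux/scatterlist.h>

    /* hypothetical LLDD hook that programs one DMA segment */
    static void lldd_queue_one(dma_addr_t addr, unsigned int len);

    static void lldd_map_xfr(struct nvmefc_tgt_fcp_req *fcpreq)
    {
    	struct scatterlist *sg;
    	int i;

    	/* fcpreq->sg points into the transport's list; nothing is copied */
    	for_each_sg(fcpreq->sg, sg, fcpreq->sg_cnt, i)
    		lldd_queue_one(sg_dma_address(sg), sg_dma_len(sg));
    }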