
Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

Pull SCSI target updates from Nicholas Bellinger:
 "The highlights this round include:

   - Add target_alloc_session() w/ callback helper for doing se_session
     allocation + tag + se_node_acl lookup.  (HCH + nab)

   - Tree-wide fabric driver conversion to use target_alloc_session()

   - Convert sbp-target to use percpu_ida tag pre-allocation, and
     TARGET_SCF_ACK_KREF I/O krefs (Chris Boot + nab)

   - Convert usb-gadget to use percpu_ida tag pre-allocation, and
     TARGET_SCF_ACK_KREF I/O krefs (Andrzej Pietrasiewicz + nab)

   - Convert xen-scsiback to use percpu_ida tag pre-allocation, and
     TARGET_SCF_ACK_KREF I/O krefs (Juergen Gross + nab)

   - Convert tcm_fc to use TARGET_SCF_ACK_KREF I/O + TMR krefs

   - Convert ib_srpt to use percpu_ida tag pre-allocation

   - Add DebugFS node for qla2xxx target sess list (Quinn)

   - Rework iser-target connection termination (Jenny + Sagi)

   - Convert iser-target to new CQ API (HCH)

   - Add pass-through WRITE_SAME support for IBLOCK (Mike Christie)

   - Introduce data_bitmap for asynchronous access of data area (Sheng
     Yang + Andy)

   - Fix target_release_cmd_kref shutdown comp leak (Himanshu Madhani)

  Also, there is a separate PULL request coming for cxgb4 NIC driver
  prerequisites for supporting hw iscsi segmentation offload (ISO), that
  will be the base for a number of v4.7 developments involving
  iscsi-target hw offloads"
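For context on the first highlight: target_alloc_session() collapses what fabric drivers used to open-code — session allocation, percpu_ida tag pre-allocation, se_node_acl lookup and registration — into one call, with a driver callback run in the middle. A rough sketch of a fabric login path using it; the my_* types and the callback body are illustrative, only the helper's signature is taken from the v4.6 target core:

#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

struct my_tpg {				/* hypothetical fabric tpg wrapper */
	struct se_portal_group se_tpg;
};

struct my_cmd {				/* hypothetical per-command descriptor */
	struct se_cmd se_cmd;
};

struct my_login_ctx {			/* hypothetical per-login state */
	struct se_session *se_sess;
};

static int my_alloc_sess_cb(struct se_portal_group *se_tpg,
			    struct se_session *se_sess, void *private)
{
	struct my_login_ctx *ctx = private;

	/* Runs after the tag pool and se_node_acl lookup succeed but
	 * before the session is registered, so the driver can publish
	 * its pointer without racing incoming I/O. */
	ctx->se_sess = se_sess;
	return 0;
}

static struct se_session *my_make_session(struct my_tpg *tpg,
					  const char *initiator_name,
					  struct my_login_ctx *ctx)
{
	/* 128 percpu_ida tags of sizeof(struct my_cmd) each are
	 * pre-allocated; returns ERR_PTR() on failure. */
	return target_alloc_session(&tpg->se_tpg, 128,
				    sizeof(struct my_cmd),
				    TARGET_PROT_NORMAL, initiator_name,
				    ctx, my_alloc_sess_cb);
}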

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (36 commits)
  target: Fix target_release_cmd_kref shutdown comp leak
  target: Avoid DataIN transfers for non-GOOD SAM status
  target/user: Report capability of handling out-of-order completions to userspace
  target/user: Fix size_t format-spec build warning
  target/user: Don't free expired command when time out
  target/user: Introduce data_bitmap, replace data_length/data_head/data_tail
  target/user: Free data ring in unified function
  target/user: Use iovec[] to describe continuous area
  target: Remove enum transport_lunflags_table
  target/iblock: pass WRITE_SAME to device if possible
  iser-target: Kill the ->isert_cmd back pointer in struct iser_tx_desc
  iser-target: Kill struct isert_rdma_wr
  iser-target: Convert to new CQ API
  iser-target: Split and properly type the login buffer
  iser-target: Remove ISER_RECV_DATA_SEG_LEN
  iser-target: Remove impossible condition from isert_wait_conn
  iser-target: Remove redundant wait in release_conn
  iser-target: Rework connection termination
  iser-target: Separate flows for np listeners and connections cma events
  iser-target: Add new state ISER_CONN_BOUND to isert_conn
  ...
Linus Torvalds, 9 years ago
commit 5266e5b12c

Documentation/target/tcmu-design.txt (+10, -1)

@@ -117,7 +117,9 @@ userspace (respectively) to put commands on the ring, and indicate
 when the commands are completed.
 
 version - 1 (userspace should abort if otherwise)
-flags - none yet defined.
+flags:
+- TCMU_MAILBOX_FLAG_CAP_OOOC: indicates out-of-order completion is
+  supported.  See "The Command Ring" for details.
 cmdr_off - The offset of the start of the command ring from the start
 of the memory region, to account for the mailbox size.
 cmdr_size - The size of the command ring. This does *not* need to be a
@@ -162,6 +164,13 @@ rsp.sense_buffer if necessary. Userspace then increments
 mailbox.cmd_tail by entry.hdr.length (mod cmdr_size) and signals the
 kernel via the UIO method, a 4-byte write to the file descriptor.
 
+If TCMU_MAILBOX_FLAG_CAP_OOOC is set in mailbox->flags, the kernel is
+capable of handling out-of-order completions. In this case, userspace may
+complete commands in a different order than it received them. Since the
+kernel still processes completions in the order the entries appear in the
+command ring, userspace needs to update the entry's cmd->id when
+completing a command (i.e. steal the original command's entry).
+
 When the opcode is PAD, userspace only updates cmd_tail as above --
 it's a no-op. (The kernel inserts PAD entries to ensure each CMD entry
 is contiguous within the command ring.)
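To make the out-of-order completion rule above concrete, here is a minimal userspace sketch. It assumes the uapi layout of struct tcmu_mailbox and struct tcmu_cmd_entry from include/uapi/linux/target_core_user.h (where the hedged tcmu_hdr_get_len() helper lives; older layouts expose a plain hdr.length instead); the caller is assumed to pass cmdr = mmap base + mb->cmdr_off:

#include <stdint.h>
#include <linux/target_core_user.h>	/* tcmu_mailbox, tcmu_cmd_entry */

/* Complete command done_cmd_id into whatever entry sits at cmd_tail.
 * The kernel reaps completions strictly in ring order, so an
 * out-of-order completion must overwrite the tail entry's cmd_id
 * ("steal" the entry) -- only legal when the kernel advertised
 * TCMU_MAILBOX_FLAG_CAP_OOOC in mb->flags. */
static int complete_out_of_order(struct tcmu_mailbox *mb, char *cmdr,
				 uint16_t done_cmd_id, uint8_t scsi_status)
{
	struct tcmu_cmd_entry *entry =
		(struct tcmu_cmd_entry *)(cmdr + mb->cmd_tail);

	if (!(mb->flags & TCMU_MAILBOX_FLAG_CAP_OOOC))
		return -1;	/* must complete in ring order instead */

	entry->hdr.cmd_id = done_cmd_id;	/* steal the tail entry */
	entry->rsp.scsi_status = scsi_status;

	mb->cmd_tail = (mb->cmd_tail + tcmu_hdr_get_len(entry->hdr.len_op))
			% mb->cmdr_size;
	return 0;	/* caller then does the 4-byte write to the UIO fd */
}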

drivers/infiniband/ulp/isert/ib_isert.c (+359, -451)

@@ -49,22 +49,25 @@ static struct workqueue_struct *isert_release_wq;
 static void
 isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
 static int
-isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
-	       struct isert_rdma_wr *wr);
+isert_map_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn);
 static void
 isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
 static int
-isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
-	       struct isert_rdma_wr *wr);
+isert_reg_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn);
 static int
 isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
 static int
-isert_rdma_post_recvl(struct isert_conn *isert_conn);
+isert_login_post_recv(struct isert_conn *isert_conn);
 static int
 isert_rdma_accept(struct isert_conn *isert_conn);
 struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
 
 static void isert_release_work(struct work_struct *work);
+static void isert_wait4flush(struct isert_conn *isert_conn);
+static void isert_recv_done(struct ib_cq *cq, struct ib_wc *wc);
+static void isert_send_done(struct ib_cq *cq, struct ib_wc *wc);
+static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc);
+static void isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc);
 
 static inline bool
 isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
@@ -177,12 +180,6 @@ err:
 	return ret;
 }
 
-static void
-isert_cq_event_callback(struct ib_event *e, void *context)
-{
-	isert_dbg("event: %d\n", e->event);
-}
-
 static int
 isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
 {
@@ -212,6 +209,7 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
 		rx_sg->addr = rx_desc->dma_addr;
 		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
 		rx_sg->lkey = device->pd->local_dma_lkey;
+		rx_desc->rx_cqe.done = isert_recv_done;
 	}
 
 	return 0;
@@ -250,9 +248,6 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn)
 	isert_conn->rx_descs = NULL;
 }
 
-static void isert_cq_work(struct work_struct *);
-static void isert_cq_callback(struct ib_cq *, void *);
-
 static void
 isert_free_comps(struct isert_device *device)
 {
@@ -261,10 +256,8 @@ isert_free_comps(struct isert_device *device)
 	for (i = 0; i < device->comps_used; i++) {
 		struct isert_comp *comp = &device->comps[i];
 
-		if (comp->cq) {
-			cancel_work_sync(&comp->work);
-			ib_destroy_cq(comp->cq);
-		}
+		if (comp->cq)
+			ib_free_cq(comp->cq);
 	}
 	kfree(device->comps);
 }
@@ -293,28 +286,17 @@ isert_alloc_comps(struct isert_device *device)
 	max_cqe = min(ISER_MAX_CQ_LEN, device->ib_device->attrs.max_cqe);
 
 	for (i = 0; i < device->comps_used; i++) {
-		struct ib_cq_init_attr cq_attr = {};
 		struct isert_comp *comp = &device->comps[i];
 
 		comp->device = device;
-		INIT_WORK(&comp->work, isert_cq_work);
-		cq_attr.cqe = max_cqe;
-		cq_attr.comp_vector = i;
-		comp->cq = ib_create_cq(device->ib_device,
-					isert_cq_callback,
-					isert_cq_event_callback,
-					(void *)comp,
-					&cq_attr);
+		comp->cq = ib_alloc_cq(device->ib_device, comp, max_cqe, i,
+				IB_POLL_WORKQUEUE);
 		if (IS_ERR(comp->cq)) {
 			isert_err("Unable to allocate cq\n");
 			ret = PTR_ERR(comp->cq);
 			comp->cq = NULL;
 			goto out_cq;
 		}
-
-		ret = ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
-		if (ret)
-			goto out_cq;
 	}
 
 	return 0;
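The hunk above is the heart of the "convert to new CQ API" work: instead of ib_create_cq() plus a private poll loop, the driver gets a CQ that the RDMA core polls for it, and each work request carries its own completion callback through struct ib_cqe. A minimal sketch of the pattern with hypothetical my_* names (ib_alloc_cq(), struct ib_cqe, wr_cqe and IB_POLL_WORKQUEUE are the real verbs API of this kernel era):

#include <rdma/ib_verbs.h>

/* Embed an ib_cqe in each descriptor so the completion handler can
 * recover the descriptor with container_of() instead of abusing wr_id. */
struct my_rx_desc {
	struct ib_cqe	cqe;
	/* ... buffers, DMA address, etc. ... */
};

static void my_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct my_rx_desc *desc = container_of(wc->wr_cqe,
					       struct my_rx_desc, cqe);

	if (wc->status != IB_WC_SUCCESS)
		return;		/* flush/teardown handling goes here */
	/* process desc ... */
}

static int my_post_recv(struct ib_qp *qp, struct my_rx_desc *desc,
			struct ib_sge *sge)
{
	struct ib_recv_wr wr = {}, *bad_wr;

	desc->cqe.done = my_recv_done;	/* per-WR completion callback */
	wr.wr_cqe = &desc->cqe;		/* replaces the old wr_id cookie */
	wr.sg_list = sge;
	wr.num_sge = 1;
	return ib_post_recv(qp, &wr, &bad_wr);
}

/* CQ allocation: the core polls it from workqueue context for us.
 * cq = ib_alloc_cq(ibdev, priv, nr_cqe, comp_vector, IB_POLL_WORKQUEUE); */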
@@ -582,7 +564,6 @@ isert_init_conn(struct isert_conn *isert_conn)
 	INIT_LIST_HEAD(&isert_conn->node);
 	init_completion(&isert_conn->login_comp);
 	init_completion(&isert_conn->login_req_comp);
-	init_completion(&isert_conn->wait);
 	kref_init(&isert_conn->kref);
 	mutex_init(&isert_conn->mutex);
 	spin_lock_init(&isert_conn->pool_lock);
@@ -596,11 +577,13 @@ isert_free_login_buf(struct isert_conn *isert_conn)
 	struct ib_device *ib_dev = isert_conn->device->ib_device;
 
 	ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
-			    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
+			    ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
+	kfree(isert_conn->login_rsp_buf);
+
 	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
-			    ISCSI_DEF_MAX_RECV_SEG_LEN,
+			    ISER_RX_PAYLOAD_SIZE,
 			    DMA_FROM_DEVICE);
-	kfree(isert_conn->login_buf);
+	kfree(isert_conn->login_req_buf);
 }
 
 static int
@@ -609,50 +592,48 @@ isert_alloc_login_buf(struct isert_conn *isert_conn,
 {
 	int ret;
 
-	isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
-					ISER_RX_LOGIN_SIZE, GFP_KERNEL);
-	if (!isert_conn->login_buf) {
+	isert_conn->login_req_buf = kzalloc(sizeof(*isert_conn->login_req_buf),
+			GFP_KERNEL);
+	if (!isert_conn->login_req_buf) {
 		isert_err("Unable to allocate isert_conn->login_buf\n");
 		return -ENOMEM;
 	}
 
-	isert_conn->login_req_buf = isert_conn->login_buf;
-	isert_conn->login_rsp_buf = isert_conn->login_buf +
-				    ISCSI_DEF_MAX_RECV_SEG_LEN;
-
-	isert_dbg("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
-		 isert_conn->login_buf, isert_conn->login_req_buf,
-		 isert_conn->login_rsp_buf);
-
 	isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
-				(void *)isert_conn->login_req_buf,
-				ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
-
+				isert_conn->login_req_buf,
+				ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
 	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
 	if (ret) {
 		isert_err("login_req_dma mapping error: %d\n", ret);
 		isert_conn->login_req_dma = 0;
-		goto out_login_buf;
+		goto out_free_login_req_buf;
 	}
 
-	isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
-					(void *)isert_conn->login_rsp_buf,
-					ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
+	isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL);
+	if (!isert_conn->login_rsp_buf) {
+		isert_err("Unable to allocate isert_conn->login_rspbuf\n");
+		goto out_unmap_login_req_buf;
+	}
 
+	isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
+					isert_conn->login_rsp_buf,
+					ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
 	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
 	if (ret) {
 		isert_err("login_rsp_dma mapping error: %d\n", ret);
 		isert_conn->login_rsp_dma = 0;
-		goto out_req_dma_map;
+		goto out_free_login_rsp_buf;
	}
 
 	return 0;
 
-out_req_dma_map:
+out_free_login_rsp_buf:
+	kfree(isert_conn->login_rsp_buf);
+out_unmap_login_req_buf:
 	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
-			    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
-out_login_buf:
-	kfree(isert_conn->login_buf);
+			    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+out_free_login_req_buf:
+	kfree(isert_conn->login_req_buf);
 	return ret;
 }
 
@@ -726,7 +707,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 	if (ret)
 		goto out_conn_dev;
 
-	ret = isert_rdma_post_recvl(isert_conn);
+	ret = isert_login_post_recv(isert_conn);
 	if (ret)
 		goto out_conn_dev;
 
@@ -773,7 +754,7 @@ isert_connect_release(struct isert_conn *isert_conn)
 		ib_destroy_qp(isert_conn->qp);
 	}
 
-	if (isert_conn->login_buf)
+	if (isert_conn->login_req_buf)
 		isert_free_login_buf(isert_conn);
 
 	isert_device_put(device);
@@ -820,12 +801,30 @@ isert_put_conn(struct isert_conn *isert_conn)
 	kref_put(&isert_conn->kref, isert_release_kref);
 }
 
+static void
+isert_handle_unbound_conn(struct isert_conn *isert_conn)
+{
+	struct isert_np *isert_np = isert_conn->cm_id->context;
+
+	mutex_lock(&isert_np->mutex);
+	if (!list_empty(&isert_conn->node)) {
+		/*
+		 * This means iscsi doesn't know this connection
+		 * so schedule a cleanup ourselves
+		 */
+		list_del_init(&isert_conn->node);
+		isert_put_conn(isert_conn);
+		queue_work(isert_release_wq, &isert_conn->release_work);
+	}
+	mutex_unlock(&isert_np->mutex);
+}
+
 /**
  * isert_conn_terminate() - Initiate connection termination
  * @isert_conn: isert connection struct
  *
  * Notes:
- * In case the connection state is FULL_FEATURE, move state
+ * In case the connection state is BOUND, move state
  * to TEMINATING and start teardown sequence (rdma_disconnect).
  * In case the connection state is UP, complete flush as well.
  *
@@ -837,23 +836,16 @@ isert_conn_terminate(struct isert_conn *isert_conn)
 {
 	int err;
 
-	switch (isert_conn->state) {
-	case ISER_CONN_TERMINATING:
-		break;
-	case ISER_CONN_UP:
-	case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
-		isert_info("Terminating conn %p state %d\n",
-			   isert_conn, isert_conn->state);
-		isert_conn->state = ISER_CONN_TERMINATING;
-		err = rdma_disconnect(isert_conn->cm_id);
-		if (err)
-			isert_warn("Failed rdma_disconnect isert_conn %p\n",
-				   isert_conn);
-		break;
-	default:
-		isert_warn("conn %p teminating in state %d\n",
-			   isert_conn, isert_conn->state);
-	}
+	if (isert_conn->state >= ISER_CONN_TERMINATING)
+		return;
+
+	isert_info("Terminating conn %p state %d\n",
+		   isert_conn, isert_conn->state);
+	isert_conn->state = ISER_CONN_TERMINATING;
+	err = rdma_disconnect(isert_conn->cm_id);
+	if (err)
+		isert_warn("Failed rdma_disconnect isert_conn %p\n",
+			   isert_conn);
 }
 
 static int
@@ -887,35 +879,27 @@ static int
 isert_disconnected_handler(struct rdma_cm_id *cma_id,
 			   enum rdma_cm_event_type event)
 {
-	struct isert_np *isert_np = cma_id->context;
-	struct isert_conn *isert_conn;
-	bool terminating = false;
-
-	if (isert_np->cm_id == cma_id)
-		return isert_np_cma_handler(cma_id->context, event);
-
-	isert_conn = cma_id->qp->qp_context;
+	struct isert_conn *isert_conn = cma_id->qp->qp_context;
 
 	mutex_lock(&isert_conn->mutex);
-	terminating = (isert_conn->state == ISER_CONN_TERMINATING);
-	isert_conn_terminate(isert_conn);
-	mutex_unlock(&isert_conn->mutex);
-
-	isert_info("conn %p completing wait\n", isert_conn);
-	complete(&isert_conn->wait);
-
-	if (terminating)
-		goto out;
-
-	mutex_lock(&isert_np->mutex);
-	if (!list_empty(&isert_conn->node)) {
-		list_del_init(&isert_conn->node);
-		isert_put_conn(isert_conn);
-		queue_work(isert_release_wq, &isert_conn->release_work);
+	switch (isert_conn->state) {
+	case ISER_CONN_TERMINATING:
+		break;
+	case ISER_CONN_UP:
+		isert_conn_terminate(isert_conn);
+		isert_wait4flush(isert_conn);
+		isert_handle_unbound_conn(isert_conn);
+		break;
+	case ISER_CONN_BOUND:
+	case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
+		iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+		break;
+	default:
+		isert_warn("conn %p teminating in state %d\n",
+			   isert_conn, isert_conn->state);
 	}
-	mutex_unlock(&isert_np->mutex);
+	mutex_unlock(&isert_conn->mutex);
 
-out:
 	return 0;
 }
 
@@ -934,12 +918,16 @@ isert_connect_error(struct rdma_cm_id *cma_id)
 static int
 isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 {
+	struct isert_np *isert_np = cma_id->context;
 	int ret = 0;
 
 	isert_info("%s (%d): status %d id %p np %p\n",
 		   rdma_event_msg(event->event), event->event,
 		   event->status, cma_id, cma_id->context);
 
+	if (isert_np->cm_id == cma_id)
+		return isert_np_cma_handler(cma_id->context, event->event);
+
 	switch (event->event) {
 	case RDMA_CM_EVENT_CONNECT_REQUEST:
 		ret = isert_connect_request(cma_id, event);
@@ -977,7 +965,8 @@ isert_post_recvm(struct isert_conn *isert_conn, u32 count)
 
 	for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
 		rx_desc = &isert_conn->rx_descs[i];
-		rx_wr->wr_id = (uintptr_t)rx_desc;
+
+		rx_wr->wr_cqe = &rx_desc->rx_cqe;
 		rx_wr->sg_list = &rx_desc->rx_sg;
 		rx_wr->num_sge = 1;
 		rx_wr->next = rx_wr + 1;
@@ -985,13 +974,10 @@
 	rx_wr--;
 	rx_wr->next = NULL; /* mark end of work requests list */
 
-	isert_conn->post_recv_buf_count += count;
 	ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr,
 			   &rx_wr_failed);
-	if (ret) {
+	if (ret)
 		isert_err("ib_post_recv() failed with ret: %d\n", ret);
-		isert_conn->post_recv_buf_count -= count;
-	}
 
 	return ret;
 }
@@ -1002,23 +988,20 @@ isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
 	struct ib_recv_wr *rx_wr_failed, rx_wr;
 	int ret;
 
-	rx_wr.wr_id = (uintptr_t)rx_desc;
+	rx_wr.wr_cqe = &rx_desc->rx_cqe;
 	rx_wr.sg_list = &rx_desc->rx_sg;
 	rx_wr.num_sge = 1;
 	rx_wr.next = NULL;
 
-	isert_conn->post_recv_buf_count++;
 	ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_failed);
-	if (ret) {
+	if (ret)
 		isert_err("ib_post_recv() failed with ret: %d\n", ret);
-		isert_conn->post_recv_buf_count--;
-	}
 
 	return ret;
 }
 
 static int
-isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
+isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
 {
 	struct ib_device *ib_dev = isert_conn->cm_id->device;
 	struct ib_send_wr send_wr, *send_wr_failed;
@@ -1027,8 +1010,10 @@ isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
 	ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
 				      ISER_HEADERS_LEN, DMA_TO_DEVICE);
 
+	tx_desc->tx_cqe.done = isert_login_send_done;
+
 	send_wr.next	= NULL;
-	send_wr.wr_id	= (uintptr_t)tx_desc;
+	send_wr.wr_cqe	= &tx_desc->tx_cqe;
 	send_wr.sg_list	= tx_desc->tx_sg;
 	send_wr.num_sge	= tx_desc->num_sge;
 	send_wr.opcode	= IB_WR_SEND;
@@ -1056,7 +1041,6 @@ isert_create_send_desc(struct isert_conn *isert_conn,
 	tx_desc->iser_header.flags = ISCSI_CTRL;
 
 	tx_desc->num_sge = 1;
-	tx_desc->isert_cmd = isert_cmd;
 
 	if (tx_desc->tx_sg[0].lkey != device->pd->local_dma_lkey) {
 		tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;
@@ -1097,8 +1081,9 @@ isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
 {
 	struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;
 
-	isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
-	send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
+	isert_cmd->iser_ib_op = ISER_IB_SEND;
+	tx_desc->tx_cqe.done = isert_send_done;
+	send_wr->wr_cqe = &tx_desc->tx_cqe;
 
 	if (isert_conn->snd_w_inv && isert_cmd->inv_rkey) {
 		send_wr->opcode  = IB_WR_SEND_WITH_INV;
@@ -1113,7 +1098,7 @@
 }
 
 static int
-isert_rdma_post_recvl(struct isert_conn *isert_conn)
+isert_login_post_recv(struct isert_conn *isert_conn)
 {
 	struct ib_recv_wr rx_wr, *rx_wr_fail;
 	struct ib_sge sge;
@@ -1121,23 +1106,22 @@
 
 	memset(&sge, 0, sizeof(struct ib_sge));
 	sge.addr = isert_conn->login_req_dma;
-	sge.length = ISER_RX_LOGIN_SIZE;
+	sge.length = ISER_RX_PAYLOAD_SIZE;
 	sge.lkey = isert_conn->device->pd->local_dma_lkey;
 
 	isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
 		sge.addr, sge.length, sge.lkey);
 
+	isert_conn->login_req_buf->rx_cqe.done = isert_login_recv_done;
+
 	memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
-	rx_wr.wr_id = (uintptr_t)isert_conn->login_req_buf;
+	rx_wr.wr_cqe = &isert_conn->login_req_buf->rx_cqe;
 	rx_wr.sg_list = &sge;
 	rx_wr.num_sge = 1;
 
-	isert_conn->post_recv_buf_count++;
 	ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_fail);
-	if (ret) {
+	if (ret)
 		isert_err("ib_post_recv() failed: %d\n", ret);
-		isert_conn->post_recv_buf_count--;
-	}
 
 	return ret;
 }
@@ -1203,12 +1187,12 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
 			goto post_send;
 		}
 
-		ret = isert_rdma_post_recvl(isert_conn);
+		ret = isert_login_post_recv(isert_conn);
 		if (ret)
 			return ret;
 	}
 post_send:
-	ret = isert_post_send(isert_conn, tx_desc);
+	ret = isert_login_post_send(isert_conn, tx_desc);
 	if (ret)
 		return ret;
 
@@ -1218,7 +1202,7 @@ post_send:
 static void
 isert_rx_login_req(struct isert_conn *isert_conn)
 {
-	struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf;
+	struct iser_rx_desc *rx_desc = isert_conn->login_req_buf;
 	int rx_buflen = isert_conn->login_req_len;
 	struct iscsi_conn *conn = isert_conn->conn;
 	struct iscsi_login *login = conn->conn_login;
@@ -1551,12 +1535,42 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
 }
 
 static void
-isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
+isert_print_wc(struct ib_wc *wc, const char *type)
+{
+	if (wc->status != IB_WC_WR_FLUSH_ERR)
+		isert_err("%s failure: %s (%d) vend_err %x\n", type,
+			  ib_wc_status_msg(wc->status), wc->status,
+			  wc->vendor_err);
+	else
+		isert_dbg("%s failure: %s (%d)\n", type,
+			  ib_wc_status_msg(wc->status), wc->status);
+}
+
+static void
+isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 {
+	struct isert_conn *isert_conn = wc->qp->qp_context;
+	struct ib_device *ib_dev = isert_conn->cm_id->device;
+	struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe);
+	struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
 	struct iser_ctrl *iser_ctrl = &rx_desc->iser_header;
 	uint64_t read_va = 0, write_va = 0;
 	uint32_t read_stag = 0, write_stag = 0;
 
+	if (unlikely(wc->status != IB_WC_SUCCESS)) {
+		isert_print_wc(wc, "recv");
+		if (wc->status != IB_WC_WR_FLUSH_ERR)
+			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+		return;
+	}
+
+	ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr,
+			ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+
+	isert_dbg("DMA: 0x%llx, iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
+		 rx_desc->dma_addr, hdr->opcode, hdr->itt, hdr->flags,
+		 (int)(wc->byte_len - ISER_HEADERS_LEN));
+
 	switch (iser_ctrl->flags & 0xF0) {
 	case ISCSI_CTRL:
 		if (iser_ctrl->flags & ISER_RSV) {
@@ -1584,56 +1598,40 @@
 
 	isert_rx_opcode(isert_conn, rx_desc,
 			read_stag, read_va, write_stag, write_va);
+
+	ib_dma_sync_single_for_device(ib_dev, rx_desc->dma_addr,
+			ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
 }
 
 static void
-isert_rcv_completion(struct iser_rx_desc *desc,
-		     struct isert_conn *isert_conn,
-		     u32 xfer_len)
+isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 {
+	struct isert_conn *isert_conn = wc->qp->qp_context;
 	struct ib_device *ib_dev = isert_conn->cm_id->device;
-	struct iscsi_hdr *hdr;
-	u64 rx_dma;
-	int rx_buflen;
-
-	if ((char *)desc == isert_conn->login_req_buf) {
-		rx_dma = isert_conn->login_req_dma;
-		rx_buflen = ISER_RX_LOGIN_SIZE;
-		isert_dbg("login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
-			 rx_dma, rx_buflen);
-	} else {
-		rx_dma = desc->dma_addr;
-		rx_buflen = ISER_RX_PAYLOAD_SIZE;
-		isert_dbg("req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
-			 rx_dma, rx_buflen);
+
+	if (unlikely(wc->status != IB_WC_SUCCESS)) {
+		isert_print_wc(wc, "login recv");
+		return;
 	}
 
-	ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);
+	ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_req_dma,
+			ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
 
-	hdr = &desc->iscsi_header;
-	isert_dbg("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
-		 hdr->opcode, hdr->itt, hdr->flags,
-		 (int)(xfer_len - ISER_HEADERS_LEN));
+	isert_conn->login_req_len = wc->byte_len - ISER_HEADERS_LEN;
 
-	if ((char *)desc == isert_conn->login_req_buf) {
-		isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
-		if (isert_conn->conn) {
-			struct iscsi_login *login = isert_conn->conn->conn_login;
+	if (isert_conn->conn) {
+		struct iscsi_login *login = isert_conn->conn->conn_login;
 
-			if (login && !login->first_request)
-				isert_rx_login_req(isert_conn);
-		}
-		mutex_lock(&isert_conn->mutex);
-		complete(&isert_conn->login_req_comp);
-		mutex_unlock(&isert_conn->mutex);
-	} else {
-		isert_rx_do_work(desc, isert_conn);
+		if (login && !login->first_request)
+			isert_rx_login_req(isert_conn);
 	}
 
-	ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
-				      DMA_FROM_DEVICE);
+	mutex_lock(&isert_conn->mutex);
+	complete(&isert_conn->login_req_comp);
+	mutex_unlock(&isert_conn->mutex);
 
-	isert_conn->post_recv_buf_count--;
+	ib_dma_sync_single_for_device(ib_dev, isert_conn->login_req_dma,
+				ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
 }
 
 static int
@@ -1683,54 +1681,50 @@ isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data)
 static void
 isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
 {
-	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
-
 	isert_dbg("Cmd %p\n", isert_cmd);
 
-	if (wr->data.sg) {
+	if (isert_cmd->data.sg) {
 		isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
-		isert_unmap_data_buf(isert_conn, &wr->data);
+		isert_unmap_data_buf(isert_conn, &isert_cmd->data);
 	}
 
-	if (wr->rdma_wr) {
+	if (isert_cmd->rdma_wr) {
 		isert_dbg("Cmd %p free send_wr\n", isert_cmd);
-		kfree(wr->rdma_wr);
-		wr->rdma_wr = NULL;
+		kfree(isert_cmd->rdma_wr);
+		isert_cmd->rdma_wr = NULL;
 	}
 
-	if (wr->ib_sge) {
+	if (isert_cmd->ib_sge) {
 		isert_dbg("Cmd %p free ib_sge\n", isert_cmd);
-		kfree(wr->ib_sge);
-		wr->ib_sge = NULL;
+		kfree(isert_cmd->ib_sge);
+		isert_cmd->ib_sge = NULL;
 	}
 }
 
 static void
 isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
 {
-	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
-
 	isert_dbg("Cmd %p\n", isert_cmd);
 
-	if (wr->fr_desc) {
-		isert_dbg("Cmd %p free fr_desc %p\n", isert_cmd, wr->fr_desc);
-		if (wr->fr_desc->ind & ISERT_PROTECTED) {
-			isert_unmap_data_buf(isert_conn, &wr->prot);
-			wr->fr_desc->ind &= ~ISERT_PROTECTED;
+	if (isert_cmd->fr_desc) {
+		isert_dbg("Cmd %p free fr_desc %p\n", isert_cmd, isert_cmd->fr_desc);
+		if (isert_cmd->fr_desc->ind & ISERT_PROTECTED) {
+			isert_unmap_data_buf(isert_conn, &isert_cmd->prot);
+			isert_cmd->fr_desc->ind &= ~ISERT_PROTECTED;
 		}
 		spin_lock_bh(&isert_conn->pool_lock);
-		list_add_tail(&wr->fr_desc->list, &isert_conn->fr_pool);
+		list_add_tail(&isert_cmd->fr_desc->list, &isert_conn->fr_pool);
 		spin_unlock_bh(&isert_conn->pool_lock);
-		wr->fr_desc = NULL;
+		isert_cmd->fr_desc = NULL;
 	}
 
-	if (wr->data.sg) {
+	if (isert_cmd->data.sg) {
 		isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
-		isert_unmap_data_buf(isert_conn, &wr->data);
+		isert_unmap_data_buf(isert_conn, &isert_cmd->data);
 	}
 
-	wr->ib_sge = NULL;
-	wr->rdma_wr = NULL;
+	isert_cmd->ib_sge = NULL;
+	isert_cmd->rdma_wr = NULL;
 }
 
 static void
@@ -1882,52 +1876,70 @@ fail_mr_status:
 }
 
 static void
-isert_completion_rdma_write(struct iser_tx_desc *tx_desc,
-			    struct isert_cmd *isert_cmd)
+isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
 {
-	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
-	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
-	struct se_cmd *se_cmd = &cmd->se_cmd;
-	struct isert_conn *isert_conn = isert_cmd->conn;
+	struct isert_conn *isert_conn = wc->qp->qp_context;
 	struct isert_device *device = isert_conn->device;
+	struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
+	struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
+	struct se_cmd *cmd = &isert_cmd->iscsi_cmd->se_cmd;
 	int ret = 0;
 
-	if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
-		ret = isert_check_pi_status(se_cmd,
-					    wr->fr_desc->pi_ctx->sig_mr);
-		wr->fr_desc->ind &= ~ISERT_PROTECTED;
+	if (unlikely(wc->status != IB_WC_SUCCESS)) {
+		isert_print_wc(wc, "rdma write");
+		if (wc->status != IB_WC_WR_FLUSH_ERR)
+			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+		isert_completion_put(desc, isert_cmd, device->ib_device, true);
+		return;
+	}
+
+	isert_dbg("Cmd %p\n", isert_cmd);
+
+	if (isert_cmd->fr_desc && isert_cmd->fr_desc->ind & ISERT_PROTECTED) {
+		ret = isert_check_pi_status(cmd,
+				isert_cmd->fr_desc->pi_ctx->sig_mr);
+		isert_cmd->fr_desc->ind &= ~ISERT_PROTECTED;
 	}
 
 	device->unreg_rdma_mem(isert_cmd, isert_conn);
-	wr->rdma_wr_num = 0;
+	isert_cmd->rdma_wr_num = 0;
 	if (ret)
-		transport_send_check_condition_and_sense(se_cmd,
-							 se_cmd->pi_err, 0);
+		transport_send_check_condition_and_sense(cmd, cmd->pi_err, 0);
 	else
-		isert_put_response(isert_conn->conn, cmd);
+		isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd);
 }
 
 static void
-isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
-			   struct isert_cmd *isert_cmd)
+isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
 {
-	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
+	struct isert_conn *isert_conn = wc->qp->qp_context;
+	struct isert_device *device = isert_conn->device;
+	struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
+	struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
 	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
 	struct se_cmd *se_cmd = &cmd->se_cmd;
-	struct isert_conn *isert_conn = isert_cmd->conn;
-	struct isert_device *device = isert_conn->device;
 	int ret = 0;
 
-	if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
+	if (unlikely(wc->status != IB_WC_SUCCESS)) {
+		isert_print_wc(wc, "rdma read");
+		if (wc->status != IB_WC_WR_FLUSH_ERR)
+			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+		isert_completion_put(desc, isert_cmd, device->ib_device, true);
+		return;
+	}
+
+	isert_dbg("Cmd %p\n", isert_cmd);
+
+	if (isert_cmd->fr_desc && isert_cmd->fr_desc->ind & ISERT_PROTECTED) {
 		ret = isert_check_pi_status(se_cmd,
-					    wr->fr_desc->pi_ctx->sig_mr);
-		wr->fr_desc->ind &= ~ISERT_PROTECTED;
+					    isert_cmd->fr_desc->pi_ctx->sig_mr);
+		isert_cmd->fr_desc->ind &= ~ISERT_PROTECTED;
 	}
 
 	iscsit_stop_dataout_timer(cmd);
 	device->unreg_rdma_mem(isert_cmd, isert_conn);
-	cmd->write_data_done = wr->data.len;
-	wr->rdma_wr_num = 0;
+	cmd->write_data_done = isert_cmd->data.len;
+	isert_cmd->rdma_wr_num = 0;
 
 	isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
 	spin_lock_bh(&cmd->istate_lock);
@@ -1975,170 +1987,56 @@ isert_do_control_comp(struct work_struct *work)
 }
 
 static void
-isert_response_completion(struct iser_tx_desc *tx_desc,
-			  struct isert_cmd *isert_cmd,
-			  struct isert_conn *isert_conn,
-			  struct ib_device *ib_dev)
+isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc)
 {
-	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
-
-	if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
-	    cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
-	    cmd->i_state == ISTATE_SEND_REJECT ||
-	    cmd->i_state == ISTATE_SEND_TEXTRSP) {
-		isert_unmap_tx_desc(tx_desc, ib_dev);
+	struct isert_conn *isert_conn = wc->qp->qp_context;
+	struct ib_device *ib_dev = isert_conn->cm_id->device;
+	struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);
 
-		INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
-		queue_work(isert_comp_wq, &isert_cmd->comp_work);
-		return;
+	if (unlikely(wc->status != IB_WC_SUCCESS)) {
+		isert_print_wc(wc, "login send");
+		if (wc->status != IB_WC_WR_FLUSH_ERR)
+			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
 	}
 
-	cmd->i_state = ISTATE_SENT_STATUS;
-	isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
+	isert_unmap_tx_desc(tx_desc, ib_dev);
 }
 
 static void
-isert_snd_completion(struct iser_tx_desc *tx_desc,
-		      struct isert_conn *isert_conn)
+isert_send_done(struct ib_cq *cq, struct ib_wc *wc)
 {
+	struct isert_conn *isert_conn = wc->qp->qp_context;
 	struct ib_device *ib_dev = isert_conn->cm_id->device;
-	struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
-	struct isert_rdma_wr *wr;
+	struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);
+	struct isert_cmd *isert_cmd = tx_desc_to_cmd(tx_desc);
 
-	if (!isert_cmd) {
-		isert_unmap_tx_desc(tx_desc, ib_dev);
+	if (unlikely(wc->status != IB_WC_SUCCESS)) {
+		isert_print_wc(wc, "send");
+		if (wc->status != IB_WC_WR_FLUSH_ERR)
+			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+		isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
 		return;
 	}
-	wr = &isert_cmd->rdma_wr;
 
-	isert_dbg("Cmd %p iser_ib_op %d\n", isert_cmd, wr->iser_ib_op);
+	isert_dbg("Cmd %p\n", isert_cmd);
 
-	switch (wr->iser_ib_op) {
-	case ISER_IB_SEND:
-		isert_response_completion(tx_desc, isert_cmd,
-					  isert_conn, ib_dev);
-		break;
-	case ISER_IB_RDMA_WRITE:
-		isert_completion_rdma_write(tx_desc, isert_cmd);
-		break;
-	case ISER_IB_RDMA_READ:
-		isert_completion_rdma_read(tx_desc, isert_cmd);
-		break;
+	switch (isert_cmd->iscsi_cmd->i_state) {
+	case ISTATE_SEND_TASKMGTRSP:
+	case ISTATE_SEND_LOGOUTRSP:
+	case ISTATE_SEND_REJECT:
+	case ISTATE_SEND_TEXTRSP:
+		isert_unmap_tx_desc(tx_desc, ib_dev);
+
+		INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
+		queue_work(isert_comp_wq, &isert_cmd->comp_work);
+		return;
 	default:
-		isert_err("Unknown wr->iser_ib_op: 0x%x\n", wr->iser_ib_op);
-		dump_stack();
+		isert_cmd->iscsi_cmd->i_state = ISTATE_SENT_STATUS;
+		isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
 		break;
 	}
 }
 
-/**
- * is_isert_tx_desc() - Indicate if the completion wr_id
- *     is a TX descriptor or not.
- * @isert_conn: iser connection
- * @wr_id: completion WR identifier
- *
- * Since we cannot rely on wc opcode in FLUSH errors
- * we must work around it by checking if the wr_id address
- * falls in the iser connection rx_descs buffer. If so
- * it is an RX descriptor, otherwize it is a TX.
- */
-static inline bool
-is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id)
-{
-	void *start = isert_conn->rx_descs;
-	int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->rx_descs);
-
-	if (wr_id >= start && wr_id < start + len)
-		return false;
-
-	return true;
-}
-
-static void
-isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc)
-{
-	if (wc->wr_id == ISER_BEACON_WRID) {
-		isert_info("conn %p completing wait_comp_err\n",
-			   isert_conn);
-		complete(&isert_conn->wait_comp_err);
-	} else if (is_isert_tx_desc(isert_conn, (void *)(uintptr_t)wc->wr_id)) {
-		struct ib_device *ib_dev = isert_conn->cm_id->device;
-		struct isert_cmd *isert_cmd;
-		struct iser_tx_desc *desc;
-
-		desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
-		isert_cmd = desc->isert_cmd;
-		if (!isert_cmd)
-			isert_unmap_tx_desc(desc, ib_dev);
-		else
-			isert_completion_put(desc, isert_cmd, ib_dev, true);
-	} else {
-		isert_conn->post_recv_buf_count--;
-		if (!isert_conn->post_recv_buf_count)
-			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
-	}
-}
-
-static void
-isert_handle_wc(struct ib_wc *wc)
-{
-	struct isert_conn *isert_conn;
-	struct iser_tx_desc *tx_desc;
-	struct iser_rx_desc *rx_desc;
-
-	isert_conn = wc->qp->qp_context;
-	if (likely(wc->status == IB_WC_SUCCESS)) {
-		if (wc->opcode == IB_WC_RECV) {
-			rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id;
-			isert_rcv_completion(rx_desc, isert_conn, wc->byte_len);
-		} else {
-			tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
-			isert_snd_completion(tx_desc, isert_conn);
-		}
-	} else {
-		if (wc->status != IB_WC_WR_FLUSH_ERR)
-			isert_err("%s (%d): wr id %llx vend_err %x\n",
-				  ib_wc_status_msg(wc->status), wc->status,
-				  wc->wr_id, wc->vendor_err);
-		else
-			isert_dbg("%s (%d): wr id %llx\n",
-				  ib_wc_status_msg(wc->status), wc->status,
-				  wc->wr_id);
-
-		if (wc->wr_id != ISER_FASTREG_LI_WRID)
-			isert_cq_comp_err(isert_conn, wc);
-	}
-}
-
-static void
-isert_cq_work(struct work_struct *work)
-{
-	enum { isert_poll_budget = 65536 };
-	struct isert_comp *comp = container_of(work, struct isert_comp,
-					       work);
-	struct ib_wc *const wcs = comp->wcs;
-	int i, n, completed = 0;
-
-	while ((n = ib_poll_cq(comp->cq, ARRAY_SIZE(comp->wcs), wcs)) > 0) {
-		for (i = 0; i < n; i++)
-			isert_handle_wc(&wcs[i]);
-
-		completed += n;
-		if (completed >= isert_poll_budget)
-			break;
-	}
-
-	ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
-}
-
-static void
-isert_cq_callback(struct ib_cq *cq, void *context)
-{
-	struct isert_comp *comp = context;
-
-	queue_work(isert_comp_wq, &comp->work);
-}
-
 static int
 isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
 {
@@ -2395,7 +2293,8 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
 	page_off = offset % PAGE_SIZE;
 
 	rdma_wr->wr.sg_list = ib_sge;
-	rdma_wr->wr.wr_id = (uintptr_t)&isert_cmd->tx_desc;
+	rdma_wr->wr.wr_cqe = &isert_cmd->tx_desc.tx_cqe;
+
 	/*
 	 * Perform mapping of TCM scatterlist memory ib_sge dma_addr.
 	 */
@@ -2428,24 +2327,23 @@
 }
 
 static int
-isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
-	       struct isert_rdma_wr *wr)
+isert_map_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn)
 {
+	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
 	struct se_cmd *se_cmd = &cmd->se_cmd;
-	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
 	struct isert_conn *isert_conn = conn->context;
-	struct isert_data_buf *data = &wr->data;
+	struct isert_data_buf *data = &isert_cmd->data;
 	struct ib_rdma_wr *rdma_wr;
 	struct ib_sge *ib_sge;
 	u32 offset, data_len, data_left, rdma_write_max, va_offset = 0;
 	int ret = 0, i, ib_sge_cnt;
 
-	isert_cmd->tx_desc.isert_cmd = isert_cmd;
-
-	offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
+	offset = isert_cmd->iser_ib_op == ISER_IB_RDMA_READ ?
+			cmd->write_data_done : 0;
 	ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
 				 se_cmd->t_data_nents, se_cmd->data_length,
-				 offset, wr->iser_ib_op, &wr->data);
+				 offset, isert_cmd->iser_ib_op,
+				 &isert_cmd->data);
 	if (ret)
 		return ret;
 
@@ -2458,41 +2356,44 @@
 		ret = -ENOMEM;
 		goto unmap_cmd;
 	}
-	wr->ib_sge = ib_sge;
+	isert_cmd->ib_sge = ib_sge;
 
-	wr->rdma_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
-	wr->rdma_wr = kzalloc(sizeof(struct ib_rdma_wr) * wr->rdma_wr_num,
-				GFP_KERNEL);
-	if (!wr->rdma_wr) {
-		isert_dbg("Unable to allocate wr->rdma_wr\n");
+	isert_cmd->rdma_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
+	isert_cmd->rdma_wr = kzalloc(sizeof(struct ib_rdma_wr) *
+			isert_cmd->rdma_wr_num, GFP_KERNEL);
+	if (!isert_cmd->rdma_wr) {
+		isert_dbg("Unable to allocate isert_cmd->rdma_wr\n");
 		ret = -ENOMEM;
 		goto unmap_cmd;
 	}
 
-	wr->isert_cmd = isert_cmd;
 	rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
 
-	for (i = 0; i < wr->rdma_wr_num; i++) {
-		rdma_wr = &isert_cmd->rdma_wr.rdma_wr[i];
+	for (i = 0; i < isert_cmd->rdma_wr_num; i++) {
+		rdma_wr = &isert_cmd->rdma_wr[i];
 		data_len = min(data_left, rdma_write_max);
 
 		rdma_wr->wr.send_flags = 0;
-		if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
+		if (isert_cmd->iser_ib_op == ISER_IB_RDMA_WRITE) {
+			isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done;
+
 			rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
 			rdma_wr->remote_addr = isert_cmd->read_va + offset;
 			rdma_wr->rkey = isert_cmd->read_stag;
-			if (i + 1 == wr->rdma_wr_num)
+			if (i + 1 == isert_cmd->rdma_wr_num)
 				rdma_wr->wr.next = &isert_cmd->tx_desc.send_wr;
 			else
-				rdma_wr->wr.next = &wr->rdma_wr[i + 1].wr;
+				rdma_wr->wr.next = &isert_cmd->rdma_wr[i + 1].wr;
 		} else {
+			isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
+
 			rdma_wr->wr.opcode = IB_WR_RDMA_READ;
 			rdma_wr->remote_addr = isert_cmd->write_va + va_offset;
 			rdma_wr->rkey = isert_cmd->write_stag;
-			if (i + 1 == wr->rdma_wr_num)
+			if (i + 1 == isert_cmd->rdma_wr_num)
 				rdma_wr->wr.send_flags = IB_SEND_SIGNALED;
 			else
-				rdma_wr->wr.next = &wr->rdma_wr[i + 1].wr;
+				rdma_wr->wr.next = &isert_cmd->rdma_wr[i + 1].wr;
 		}
 
 		ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
@@ -2517,7 +2418,7 @@ isert_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
 	u32 rkey;
 
 	memset(inv_wr, 0, sizeof(*inv_wr));
-	inv_wr->wr_id = ISER_FASTREG_LI_WRID;
+	inv_wr->wr_cqe = NULL;
 	inv_wr->opcode = IB_WR_LOCAL_INV;
 	inv_wr->ex.invalidate_rkey = mr->rkey;
 
@@ -2573,7 +2474,7 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
 
 	reg_wr.wr.next = NULL;
 	reg_wr.wr.opcode = IB_WR_REG_MR;
-	reg_wr.wr.wr_id = ISER_FASTREG_LI_WRID;
+	reg_wr.wr.wr_cqe = NULL;
 	reg_wr.wr.send_flags = 0;
 	reg_wr.wr.num_sge = 0;
 	reg_wr.mr = mr;
@@ -2660,10 +2561,10 @@ isert_set_prot_checks(u8 prot_checks)
 
 static int
 isert_reg_sig_mr(struct isert_conn *isert_conn,
-		 struct se_cmd *se_cmd,
-		 struct isert_rdma_wr *rdma_wr,
+		 struct isert_cmd *isert_cmd,
 		 struct fast_reg_descriptor *fr_desc)
 {
+	struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
 	struct ib_sig_handover_wr sig_wr;
 	struct ib_send_wr inv_wr, *bad_wr, *wr = NULL;
 	struct pi_context *pi_ctx = fr_desc->pi_ctx;
@@ -2684,14 +2585,14 @@
 
 	memset(&sig_wr, 0, sizeof(sig_wr));
 	sig_wr.wr.opcode = IB_WR_REG_SIG_MR;
-	sig_wr.wr.wr_id = ISER_FASTREG_LI_WRID;
-	sig_wr.wr.sg_list = &rdma_wr->ib_sg[DATA];
+	sig_wr.wr.wr_cqe = NULL;
+	sig_wr.wr.sg_list = &isert_cmd->ib_sg[DATA];
 	sig_wr.wr.num_sge = 1;
 	sig_wr.access_flags = IB_ACCESS_LOCAL_WRITE;
 	sig_wr.sig_attrs = &sig_attrs;
 	sig_wr.sig_mr = pi_ctx->sig_mr;
 	if (se_cmd->t_prot_sg)
-		sig_wr.prot = &rdma_wr->ib_sg[PROT];
+		sig_wr.prot = &isert_cmd->ib_sg[PROT];
 
 	if (!wr)
 		wr = &sig_wr.wr;
@@ -2705,35 +2606,34 @@
 	}
 	fr_desc->ind &= ~ISERT_SIG_KEY_VALID;
 
-	rdma_wr->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey;
-	rdma_wr->ib_sg[SIG].addr = 0;
-	rdma_wr->ib_sg[SIG].length = se_cmd->data_length;
+	isert_cmd->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey;
+	isert_cmd->ib_sg[SIG].addr = 0;
+	isert_cmd->ib_sg[SIG].length = se_cmd->data_length;
 	if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
 	    se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
 		/*
 		 * We have protection guards on the wire
 		 * so we need to set a larget transfer
 		 */
-		rdma_wr->ib_sg[SIG].length += se_cmd->prot_length;
+		isert_cmd->ib_sg[SIG].length += se_cmd->prot_length;
 
 	isert_dbg("sig_sge: addr: 0x%llx  length: %u lkey: %x\n",
-		  rdma_wr->ib_sg[SIG].addr, rdma_wr->ib_sg[SIG].length,
-		  rdma_wr->ib_sg[SIG].lkey);
+		  isert_cmd->ib_sg[SIG].addr, isert_cmd->ib_sg[SIG].length,
+		  isert_cmd->ib_sg[SIG].lkey);
 err:
 	return ret;
 }
 
 static int
 isert_handle_prot_cmd(struct isert_conn *isert_conn,
-		      struct isert_cmd *isert_cmd,
-		      struct isert_rdma_wr *wr)
+		      struct isert_cmd *isert_cmd)
 {
 	struct isert_device *device = isert_conn->device;
 	struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
 	int ret;
 
-	if (!wr->fr_desc->pi_ctx) {
-		ret = isert_create_pi_ctx(wr->fr_desc,
+	if (!isert_cmd->fr_desc->pi_ctx) {
+		ret = isert_create_pi_ctx(isert_cmd->fr_desc,
 					  device->ib_device,
 					  device->pd);
 		if (ret) {
@@ -2748,16 +2648,20 @@
 					 se_cmd->t_prot_sg,
 					 se_cmd->t_prot_nents,
 					 se_cmd->prot_length,
-					 0, wr->iser_ib_op, &wr->prot);
+					 0,
+					 isert_cmd->iser_ib_op,
+					 &isert_cmd->prot);
 		if (ret) {
 			isert_err("conn %p failed to map protection buffer\n",
 				  isert_conn);
 			return ret;
 		}
 
-		memset(&wr->ib_sg[PROT], 0, sizeof(wr->ib_sg[PROT]));
-		ret = isert_fast_reg_mr(isert_conn, wr->fr_desc, &wr->prot,
-					ISERT_PROT_KEY_VALID, &wr->ib_sg[PROT]);
+		memset(&isert_cmd->ib_sg[PROT], 0, sizeof(isert_cmd->ib_sg[PROT]));
+		ret = isert_fast_reg_mr(isert_conn, isert_cmd->fr_desc,
+					&isert_cmd->prot,
+					ISERT_PROT_KEY_VALID,
+					&isert_cmd->ib_sg[PROT]);
 		if (ret) {
 			isert_err("conn %p failed to fast reg mr\n",
 				  isert_conn);
@@ -2765,29 +2669,28 @@
 		}
 	}
 
-	ret = isert_reg_sig_mr(isert_conn, se_cmd, wr, wr->fr_desc);
+	ret = isert_reg_sig_mr(isert_conn, isert_cmd, isert_cmd->fr_desc);
 	if (ret) {
 		isert_err("conn %p failed to fast reg mr\n",
 			  isert_conn);
 		goto unmap_prot_cmd;
 	}
-	wr->fr_desc->ind |= ISERT_PROTECTED;
+	isert_cmd->fr_desc->ind |= ISERT_PROTECTED;
 
 	return 0;
 
 unmap_prot_cmd:
 	if (se_cmd->t_prot_sg)
-		isert_unmap_data_buf(isert_conn, &wr->prot);
+		isert_unmap_data_buf(isert_conn, &isert_cmd->prot);
 
 	return ret;
 }
 
 static int
-isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
-	       struct isert_rdma_wr *wr)
+isert_reg_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn)
 {
+	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
 	struct se_cmd *se_cmd = &cmd->se_cmd;
-	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
 	struct isert_conn *isert_conn = conn->context;
 	struct fast_reg_descriptor *fr_desc = NULL;
 	struct ib_rdma_wr *rdma_wr;
@@ -2796,57 +2699,61 @@
 	int ret = 0;
 	unsigned long flags;
 
-	isert_cmd->tx_desc.isert_cmd = isert_cmd;
-
-	offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
+	offset = isert_cmd->iser_ib_op == ISER_IB_RDMA_READ ?
+			cmd->write_data_done : 0;
 	ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
 				 se_cmd->t_data_nents, se_cmd->data_length,
-				 offset, wr->iser_ib_op, &wr->data);
+				 offset, isert_cmd->iser_ib_op,
+				 &isert_cmd->data);
 	if (ret)
 		return ret;
 
-	if (wr->data.dma_nents != 1 || isert_prot_cmd(isert_conn, se_cmd)) {
+	if (isert_cmd->data.dma_nents != 1 ||
+	    isert_prot_cmd(isert_conn, se_cmd)) {
 		spin_lock_irqsave(&isert_conn->pool_lock, flags);
 		fr_desc = list_first_entry(&isert_conn->fr_pool,
 					   struct fast_reg_descriptor, list);
 		list_del(&fr_desc->list);
 		spin_unlock_irqrestore(&isert_conn->pool_lock, flags);
-		wr->fr_desc = fr_desc;
+		isert_cmd->fr_desc = fr_desc;
 	}
 
-	ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data,
-				ISERT_DATA_KEY_VALID, &wr->ib_sg[DATA]);
+	ret = isert_fast_reg_mr(isert_conn, fr_desc, &isert_cmd->data,
+				ISERT_DATA_KEY_VALID, &isert_cmd->ib_sg[DATA]);
 	if (ret)
 		goto unmap_cmd;
 
 	if (isert_prot_cmd(isert_conn, se_cmd)) {
-		ret = isert_handle_prot_cmd(isert_conn, isert_cmd, wr);
+		ret = isert_handle_prot_cmd(isert_conn, isert_cmd);
 		if (ret)
 			goto unmap_cmd;
 
-		ib_sg = &wr->ib_sg[SIG];
+		ib_sg = &isert_cmd->ib_sg[SIG];
 	} else {
-		ib_sg = &wr->ib_sg[DATA];
+		ib_sg = &isert_cmd->ib_sg[DATA];
 	}
 
-	memcpy(&wr->s_ib_sge, ib_sg, sizeof(*ib_sg));
-	wr->ib_sge = &wr->s_ib_sge;
-	wr->rdma_wr_num = 1;
-	memset(&wr->s_rdma_wr, 0, sizeof(wr->s_rdma_wr));
-	wr->rdma_wr = &wr->s_rdma_wr;
-	wr->isert_cmd = isert_cmd;
+	memcpy(&isert_cmd->s_ib_sge, ib_sg, sizeof(*ib_sg));
+	isert_cmd->ib_sge = &isert_cmd->s_ib_sge;
+	isert_cmd->rdma_wr_num = 1;
+	memset(&isert_cmd->s_rdma_wr, 0, sizeof(isert_cmd->s_rdma_wr));
+	isert_cmd->rdma_wr = &isert_cmd->s_rdma_wr;
 
-	rdma_wr = &isert_cmd->rdma_wr.s_rdma_wr;
-	rdma_wr->wr.sg_list = &wr->s_ib_sge;
+	rdma_wr = &isert_cmd->s_rdma_wr;
+	rdma_wr->wr.sg_list = &isert_cmd->s_ib_sge;
 	rdma_wr->wr.num_sge = 1;
-	rdma_wr->wr.wr_id = (uintptr_t)&isert_cmd->tx_desc;
-	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
+	rdma_wr->wr.wr_cqe = &isert_cmd->tx_desc.tx_cqe;
+	if (isert_cmd->iser_ib_op == ISER_IB_RDMA_WRITE) {
+		isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done;
+
 		rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
 		rdma_wr->remote_addr = isert_cmd->read_va;
 		rdma_wr->rkey = isert_cmd->read_stag;
 		rdma_wr->wr.send_flags = !isert_prot_cmd(isert_conn, se_cmd) ?
 				      0 : IB_SEND_SIGNALED;
 	} else {
+		isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
+
 		rdma_wr->wr.opcode = IB_WR_RDMA_READ;
 		rdma_wr->remote_addr = isert_cmd->write_va;
 		rdma_wr->rkey = isert_cmd->write_stag;
@@ -2861,7 +2768,7 @@ unmap_cmd:
 		list_add_tail(&fr_desc->list, &isert_conn->fr_pool);
 		list_add_tail(&fr_desc->list, &isert_conn->fr_pool);
 		spin_unlock_irqrestore(&isert_conn->pool_lock, flags);
 		spin_unlock_irqrestore(&isert_conn->pool_lock, flags);
 	}
 	}
-	isert_unmap_data_buf(isert_conn, &wr->data);
+	isert_unmap_data_buf(isert_conn, &isert_cmd->data);
 
 
 	return ret;
 	return ret;
 }
 }
@@ -2871,7 +2778,6 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 {
 {
 	struct se_cmd *se_cmd = &cmd->se_cmd;
 	struct se_cmd *se_cmd = &cmd->se_cmd;
 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
-	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
 	struct isert_conn *isert_conn = conn->context;
 	struct isert_conn *isert_conn = conn->context;
 	struct isert_device *device = isert_conn->device;
 	struct isert_device *device = isert_conn->device;
 	struct ib_send_wr *wr_failed;
 	struct ib_send_wr *wr_failed;
@@ -2880,8 +2786,8 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 	isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
 	isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
 		 isert_cmd, se_cmd->data_length);
 		 isert_cmd, se_cmd->data_length);
 
 
-	wr->iser_ib_op = ISER_IB_RDMA_WRITE;
-	rc = device->reg_rdma_mem(conn, cmd, wr);
+	isert_cmd->iser_ib_op = ISER_IB_RDMA_WRITE;
+	rc = device->reg_rdma_mem(isert_cmd, conn);
 	if (rc) {
 	if (rc) {
 		isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
 		isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
 		return rc;
 		return rc;
@@ -2898,8 +2804,8 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 		isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
 		isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
 		isert_init_send_wr(isert_conn, isert_cmd,
 		isert_init_send_wr(isert_conn, isert_cmd,
 				   &isert_cmd->tx_desc.send_wr);
 				   &isert_cmd->tx_desc.send_wr);
-		isert_cmd->rdma_wr.s_rdma_wr.wr.next = &isert_cmd->tx_desc.send_wr;
-		wr->rdma_wr_num += 1;
+		isert_cmd->s_rdma_wr.wr.next = &isert_cmd->tx_desc.send_wr;
+		isert_cmd->rdma_wr_num += 1;
 
 
 		rc = isert_post_recv(isert_conn, isert_cmd->rx_desc);
 		rc = isert_post_recv(isert_conn, isert_cmd->rx_desc);
 		if (rc) {
 		if (rc) {
@@ -2908,7 +2814,7 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 		}
 		}
 	}
 	}
 
 
-	rc = ib_post_send(isert_conn->qp, &wr->rdma_wr->wr, &wr_failed);
+	rc = ib_post_send(isert_conn->qp, &isert_cmd->rdma_wr->wr, &wr_failed);
 	if (rc)
 	if (rc)
 		isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
 		isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
 
 
@@ -2927,7 +2833,6 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
 {
 {
 	struct se_cmd *se_cmd = &cmd->se_cmd;
 	struct se_cmd *se_cmd = &cmd->se_cmd;
 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
-	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
 	struct isert_conn *isert_conn = conn->context;
 	struct isert_conn *isert_conn = conn->context;
 	struct isert_device *device = isert_conn->device;
 	struct isert_device *device = isert_conn->device;
 	struct ib_send_wr *wr_failed;
 	struct ib_send_wr *wr_failed;
@@ -2935,14 +2840,14 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
 
 
 	isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
 	isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
 		 isert_cmd, se_cmd->data_length, cmd->write_data_done);
 		 isert_cmd, se_cmd->data_length, cmd->write_data_done);
-	wr->iser_ib_op = ISER_IB_RDMA_READ;
-	rc = device->reg_rdma_mem(conn, cmd, wr);
+	isert_cmd->iser_ib_op = ISER_IB_RDMA_READ;
+	rc = device->reg_rdma_mem(isert_cmd, conn);
 	if (rc) {
 	if (rc) {
 		isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
 		isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
 		return rc;
 		return rc;
 	}
 	}
 
 
-	rc = ib_post_send(isert_conn->qp, &wr->rdma_wr->wr, &wr_failed);
+	rc = ib_post_send(isert_conn->qp, &isert_cmd->rdma_wr->wr, &wr_failed);
 	if (rc)
 	if (rc)
 		isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
 		isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
 
 
@@ -3214,6 +3119,7 @@ accept_wait:
 
 
 	conn->context = isert_conn;
 	conn->context = isert_conn;
 	isert_conn->conn = conn;
 	isert_conn->conn = conn;
+	isert_conn->state = ISER_CONN_BOUND;
 
 
 	isert_set_conn_info(np, conn, isert_conn);
 	isert_set_conn_info(np, conn, isert_conn);
 
 
@@ -3274,8 +3180,6 @@ static void isert_release_work(struct work_struct *work)
 
 
 	isert_info("Starting release conn %p\n", isert_conn);
 	isert_info("Starting release conn %p\n", isert_conn);
 
 
-	wait_for_completion(&isert_conn->wait);
-
 	mutex_lock(&isert_conn->mutex);
 	mutex_lock(&isert_conn->mutex);
 	isert_conn->state = ISER_CONN_DOWN;
 	isert_conn->state = ISER_CONN_DOWN;
 	mutex_unlock(&isert_conn->mutex);
 	mutex_unlock(&isert_conn->mutex);
@@ -3309,15 +3213,27 @@ isert_wait4cmds(struct iscsi_conn *conn)
 	}
 	}
 }
 }
 
 
+static void
+isert_beacon_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+	struct isert_conn *isert_conn = wc->qp->qp_context;
+
+	isert_print_wc(wc, "beacon");
+
+	isert_info("conn %p completing wait_comp_err\n", isert_conn);
+	complete(&isert_conn->wait_comp_err);
+}
+
 static void
 static void
 isert_wait4flush(struct isert_conn *isert_conn)
 isert_wait4flush(struct isert_conn *isert_conn)
 {
 {
 	struct ib_recv_wr *bad_wr;
 	struct ib_recv_wr *bad_wr;
+	static struct ib_cqe cqe = { .done = isert_beacon_done };
 
 
 	isert_info("conn %p\n", isert_conn);
 	isert_info("conn %p\n", isert_conn);
 
 
 	init_completion(&isert_conn->wait_comp_err);
 	init_completion(&isert_conn->wait_comp_err);
-	isert_conn->beacon.wr_id = ISER_BEACON_WRID;
+	isert_conn->beacon.wr_cqe = &cqe;
 	/* post an indication that all flush errors were consumed */
 	/* post an indication that all flush errors were consumed */
 	if (ib_post_recv(isert_conn->qp, &isert_conn->beacon, &bad_wr)) {
 	if (ib_post_recv(isert_conn->qp, &isert_conn->beacon, &bad_wr)) {
 		isert_err("conn %p failed to post beacon", isert_conn);
 		isert_err("conn %p failed to post beacon", isert_conn);
@@ -3369,14 +3285,6 @@ static void isert_wait_conn(struct iscsi_conn *conn)
 	isert_info("Starting conn %p\n", isert_conn);
 	isert_info("Starting conn %p\n", isert_conn);
 
 
 	mutex_lock(&isert_conn->mutex);
 	mutex_lock(&isert_conn->mutex);
-	/*
-	 * Only wait for wait_comp_err if the isert_conn made it
-	 * into full feature phase..
-	 */
-	if (isert_conn->state == ISER_CONN_INIT) {
-		mutex_unlock(&isert_conn->mutex);
-		return;
-	}
 	isert_conn_terminate(isert_conn);
 	isert_conn_terminate(isert_conn);
 	mutex_unlock(&isert_conn->mutex);
 	mutex_unlock(&isert_conn->mutex);
 
 

+ 37 - 35
drivers/infiniband/ulp/isert/ib_isert.h

@@ -36,9 +36,7 @@
 /* Constant PDU lengths calculations */
 #define ISER_HEADERS_LEN	(sizeof(struct iser_ctrl) + \
 				 sizeof(struct iscsi_hdr))
-#define ISER_RECV_DATA_SEG_LEN	8192
-#define ISER_RX_PAYLOAD_SIZE	(ISER_HEADERS_LEN + ISER_RECV_DATA_SEG_LEN)
-#define ISER_RX_LOGIN_SIZE	(ISER_HEADERS_LEN + ISCSI_DEF_MAX_RECV_SEG_LEN)
+#define ISER_RX_PAYLOAD_SIZE	(ISER_HEADERS_LEN + ISCSI_DEF_MAX_RECV_SEG_LEN)
 
 /* QP settings */
 /* Maximal bounds on received asynchronous PDUs */
@@ -62,12 +60,11 @@
 				ISERT_MAX_TX_MISC_PDUS	+ \
 				ISERT_MAX_RX_MISC_PDUS)
 
-#define ISER_RX_PAD_SIZE	(ISER_RECV_DATA_SEG_LEN + 4096 - \
-		(ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge)))
+#define ISER_RX_PAD_SIZE	(ISCSI_DEF_MAX_RECV_SEG_LEN + 4096 - \
+		(ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge) + \
+		 sizeof(struct ib_cqe)))
 
 #define ISCSI_ISER_SG_TABLESIZE		256
-#define ISER_FASTREG_LI_WRID		0xffffffffffffffffULL
-#define ISER_BEACON_WRID               0xfffffffffffffffeULL
 
 enum isert_desc_type {
 	ISCSI_TX_CONTROL,
@@ -84,6 +81,7 @@ enum iser_ib_op_code {
 enum iser_conn_state {
 	ISER_CONN_INIT,
 	ISER_CONN_UP,
+	ISER_CONN_BOUND,
 	ISER_CONN_FULL_FEATURE,
 	ISER_CONN_TERMINATING,
 	ISER_CONN_DOWN,
@@ -92,23 +90,35 @@ enum iser_conn_state {
 struct iser_rx_desc {
 	struct iser_ctrl iser_header;
 	struct iscsi_hdr iscsi_header;
-	char		data[ISER_RECV_DATA_SEG_LEN];
+	char		data[ISCSI_DEF_MAX_RECV_SEG_LEN];
 	u64		dma_addr;
 	struct ib_sge	rx_sg;
+	struct ib_cqe	rx_cqe;
 	char		pad[ISER_RX_PAD_SIZE];
 } __packed;
 
+static inline struct iser_rx_desc *cqe_to_rx_desc(struct ib_cqe *cqe)
+{
+	return container_of(cqe, struct iser_rx_desc, rx_cqe);
+}
+
 struct iser_tx_desc {
 	struct iser_ctrl iser_header;
 	struct iscsi_hdr iscsi_header;
 	enum isert_desc_type type;
 	u64		dma_addr;
 	struct ib_sge	tx_sg[2];
+	struct ib_cqe	tx_cqe;
 	int		num_sge;
-	struct isert_cmd *isert_cmd;
 	struct ib_send_wr send_wr;
 } __packed;
 
+static inline struct iser_tx_desc *cqe_to_tx_desc(struct ib_cqe *cqe)
+{
+	return container_of(cqe, struct iser_tx_desc, tx_cqe);
+}
+
+
 enum isert_indicator {
 	ISERT_PROTECTED		= 1 << 0,
 	ISERT_DATA_KEY_VALID	= 1 << 1,
@@ -144,20 +154,6 @@ enum {
 	SIG = 2,
 };
 
-struct isert_rdma_wr {
-	struct isert_cmd	*isert_cmd;
-	enum iser_ib_op_code	iser_ib_op;
-	struct ib_sge		*ib_sge;
-	struct ib_sge		s_ib_sge;
-	int			rdma_wr_num;
-	struct ib_rdma_wr	*rdma_wr;
-	struct ib_rdma_wr	s_rdma_wr;
-	struct ib_sge		ib_sg[3];
-	struct isert_data_buf	data;
-	struct isert_data_buf	prot;
-	struct fast_reg_descriptor *fr_desc;
-};
-
 struct isert_cmd {
 	uint32_t		read_stag;
 	uint32_t		write_stag;
@@ -170,22 +166,34 @@ struct isert_cmd {
 	struct iscsi_cmd	*iscsi_cmd;
 	struct iser_tx_desc	tx_desc;
 	struct iser_rx_desc	*rx_desc;
-	struct isert_rdma_wr	rdma_wr;
+	enum iser_ib_op_code	iser_ib_op;
+	struct ib_sge		*ib_sge;
+	struct ib_sge		s_ib_sge;
+	int			rdma_wr_num;
+	struct ib_rdma_wr	*rdma_wr;
+	struct ib_rdma_wr	s_rdma_wr;
+	struct ib_sge		ib_sg[3];
+	struct isert_data_buf	data;
+	struct isert_data_buf	prot;
+	struct fast_reg_descriptor *fr_desc;
 	struct work_struct	comp_work;
 	struct scatterlist	sg;
 };
 
+static inline struct isert_cmd *tx_desc_to_cmd(struct iser_tx_desc *desc)
+{
+	return container_of(desc, struct isert_cmd, tx_desc);
+}
+
 struct isert_device;
 
 struct isert_conn {
 	enum iser_conn_state	state;
-	int			post_recv_buf_count;
 	u32			responder_resources;
 	u32			initiator_depth;
 	bool			pi_support;
 	u32			max_sge;
-	char			*login_buf;
-	char			*login_req_buf;
+	struct iser_rx_desc	*login_req_buf;
 	char			*login_rsp_buf;
 	u64			login_req_dma;
 	int			login_req_len;
@@ -201,7 +209,6 @@ struct isert_conn {
 	struct ib_qp		*qp;
 	struct isert_device	*device;
 	struct mutex		mutex;
-	struct completion	wait;
 	struct completion	wait_comp_err;
 	struct kref		kref;
 	struct list_head	fr_pool;
@@ -221,17 +228,13 @@ struct isert_conn {
  *
  * @device:     pointer to device handle
  * @cq:         completion queue
- * @wcs:        work completion array
 * @active_qps: Number of active QPs attached
 *              to completion context
- * @work:       completion work handle
 */
 struct isert_comp {
 	struct isert_device     *device;
 	struct ib_cq		*cq;
-	struct ib_wc		 wcs[16];
 	int                      active_qps;
-	struct work_struct	 work;
 };
 
 struct isert_device {
@@ -243,9 +246,8 @@ struct isert_device {
 	struct isert_comp	*comps;
 	int                     comps_used;
 	struct list_head	dev_node;
-	int			(*reg_rdma_mem)(struct iscsi_conn *conn,
-						    struct iscsi_cmd *cmd,
-						    struct isert_rdma_wr *wr);
+	int			(*reg_rdma_mem)(struct isert_cmd *isert_cmd,
						struct iscsi_conn *conn);
 	void			(*unreg_rdma_mem)(struct isert_cmd *isert_cmd,
 						  struct isert_conn *isert_conn);
 };

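The header conversion above is the crux of the new CQ API switch: the old opaque wr_id cookies (ISER_BEACON_WRID and friends) are gone, and each descriptor instead embeds a struct ib_cqe whose ->done() handler the IB core invokes directly, with container_of() recovering the descriptor exactly as cqe_to_rx_desc()/cqe_to_tx_desc() do. A minimal, illustrative sketch of the idiom (my_desc, my_desc_done and my_post_recv are hypothetical names, not part of the patch):

#include <rdma/ib_verbs.h>

/* illustrative descriptor with an embedded completion entry */
struct my_desc {
	struct ib_cqe	cqe;
	struct ib_sge	sge;
};

/* the IB core calls ->done() directly for each work completion */
static void my_desc_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct my_desc *desc = container_of(wc->wr_cqe, struct my_desc, cqe);

	if (wc->status != IB_WC_SUCCESS)
		return;		/* flush errors land here on teardown */
	/* ... process the completed descriptor ... */
	(void)desc;
}

static int my_post_recv(struct ib_qp *qp, struct my_desc *desc)
{
	struct ib_recv_wr wr = {}, *bad_wr;

	desc->cqe.done = my_desc_done;	/* replaces the old wr_id cookie */
	wr.wr_cqe = &desc->cqe;
	wr.sg_list = &desc->sge;
	wr.num_sge = 1;

	return ib_post_recv(qp, &wr, &bad_wr);
}

The payoff is that per-completion dispatch needs no switch on wr_id, which is what allowed the wcs[]/work members of isert_comp to be deleted above.
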
+ 22 - 54
drivers/infiniband/ulp/srpt/ib_srpt.c

@@ -1264,40 +1264,26 @@ free_mem:
  */
 static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
 {
+	struct se_session *se_sess;
 	struct srpt_send_ioctx *ioctx;
-	unsigned long flags;
+	int tag;
 
 	BUG_ON(!ch);
+	se_sess = ch->sess;
 
-	ioctx = NULL;
-	spin_lock_irqsave(&ch->spinlock, flags);
-	if (!list_empty(&ch->free_list)) {
-		ioctx = list_first_entry(&ch->free_list,
-					 struct srpt_send_ioctx, free_list);
-		list_del(&ioctx->free_list);
+	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+	if (tag < 0) {
+		pr_err("Unable to obtain tag for srpt_send_ioctx\n");
+		return NULL;
 	}
-	spin_unlock_irqrestore(&ch->spinlock, flags);
-
-	if (!ioctx)
-		return ioctx;
-
-	BUG_ON(ioctx->ch != ch);
+	ioctx = &((struct srpt_send_ioctx *)se_sess->sess_cmd_map)[tag];
+	memset(ioctx, 0, sizeof(struct srpt_send_ioctx));
+	ioctx->ch = ch;
 	spin_lock_init(&ioctx->spinlock);
 	ioctx->state = SRPT_STATE_NEW;
-	ioctx->n_rbuf = 0;
-	ioctx->rbufs = NULL;
-	ioctx->n_rdma = 0;
-	ioctx->n_rdma_wrs = 0;
-	ioctx->rdma_wrs = NULL;
-	ioctx->mapped_sg_count = 0;
 	init_completion(&ioctx->tx_done);
-	ioctx->queue_status_only = false;
-	/*
-	 * transport_init_se_cmd() does not initialize all fields, so do it
-	 * here.
-	 */
-	memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
-	memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));
+
+	ioctx->cmd.map_tag = tag;
 
 	return ioctx;
 }
@@ -2034,9 +2020,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
 	struct srp_login_rej *rej;
 	struct ib_cm_rep_param *rep_param;
 	struct srpt_rdma_ch *ch, *tmp_ch;
-	struct se_node_acl *se_acl;
 	u32 it_iu_len;
-	int i, ret = 0;
+	int ret = 0;
 	unsigned char *p;
 
 	WARN_ON_ONCE(irqs_disabled());
@@ -2158,12 +2143,6 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
 	if (!ch->ioctx_ring)
 		goto free_ch;
 
-	INIT_LIST_HEAD(&ch->free_list);
-	for (i = 0; i < ch->rq_size; i++) {
-		ch->ioctx_ring[i]->ch = ch;
-		list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list);
-	}
-
 	ret = srpt_create_ch_ib(ch);
 	if (ret) {
 		rej->reason = cpu_to_be32(
@@ -2193,19 +2172,13 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
 	pr_debug("registering session %s\n", ch->sess_name);
 	p = &ch->sess_name[0];
 
-	ch->sess = transport_init_session(TARGET_PROT_NORMAL);
-	if (IS_ERR(ch->sess)) {
-		rej->reason = cpu_to_be32(
-				SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
-		pr_debug("Failed to create session\n");
-		goto destroy_ib;
-	}
-
 try_again:
-	se_acl = core_tpg_get_initiator_node_acl(&sport->port_tpg_1, p);
-	if (!se_acl) {
+	ch->sess = target_alloc_session(&sport->port_tpg_1, ch->rq_size,
+					sizeof(struct srpt_send_ioctx),
+					TARGET_PROT_NORMAL, p, ch, NULL);
+	if (IS_ERR(ch->sess)) {
 		pr_info("Rejected login because no ACL has been"
-			" configured yet for initiator %s.\n", ch->sess_name);
+			" configured yet for initiator %s.\n", p);
 		/*
 		 * XXX: Hack to retry of ch->i_port_id without leading '0x'
 		 */
@@ -2213,14 +2186,11 @@ try_again:
 			p += 2;
 			goto try_again;
 		}
-		rej->reason = cpu_to_be32(
+		rej->reason = cpu_to_be32((PTR_ERR(ch->sess) == -ENOMEM) ?
+				SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES :
 				SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
-		transport_free_session(ch->sess);
 		goto destroy_ib;
 	}
-	ch->sess->se_node_acl = se_acl;
-
-	transport_register_session(&sport->port_tpg_1, se_acl, ch->sess, ch);
 
 	pr_debug("Establish connection sess=%p name=%s cm_id=%p\n", ch->sess,
 		 ch->sess_name, ch->cm_id);
@@ -2911,7 +2881,7 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
 	struct srpt_send_ioctx *ioctx = container_of(se_cmd,
 				struct srpt_send_ioctx, cmd);
 	struct srpt_rdma_ch *ch = ioctx->ch;
-	unsigned long flags;
+	struct se_session *se_sess = ch->sess;
 
 	WARN_ON(ioctx->state != SRPT_STATE_DONE);
 	WARN_ON(ioctx->mapped_sg_count != 0);
@@ -2922,9 +2892,7 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
 		ioctx->n_rbuf = 0;
 	}
 
-	spin_lock_irqsave(&ch->spinlock, flags);
-	list_add(&ioctx->free_list, &ch->free_list);
-	spin_unlock_irqrestore(&ch->spinlock, flags);
+	percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
}
 
 /**

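The srpt conversion above replaces the driver-private free list with the tag pool that target_alloc_session() pre-allocates per session; the identical recipe reappears in sbp-target further down. The pattern: draw a tag from sess_tag_pool, index into sess_cmd_map, remember the tag in se_cmd->map_tag, and return it from ->release_cmd(). A generic sketch under those assumptions (fabric_cmd and the two helpers are hypothetical names):

#include <linux/percpu_ida.h>
#include <linux/string.h>
#include <target/target_core_base.h>

/* hypothetical per-fabric command context, sized into sess_cmd_map */
struct fabric_cmd {
	struct se_cmd	se_cmd;
	/* ... driver-private state ... */
};

static struct fabric_cmd *fabric_get_cmd(struct se_session *se_sess)
{
	struct fabric_cmd *cmd;
	int tag;

	/* TASK_RUNNING: never blocks, returns a negative value when empty */
	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
	if (tag < 0)
		return NULL;

	cmd = &((struct fabric_cmd *)se_sess->sess_cmd_map)[tag];
	memset(cmd, 0, sizeof(*cmd));
	cmd->se_cmd.map_tag = tag;	/* remembered for the release path */
	return cmd;
}

static void fabric_release_cmd(struct se_cmd *se_cmd)
{
	struct se_session *se_sess = se_cmd->se_sess;

	percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
}

Compared with the deleted free list, this drops the per-channel lock from the hot path and reuses the same backing allocation the session already owns.
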
+ 0 - 2
drivers/infiniband/ulp/srpt/ib_srpt.h

@@ -179,7 +179,6 @@ struct srpt_recv_ioctx {
  * struct srpt_send_ioctx - SRPT send I/O context.
  * @ioctx:       See above.
  * @ch:          Channel pointer.
- * @free_list:   Node in srpt_rdma_ch.free_list.
 * @n_rbuf:      Number of data buffers in the received SRP command.
 * @rbufs:       Pointer to SRP data buffer array.
 * @single_rbuf: SRP data buffer if the command has only a single buffer.
@@ -202,7 +201,6 @@ struct srpt_send_ioctx {
 	struct srp_direct_buf	*rbufs;
 	struct srp_direct_buf	single_rbuf;
 	struct scatterlist	*sg;
-	struct list_head	free_list;
 	spinlock_t		spinlock;
 	enum srpt_command_state	state;
 	struct se_cmd		cmd;

+ 1 - 0
drivers/scsi/qla2xxx/qla_def.h

@@ -2963,6 +2963,7 @@ struct qlt_hw_data {
 
 	uint8_t tgt_node_name[WWN_SIZE];
 
+	struct dentry *dfs_tgt_sess;
 	struct list_head q_full_list;
 	uint32_t num_pend_cmds;
 	uint32_t num_qfull_cmds_alloc;

+ 55 - 0
drivers/scsi/qla2xxx/qla_dfs.c

@@ -12,6 +12,47 @@
 static struct dentry *qla2x00_dfs_root;
 static atomic_t qla2x00_dfs_root_count;
 
+static int
+qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused)
+{
+	scsi_qla_host_t *vha = s->private;
+	struct qla_hw_data *ha = vha->hw;
+	unsigned long flags;
+	struct qla_tgt_sess *sess = NULL;
+	struct qla_tgt *tgt= vha->vha_tgt.qla_tgt;
+
+	seq_printf(s, "%s\n",vha->host_str);
+	if (tgt) {
+		seq_printf(s, "Port ID   Port Name                Handle\n");
+
+		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+		list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
+			seq_printf(s, "%02x:%02x:%02x  %8phC  %d\n",
+					   sess->s_id.b.domain,sess->s_id.b.area,
+					   sess->s_id.b.al_pa,	sess->port_name,
+					   sess->loop_id);
+		}
+		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+	}
+
+	return 0;
+}
+
+static int
+qla2x00_dfs_tgt_sess_open(struct inode *inode, struct file *file)
+{
+	scsi_qla_host_t *vha = inode->i_private;
+	return single_open(file, qla2x00_dfs_tgt_sess_show, vha);
+}
+
+
+static const struct file_operations dfs_tgt_sess_ops = {
+	.open		= qla2x00_dfs_tgt_sess_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
 static int
 qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
 {
@@ -248,6 +289,15 @@ create_nodes:
 		    "Unable to create debugfs fce node.\n");
 		goto out;
 	}
+
+	ha->tgt.dfs_tgt_sess = debugfs_create_file("tgt_sess",
+		S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_sess_ops);
+	if (!ha->tgt.dfs_tgt_sess) {
+		ql_log(ql_log_warn, vha, 0xffff,
+			"Unable to create debugFS tgt_sess node.\n");
+		goto out;
+	}
+
 out:
	return 0;
 }
@@ -257,6 +307,11 @@ qla2x00_dfs_remove(scsi_qla_host_t *vha)
 {
 	struct qla_hw_data *ha = vha->hw;
 
+	if (ha->tgt.dfs_tgt_sess) {
+		debugfs_remove(ha->tgt.dfs_tgt_sess);
+		ha->tgt.dfs_tgt_sess = NULL;
+	}
+
 	if (ha->dfs_fw_resource_cnt) {
 		debugfs_remove(ha->dfs_fw_resource_cnt);
 		ha->dfs_fw_resource_cnt = NULL;

drivers/scsi/qla2xxx/qla_target.c

@@ -641,7 +641,8 @@ void qlt_unreg_sess(struct qla_tgt_sess *sess)
 {
 {
 	struct scsi_qla_host *vha = sess->vha;
 	struct scsi_qla_host *vha = sess->vha;
 
 
-	vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
+	if (sess->se_sess)
+		vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
 
 
 	if (!list_empty(&sess->del_list_entry))
 	if (!list_empty(&sess->del_list_entry))
 		list_del_init(&sess->del_list_entry);
 		list_del_init(&sess->del_list_entry);
@@ -856,8 +857,12 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
 			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
 			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
 			    "Timeout: sess %p about to be deleted\n",
 			    "Timeout: sess %p about to be deleted\n",
 			    sess);
 			    sess);
-			ha->tgt.tgt_ops->shutdown_sess(sess);
-			ha->tgt.tgt_ops->put_sess(sess);
+			if (sess->se_sess) {
+				ha->tgt.tgt_ops->shutdown_sess(sess);
+				ha->tgt.tgt_ops->put_sess(sess);
+			} else {
+				qlt_unreg_sess(sess);
+			}
 		} else {
 		} else {
 			schedule_delayed_work(&tgt->sess_del_work,
 			schedule_delayed_work(&tgt->sess_del_work,
 			    sess->expires - elapsed);
 			    sess->expires - elapsed);
@@ -879,7 +884,6 @@ static struct qla_tgt_sess *qlt_create_sess(
 	struct qla_hw_data *ha = vha->hw;
 	struct qla_hw_data *ha = vha->hw;
 	struct qla_tgt_sess *sess;
 	struct qla_tgt_sess *sess;
 	unsigned long flags;
 	unsigned long flags;
-	unsigned char be_sid[3];
 
 
 	/* Check to avoid double sessions */
 	/* Check to avoid double sessions */
 	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
 	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
@@ -905,6 +909,14 @@ static struct qla_tgt_sess *qlt_create_sess(
 			if (sess->deleted)
 			if (sess->deleted)
 				qlt_undelete_sess(sess);
 				qlt_undelete_sess(sess);
 
 
+			if (!sess->se_sess) {
+				if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
+				    &sess->port_name[0], sess) < 0) {
+					spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+					return NULL;
+				}
+			}
+
 			kref_get(&sess->se_sess->sess_kref);
 			kref_get(&sess->se_sess->sess_kref);
 			ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
 			ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
 						(fcport->flags & FCF_CONF_COMP_SUPPORTED));
 						(fcport->flags & FCF_CONF_COMP_SUPPORTED));
@@ -948,26 +960,6 @@ static struct qla_tgt_sess *qlt_create_sess(
 	    "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
 	    "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
 	    sess, vha->vha_tgt.qla_tgt);
 	    sess, vha->vha_tgt.qla_tgt);
 
 
-	be_sid[0] = sess->s_id.b.domain;
-	be_sid[1] = sess->s_id.b.area;
-	be_sid[2] = sess->s_id.b.al_pa;
-	/*
-	 * Determine if this fc_port->port_name is allowed to access
-	 * target mode using explict NodeACLs+MappedLUNs, or using
-	 * TPG demo mode.  If this is successful a target mode FC nexus
-	 * is created.
-	 */
-	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
-	    &fcport->port_name[0], sess, &be_sid[0], fcport->loop_id) < 0) {
-		kfree(sess);
-		return NULL;
-	}
-	/*
-	 * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
-	 * access across ->tgt.sess_lock reaquire.
-	 */
-	kref_get(&sess->se_sess->sess_kref);
-
 	sess->conf_compl_supported = (fcport->flags & FCF_CONF_COMP_SUPPORTED);
 	sess->conf_compl_supported = (fcport->flags & FCF_CONF_COMP_SUPPORTED);
 	BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
 	BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
 	memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));
 	memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));
@@ -985,6 +977,23 @@ static struct qla_tgt_sess *qlt_create_sess(
 	    fcport->loop_id, sess->s_id.b.domain, sess->s_id.b.area,
 	    fcport->loop_id, sess->s_id.b.domain, sess->s_id.b.area,
 	    sess->s_id.b.al_pa, sess->conf_compl_supported ?  "" : "not ");
 	    sess->s_id.b.al_pa, sess->conf_compl_supported ?  "" : "not ");
 
 
+	/*
+	 * Determine if this fc_port->port_name is allowed to access
+	 * target mode using explict NodeACLs+MappedLUNs, or using
+	 * TPG demo mode.  If this is successful a target mode FC nexus
+	 * is created.
+	 */
+	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
+	    &fcport->port_name[0], sess) < 0) {
+		return NULL;
+	} else {
+		/*
+		 * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
+		 * access across ->tgt.sess_lock reaquire.
+		 */
+		kref_get(&sess->se_sess->sess_kref);
+	}
+
 	return sess;
 	return sess;
 }
 }
 
 

+ 1 - 1
drivers/scsi/qla2xxx/qla_target.h

@@ -731,7 +731,7 @@ struct qla_tgt_func_tmpl {
 	void (*free_session)(struct qla_tgt_sess *);
 
 	int (*check_initiator_node_acl)(struct scsi_qla_host *, unsigned char *,
-					void *, uint8_t *, uint16_t);
+					struct qla_tgt_sess *);
 	void (*update_sess)(struct qla_tgt_sess *, port_id_t, uint16_t, bool);
 	struct qla_tgt_sess *(*find_sess_by_loop_id)(struct scsi_qla_host *,
 						const uint16_t);

+ 40 - 39
drivers/scsi/qla2xxx/tcm_qla2xxx.c

@@ -1406,6 +1406,39 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
 	transport_deregister_session(sess->se_sess);
 }
 
+static int tcm_qla2xxx_session_cb(struct se_portal_group *se_tpg,
+				  struct se_session *se_sess, void *p)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+				struct tcm_qla2xxx_tpg, se_tpg);
+	struct tcm_qla2xxx_lport *lport = tpg->lport;
+	struct qla_hw_data *ha = lport->qla_vha->hw;
+	struct se_node_acl *se_nacl = se_sess->se_node_acl;
+	struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
+				struct tcm_qla2xxx_nacl, se_node_acl);
+	struct qla_tgt_sess *qlat_sess = p;
+	uint16_t loop_id = qlat_sess->loop_id;
+	unsigned long flags;
+	unsigned char be_sid[3];
+
+	be_sid[0] = qlat_sess->s_id.b.domain;
+	be_sid[1] = qlat_sess->s_id.b.area;
+	be_sid[2] = qlat_sess->s_id.b.al_pa;
+
+	/*
+	 * And now setup se_nacl and session pointers into HW lport internal
+	 * mappings for fabric S_ID and LOOP_ID.
+	 */
+	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+	tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl,
+				     se_sess, qlat_sess, be_sid);
+	tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl,
+					se_sess, qlat_sess, loop_id);
+	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+	return 0;
+}
+
 /*
  * Called via qlt_create_sess():ha->qla2x_tmpl->check_initiator_node_acl()
  * to locate struct se_node_acl
@@ -1413,20 +1446,13 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
 static int tcm_qla2xxx_check_initiator_node_acl(
 	scsi_qla_host_t *vha,
 	unsigned char *fc_wwpn,
-	void *qla_tgt_sess,
-	uint8_t *s_id,
-	uint16_t loop_id)
+	struct qla_tgt_sess *qlat_sess)
 {
 	struct qla_hw_data *ha = vha->hw;
 	struct tcm_qla2xxx_lport *lport;
 	struct tcm_qla2xxx_tpg *tpg;
-	struct tcm_qla2xxx_nacl *nacl;
-	struct se_portal_group *se_tpg;
-	struct se_node_acl *se_nacl;
 	struct se_session *se_sess;
-	struct qla_tgt_sess *sess = qla_tgt_sess;
 	unsigned char port_name[36];
-	unsigned long flags;
 	int num_tags = (ha->cur_fw_xcb_count) ? ha->cur_fw_xcb_count :
 		       TCM_QLA2XXX_DEFAULT_TAGS;
 
@@ -1444,15 +1470,6 @@ static int tcm_qla2xxx_check_initiator_node_acl(
 		pr_err("Unable to lcoate struct tcm_qla2xxx_lport->tpg_1\n");
 		return -EINVAL;
 	}
-	se_tpg = &tpg->se_tpg;
-
-	se_sess = transport_init_session_tags(num_tags,
-					      sizeof(struct qla_tgt_cmd),
-					      TARGET_PROT_ALL);
-	if (IS_ERR(se_sess)) {
-		pr_err("Unable to initialize struct se_session\n");
-		return PTR_ERR(se_sess);
-	}
 	/*
 	 * Format the FCP Initiator port_name into colon seperated values to
 	 * match the format by tcm_qla2xxx explict ConfigFS NodeACLs.
@@ -1463,28 +1480,12 @@ static int tcm_qla2xxx_check_initiator_node_acl(
 	 * Locate our struct se_node_acl either from an explict NodeACL created
 	 * via ConfigFS, or via running in TPG demo mode.
 	 */
-	se_sess->se_node_acl = core_tpg_check_initiator_node_acl(se_tpg,
-					port_name);
-	if (!se_sess->se_node_acl) {
-		transport_free_session(se_sess);
-		return -EINVAL;
-	}
-	se_nacl = se_sess->se_node_acl;
-	nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
-	/*
-	 * And now setup the new se_nacl and session pointers into our HW lport
-	 * mappings for fabric S_ID and LOOP_ID.
-	 */
-	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
-	tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl, se_sess,
-			qla_tgt_sess, s_id);
-	tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl, se_sess,
-			qla_tgt_sess, loop_id);
-	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
-	/*
-	 * Finally register the new FC Nexus with TCM
-	 */
-	transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess);
+	se_sess = target_alloc_session(&tpg->se_tpg, num_tags,
+				       sizeof(struct qla_tgt_cmd),
+				       TARGET_PROT_ALL, port_name,
+				       qlat_sess, tcm_qla2xxx_session_cb);
+	if (IS_ERR(se_sess))
+		return PTR_ERR(se_sess);
 
 	return 0;
 }

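This is the canonical shape of the new target_alloc_session() helper used throughout the series: it allocates the se_session (plus a tag pool when tag_num is non-zero), resolves the NodeACL for the initiator name, runs the fabric callback to wire up driver-private state, and registers the session, unwinding everything itself on failure so callers only check IS_ERR(). A condensed, hypothetical caller (the my_fabric_* names are illustrative, not from any driver):

#include <linux/err.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

/* hypothetical fabric-private types, for illustration only */
struct my_fabric_cmd	{ struct se_cmd se_cmd; };
struct my_fabric_nexus	{ struct se_session *se_sess; };

static int my_fabric_session_cb(struct se_portal_group *se_tpg,
				struct se_session *se_sess, void *p)
{
	struct my_fabric_nexus *nexus = p;	/* 'private' pointer from below */

	/* runs after the NodeACL lookup succeeds, before registration */
	nexus->se_sess = se_sess;
	return 0;		/* non-zero unwinds the whole allocation */
}

static int my_fabric_make_nexus(struct se_portal_group *se_tpg,
				struct my_fabric_nexus *nexus,
				const char *initiator_name)
{
	struct se_session *se_sess;

	se_sess = target_alloc_session(se_tpg, 128 /* tags */,
				       sizeof(struct my_fabric_cmd),
				       TARGET_PROT_NORMAL, initiator_name,
				       nexus, my_fabric_session_cb);
	if (IS_ERR(se_sess))
		return PTR_ERR(se_sess);	/* nothing left to clean up */
	return 0;
}

Drivers that need no extra wiring (srpt, sbp above and below) simply pass a NULL callback.
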
+ 18 - 28
drivers/target/loopback/tcm_loop.c

@@ -802,58 +802,48 @@ static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = {
 
 /* Start items for tcm_loop_nexus_cit */
 
+static int tcm_loop_alloc_sess_cb(struct se_portal_group *se_tpg,
+				  struct se_session *se_sess, void *p)
+{
+	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
+					struct tcm_loop_tpg, tl_se_tpg);
+
+	tl_tpg->tl_nexus = p;
+	return 0;
+}
+
 static int tcm_loop_make_nexus(
 	struct tcm_loop_tpg *tl_tpg,
 	const char *name)
 {
-	struct se_portal_group *se_tpg;
 	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
 	struct tcm_loop_nexus *tl_nexus;
-	int ret = -ENOMEM;
+	int ret;
 
 	if (tl_tpg->tl_nexus) {
 		pr_debug("tl_tpg->tl_nexus already exists\n");
 		return -EEXIST;
 	}
-	se_tpg = &tl_tpg->tl_se_tpg;
 
 	tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
 	if (!tl_nexus) {
 		pr_err("Unable to allocate struct tcm_loop_nexus\n");
 		return -ENOMEM;
 	}
-	/*
-	 * Initialize the struct se_session pointer
-	 */
-	tl_nexus->se_sess = transport_init_session(
-				TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS);
+
+	tl_nexus->se_sess = target_alloc_session(&tl_tpg->tl_se_tpg, 0, 0,
					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
					name, tl_nexus, tcm_loop_alloc_sess_cb);
 	if (IS_ERR(tl_nexus->se_sess)) {
 		ret = PTR_ERR(tl_nexus->se_sess);
-		goto out;
-	}
-	/*
-	 * Since we are running in 'demo mode' this call with generate a
-	 * struct se_node_acl for the tcm_loop struct se_portal_group with the SCSI
-	 * Initiator port name of the passed configfs group 'name'.
-	 */
-	tl_nexus->se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
-				se_tpg, (unsigned char *)name);
-	if (!tl_nexus->se_sess->se_node_acl) {
-		transport_free_session(tl_nexus->se_sess);
-		goto out;
+		kfree(tl_nexus);
+		return ret;
 	}
-	/* Now, register the I_T Nexus as active. */
-	transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
-			tl_nexus->se_sess, tl_nexus);
-	tl_tpg->tl_nexus = tl_nexus;
+
 	pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
 		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
 		name);
 	return 0;
-
-out:
-	kfree(tl_nexus);
-	return ret;
 }
 
 static int tcm_loop_drop_nexus(

+ 52 - 43
drivers/target/sbp/sbp_target.c

@@ -196,45 +196,30 @@ static struct sbp_session *sbp_session_create(
 	struct sbp_session *sess;
 	int ret;
 	char guid_str[17];
-	struct se_node_acl *se_nacl;
+
+	snprintf(guid_str, sizeof(guid_str), "%016llx", guid);
 
 	sess = kmalloc(sizeof(*sess), GFP_KERNEL);
 	if (!sess) {
 		pr_err("failed to allocate session descriptor\n");
 		return ERR_PTR(-ENOMEM);
 	}
+	spin_lock_init(&sess->lock);
+	INIT_LIST_HEAD(&sess->login_list);
+	INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);
+	sess->guid = guid;
 
-	sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
+	sess->se_sess = target_alloc_session(&tpg->se_tpg, 128,
+					     sizeof(struct sbp_target_request),
+					     TARGET_PROT_NORMAL, guid_str,
+					     sess, NULL);
 	if (IS_ERR(sess->se_sess)) {
 		pr_err("failed to init se_session\n");
-
 		ret = PTR_ERR(sess->se_sess);
 		kfree(sess);
 		return ERR_PTR(ret);
 	}
 
-	snprintf(guid_str, sizeof(guid_str), "%016llx", guid);
-
-	se_nacl = core_tpg_check_initiator_node_acl(&tpg->se_tpg, guid_str);
-	if (!se_nacl) {
-		pr_warn("Node ACL not found for %s\n", guid_str);
-
-		transport_free_session(sess->se_sess);
-		kfree(sess);
-
-		return ERR_PTR(-EPERM);
-	}
-
-	sess->se_sess->se_node_acl = se_nacl;
-
-	spin_lock_init(&sess->lock);
-	INIT_LIST_HEAD(&sess->login_list);
-	INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);
-
-	sess->guid = guid;
-
-	transport_register_session(&tpg->se_tpg, se_nacl, sess->se_sess, sess);
-
 	return sess;
 }
 
@@ -908,7 +893,6 @@ static void tgt_agent_process_work(struct work_struct *work)
 					STATUS_BLOCK_SBP_STATUS(
 						SBP_STATUS_REQ_TYPE_NOTSUPP));
 			sbp_send_status(req);
-			sbp_free_request(req);
 			return;
 		case 3: /* Dummy ORB */
 			req->status.status |= cpu_to_be32(
@@ -919,7 +903,6 @@ static void tgt_agent_process_work(struct work_struct *work)
 					STATUS_BLOCK_SBP_STATUS(
 						SBP_STATUS_DUMMY_ORB_COMPLETE));
 			sbp_send_status(req);
-			sbp_free_request(req);
 			return;
 		default:
 			BUG();
@@ -938,6 +921,25 @@ static inline bool tgt_agent_check_active(struct sbp_target_agent *agent)
 	return active;
 }
 
+static struct sbp_target_request *sbp_mgt_get_req(struct sbp_session *sess,
+	struct fw_card *card, u64 next_orb)
+{
+	struct se_session *se_sess = sess->se_sess;
+	struct sbp_target_request *req;
+	int tag;
+
+	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
+	if (tag < 0)
+		return ERR_PTR(-ENOMEM);
+
+	req = &((struct sbp_target_request *)se_sess->sess_cmd_map)[tag];
+	memset(req, 0, sizeof(*req));
+	req->se_cmd.map_tag = tag;
+	req->se_cmd.tag = next_orb;
+
+	return req;
+}
+
 static void tgt_agent_fetch_work(struct work_struct *work)
 {
 	struct sbp_target_agent *agent =
@@ -949,8 +951,8 @@ static void tgt_agent_fetch_work(struct work_struct *work)
 	u64 next_orb = agent->orb_pointer;
 
 	while (next_orb && tgt_agent_check_active(agent)) {
-		req = kzalloc(sizeof(*req), GFP_KERNEL);
-		if (!req) {
+		req = sbp_mgt_get_req(sess, sess->card, next_orb);
+		if (IS_ERR(req)) {
 			spin_lock_bh(&agent->lock);
 			agent->state = AGENT_STATE_DEAD;
 			spin_unlock_bh(&agent->lock);
@@ -985,7 +987,6 @@ static void tgt_agent_fetch_work(struct work_struct *work)
 			spin_unlock_bh(&agent->lock);
 
 			sbp_send_status(req);
-			sbp_free_request(req);
 			return;
 		}
 
@@ -1232,7 +1233,7 @@ static void sbp_handle_command(struct sbp_target_request *req)
 	req->se_cmd.tag = req->orb_pointer;
 	if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
 			      req->sense_buf, unpacked_lun, data_length,
-			      TCM_SIMPLE_TAG, data_dir, 0))
+			      TCM_SIMPLE_TAG, data_dir, TARGET_SCF_ACK_KREF))
 		goto err;
 
 	return;
@@ -1244,7 +1245,6 @@ err:
 		STATUS_BLOCK_LEN(1) |
 		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
 	sbp_send_status(req);
-	sbp_free_request(req);
 }
 
 /*
@@ -1343,22 +1343,29 @@ static int sbp_rw_data(struct sbp_target_request *req)
 
 static int sbp_send_status(struct sbp_target_request *req)
 {
-	int ret, length;
+	int rc, ret = 0, length;
 	struct sbp_login_descriptor *login = req->login;
 
 	length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4;
 
-	ret = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
+	rc = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
 			login->status_fifo_addr, &req->status, length);
-	if (ret != RCODE_COMPLETE) {
-		pr_debug("sbp_send_status: write failed: 0x%x\n", ret);
-		return -EIO;
+	if (rc != RCODE_COMPLETE) {
+		pr_debug("sbp_send_status: write failed: 0x%x\n", rc);
+		ret = -EIO;
+		goto put_ref;
 	}
 
 	pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n",
 			req->orb_pointer);
-
-	return 0;
+	/*
+	 * Drop the extra ACK_KREF reference taken by target_submit_cmd()
+	 * ahead of sbp_check_stop_free() -> transport_generic_free_cmd()
+	 * final se_cmd->cmd_kref put.
+	 */
+put_ref:
+	target_put_sess_cmd(&req->se_cmd);
+	return ret;
 }
 
 static void sbp_sense_mangle(struct sbp_target_request *req)
@@ -1447,9 +1454,13 @@ static int sbp_send_sense(struct sbp_target_request *req)
 
 static void sbp_free_request(struct sbp_target_request *req)
 {
+	struct se_cmd *se_cmd = &req->se_cmd;
+	struct se_session *se_sess = se_cmd->se_sess;
+
 	kfree(req->pg_tbl);
 	kfree(req->cmd_buf);
-	kfree(req);
+
+	percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
 }
 
 static void sbp_mgt_agent_process(struct work_struct *work)
@@ -1609,7 +1620,6 @@ static void sbp_mgt_agent_rw(struct fw_card *card,
 			rcode = RCODE_CONFLICT_ERROR;
 			goto out;
 		}
-
 		req = kzalloc(sizeof(*req), GFP_ATOMIC);
 		if (!req) {
 			rcode = RCODE_CONFLICT_ERROR;
@@ -1815,8 +1825,7 @@ static int sbp_check_stop_free(struct se_cmd *se_cmd)
 	struct sbp_target_request *req = container_of(se_cmd,
 			struct sbp_target_request, se_cmd);
 
-	transport_generic_free_cmd(&req->se_cmd, 0);
-	return 1;
+	return transport_generic_free_cmd(&req->se_cmd, 0);
}
 
 static int sbp_count_se_tpg_luns(struct se_portal_group *tpg)

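The sbp changes above demonstrate the TARGET_SCF_ACK_KREF convention adopted across this series: passing the flag to target_submit_cmd() makes the core hold an extra se_cmd->cmd_kref reference on the fabric's behalf, so the command cannot be released underneath the fabric's asynchronous status/data transfer; the fabric then drops that reference with target_put_sess_cmd() after its last access (here, at the end of the status write). A bare-bones sketch (fabric_req and the helpers are hypothetical):

#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

/* hypothetical request wrapper, as in the sbp conversion above */
struct fabric_req {
	struct se_cmd se_cmd;
};

static int fabric_submit(struct fabric_req *req, struct se_session *se_sess,
			 unsigned char *cdb, unsigned char *sense,
			 u64 unpacked_lun, u32 data_length, int data_dir)
{
	/*
	 * TARGET_SCF_ACK_KREF: the core takes an extra cmd_kref reference
	 * for the fabric at submission time.
	 */
	return target_submit_cmd(&req->se_cmd, se_sess, cdb, sense,
				 unpacked_lun, data_length, TCM_SIMPLE_TAG,
				 data_dir, TARGET_SCF_ACK_KREF);
}

static void fabric_io_done(struct fabric_req *req)
{
	/* last fabric access: drop the ACK_KREF reference taken above */
	target_put_sess_cmd(&req->se_cmd);
}

This pairing is why the explicit sbp_free_request() calls disappear from the status paths above: the final cmd_kref put now drives ->release_cmd() and the tag free.
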
+ 14 - 27
drivers/target/target_core_device.c

@@ -86,7 +86,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
 		se_cmd->lun_ref_active = true;
 
 		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
-		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
+		    deve->lun_access_ro) {
 			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
 				" Access for 0x%08llx\n",
 				se_cmd->se_tfo->get_fabric_name(),
@@ -199,7 +199,7 @@ bool target_lun_is_rdonly(struct se_cmd *cmd)
 
 	rcu_read_lock();
 	deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
-	ret = (deve && deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY);
+	ret = deve && deve->lun_access_ro;
 	rcu_read_unlock();
 
 	return ret;
@@ -258,22 +258,15 @@ void core_free_device_list_for_node(
 
 void core_update_device_list_access(
 	u64 mapped_lun,
-	u32 lun_access,
+	bool lun_access_ro,
 	struct se_node_acl *nacl)
 {
 	struct se_dev_entry *deve;
 
 	mutex_lock(&nacl->lun_entry_mutex);
 	deve = target_nacl_find_deve(nacl, mapped_lun);
-	if (deve) {
-		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
-			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
-			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
-		} else {
-			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
-			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
-		}
-	}
+	if (deve)
+		deve->lun_access_ro = lun_access_ro;
 	mutex_unlock(&nacl->lun_entry_mutex);
 }
 
@@ -319,7 +312,7 @@ int core_enable_device_list_for_node(
 	struct se_lun *lun,
 	struct se_lun_acl *lun_acl,
 	u64 mapped_lun,
-	u32 lun_access,
+	bool lun_access_ro,
 	struct se_node_acl *nacl,
 	struct se_portal_group *tpg)
 {
@@ -340,11 +333,7 @@ int core_enable_device_list_for_node(
 	kref_init(&new->pr_kref);
 	init_completion(&new->pr_comp);
 
-	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE)
-		new->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
-	else
-		new->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
-
+	new->lun_access_ro = lun_access_ro;
 	new->creation_time = get_jiffies_64();
 	new->attach_count++;
 
@@ -433,7 +422,7 @@ void core_disable_device_list_for_node(
 
 	hlist_del_rcu(&orig->link);
 	clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
-	orig->lun_flags = 0;
+	orig->lun_access_ro = false;
 	orig->creation_time = 0;
 	orig->attach_count--;
 	/*
@@ -558,8 +547,7 @@ int core_dev_add_lun(
 {
 	int rc;
 
-	rc = core_tpg_add_lun(tpg, lun,
-				TRANSPORT_LUNFLAGS_READ_WRITE, dev);
+	rc = core_tpg_add_lun(tpg, lun, false, dev);
 	if (rc < 0)
 		return rc;
 
@@ -635,7 +623,7 @@ int core_dev_add_initiator_node_lun_acl(
 	struct se_portal_group *tpg,
 	struct se_lun_acl *lacl,
 	struct se_lun *lun,
-	u32 lun_access)
+	bool lun_access_ro)
 {
 	struct se_node_acl *nacl = lacl->se_lun_nacl;
 	/*
@@ -647,20 +635,19 @@ int core_dev_add_initiator_node_lun_acl(
 	if (!nacl)
 		return -EINVAL;
 
-	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
-	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
-		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+	if (lun->lun_access_ro)
+		lun_access_ro = true;
 
 	lacl->se_lun = lun;
 
 	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
-			lun_access, nacl, tpg) < 0)
+			lun_access_ro, nacl, tpg) < 0)
 		return -EINVAL;
 
 	pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
 		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
 		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
-		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
+		lun_access_ro ? "RO" : "RW",
 		nacl->initiatorname);
 	/*
 	 * Check to see if there are any existing persistent reservation APTPL

+ 14 - 18
drivers/target/target_core_fabric_configfs.c

@@ -78,7 +78,7 @@ static int target_fabric_mappedlun_link(
 			struct se_lun_acl, se_lun_group);
 	struct se_portal_group *se_tpg;
 	struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s;
-	int lun_access;
+	bool lun_access_ro;
 
 	if (lun->lun_link_magic != SE_LUN_LINK_MAGIC) {
 		pr_err("Bad lun->lun_link_magic, not a valid lun_ci pointer:"
@@ -115,19 +115,18 @@ static int target_fabric_mappedlun_link(
 	}
 	/*
 	 * If this struct se_node_acl was dynamically generated with
-	 * tpg_1/attrib/generate_node_acls=1, use the existing deve->lun_flags,
-	 * which be will write protected (READ-ONLY) when
+	 * tpg_1/attrib/generate_node_acls=1, use the existing
+	 * deve->lun_access_ro value, which will be true when
 	 * tpg_1/attrib/demo_mode_write_protect=1
 	 */
 	rcu_read_lock();
 	deve = target_nacl_find_deve(lacl->se_lun_nacl, lacl->mapped_lun);
 	if (deve)
-		lun_access = deve->lun_flags;
+		lun_access_ro = deve->lun_access_ro;
 	else
-		lun_access =
+		lun_access_ro =
 			(se_tpg->se_tpg_tfo->tpg_check_prod_mode_write_protect(
-				se_tpg)) ? TRANSPORT_LUNFLAGS_READ_ONLY :
-					   TRANSPORT_LUNFLAGS_READ_WRITE;
+				se_tpg)) ? true : false;
 	rcu_read_unlock();
 	/*
 	 * Determine the actual mapped LUN value user wants..
@@ -135,7 +134,7 @@ static int target_fabric_mappedlun_link(
 	 * This value is what the SCSI Initiator actually sees the
 	 * $FABRIC/$WWPN/$TPGT/lun/lun_* as on their SCSI Initiator Ports.
 	 */
-	return core_dev_add_initiator_node_lun_acl(se_tpg, lacl, lun, lun_access);
+	return core_dev_add_initiator_node_lun_acl(se_tpg, lacl, lun, lun_access_ro);
 }
 
 static int target_fabric_mappedlun_unlink(
@@ -167,8 +166,7 @@ static ssize_t target_fabric_mappedlun_write_protect_show(
 	rcu_read_lock();
 	deve = target_nacl_find_deve(se_nacl, lacl->mapped_lun);
 	if (deve) {
-		len = sprintf(page, "%d\n",
-			(deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) ? 1 : 0);
+		len = sprintf(page, "%d\n", deve->lun_access_ro);
 	}
 	rcu_read_unlock();
 
@@ -181,25 +179,23 @@ static ssize_t target_fabric_mappedlun_write_protect_store(
 	struct se_lun_acl *lacl = item_to_lun_acl(item);
 	struct se_node_acl *se_nacl = lacl->se_lun_nacl;
 	struct se_portal_group *se_tpg = se_nacl->se_tpg;
-	unsigned long op;
+	unsigned long wp;
 	int ret;
 
-	ret = kstrtoul(page, 0, &op);
+	ret = kstrtoul(page, 0, &wp);
 	if (ret)
 		return ret;
 
-	if ((op != 1) && (op != 0))
+	if ((wp != 1) && (wp != 0))
 		return -EINVAL;
 
-	core_update_device_list_access(lacl->mapped_lun, (op) ?
-			TRANSPORT_LUNFLAGS_READ_ONLY :
-			TRANSPORT_LUNFLAGS_READ_WRITE,
-			lacl->se_lun_nacl);
+	/* wp=1 means lun_access_ro=true */
+	core_update_device_list_access(lacl->mapped_lun, wp, lacl->se_lun_nacl);
 
 	pr_debug("%s_ConfigFS: Changed Initiator ACL: %s"
 		" Mapped LUN: %llu Write Protect bit to %s\n",
 		se_tpg->se_tpg_tfo->get_fabric_name(),
-		se_nacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF");
+		se_nacl->initiatorname, lacl->mapped_lun, (wp) ? "ON" : "OFF");
 
 	return count;
 

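The conversion above retires the TRANSPORT_LUNFLAGS_* pair in favour of a
plain bool, so every write-protect call site collapses to a single boolean
argument. A minimal sketch of the new calling convention (signature taken
from the diff; the value is illustrative):

	/* lun_access_ro == true is the old TRANSPORT_LUNFLAGS_READ_ONLY */
	core_update_device_list_access(lacl->mapped_lun, true, lacl->se_lun_nacl);
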
+ 34 - 0
drivers/target/target_core_iblock.c

@@ -412,9 +412,40 @@ iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
 	return 0;
 }
 
+static sense_reason_t
+iblock_execute_write_same_direct(struct block_device *bdev, struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct scatterlist *sg = &cmd->t_data_sg[0];
+	struct page *page = NULL;
+	int ret;
+
+	if (sg->offset) {
+		page = alloc_page(GFP_KERNEL);
+		if (!page)
+			return TCM_OUT_OF_RESOURCES;
+		sg_copy_to_buffer(sg, cmd->t_data_nents, page_address(page),
+				  dev->dev_attrib.block_size);
+	}
+
+	ret = blkdev_issue_write_same(bdev,
+				target_to_linux_sector(dev, cmd->t_task_lba),
+				target_to_linux_sector(dev,
+					sbc_get_write_same_sectors(cmd)),
+				GFP_KERNEL, page ? page : sg_page(sg));
+	if (page)
+		__free_page(page);
+	if (ret)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+	target_complete_cmd(cmd, GOOD);
+	return 0;
+}
+
 static sense_reason_t
 iblock_execute_write_same(struct se_cmd *cmd)
 {
+	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
 	struct iblock_req *ibr;
 	struct scatterlist *sg;
 	struct bio *bio;
@@ -439,6 +470,9 @@ iblock_execute_write_same(struct se_cmd *cmd)
 		return TCM_INVALID_CDB_FIELD;
 	}
 
+	if (bdev_write_same(bdev))
+		return iblock_execute_write_same_direct(bdev, cmd);
+
 	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
 	if (!ibr)
 		goto fail;

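The iblock change adds a fast path: when the backing device advertises
WRITE SAME support, the payload page is handed directly to
blkdev_issue_write_same() instead of building per-sector bios. The bounce
page covers the sg->offset != 0 case, since the block layer reads the
replication pattern from offset zero of the page it is given. The dispatch
reduces to (a sketch of the logic above):

	if (bdev_write_same(bdev))	/* device does WRITE SAME natively */
		return iblock_execute_write_same_direct(bdev, cmd);
	/* otherwise fall through to the existing bio-based emulation */
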
+ 4 - 4
drivers/target/target_core_internal.h

@@ -59,10 +59,10 @@ struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16);
 void	target_pr_kref_release(struct kref *);
 void	core_free_device_list_for_node(struct se_node_acl *,
 		struct se_portal_group *);
-void	core_update_device_list_access(u64, u32, struct se_node_acl *);
+void	core_update_device_list_access(u64, bool, struct se_node_acl *);
 struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *, u64);
 int	core_enable_device_list_for_node(struct se_lun *, struct se_lun_acl *,
-		u64, u32, struct se_node_acl *, struct se_portal_group *);
+		u64, bool, struct se_node_acl *, struct se_portal_group *);
 void	core_disable_device_list_for_node(struct se_lun *, struct se_dev_entry *,
 		struct se_node_acl *, struct se_portal_group *);
 void	core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *);
@@ -72,7 +72,7 @@ void	core_dev_del_lun(struct se_portal_group *, struct se_lun *);
 struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
 		struct se_node_acl *, u64, int *);
 int	core_dev_add_initiator_node_lun_acl(struct se_portal_group *,
-		struct se_lun_acl *, struct se_lun *lun, u32);
+		struct se_lun_acl *, struct se_lun *lun, bool);
 int	core_dev_del_initiator_node_lun_acl(struct se_lun *,
 		struct se_lun_acl *);
 void	core_dev_free_initiator_node_lun_acl(struct se_portal_group *,
@@ -118,7 +118,7 @@ void	core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *,
 void	core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
 struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u64);
 int	core_tpg_add_lun(struct se_portal_group *, struct se_lun *,
-		u32, struct se_device *);
+		bool, struct se_device *);
 void core_tpg_remove_lun(struct se_portal_group *, struct se_lun *);
 struct se_node_acl *core_tpg_add_initiator_node_acl(struct se_portal_group *tpg,
 		const char *initiatorname);

+ 1 - 2
drivers/target/target_core_spc.c

@@ -997,7 +997,6 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
 	int length = 0;
 	int ret;
 	int i;
-	bool read_only = target_lun_is_rdonly(cmd);;
 
 	memset(buf, 0, SE_MODE_PAGE_BUF);
 
@@ -1008,7 +1007,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
 	length = ten ? 3 : 2;
 
 	/* DEVICE-SPECIFIC PARAMETER */
-	if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || read_only)
+	if (cmd->se_lun->lun_access_ro || target_lun_is_rdonly(cmd))
 		spc_modesense_write_protect(&buf[length], type);
 
 	/*

+ 10 - 11
drivers/target/target_core_tpg.c

@@ -121,7 +121,7 @@ void core_tpg_add_node_to_devs(
 	struct se_portal_group *tpg,
 	struct se_lun *lun_orig)
 {
-	u32 lun_access = 0;
+	bool lun_access_ro = true;
 	struct se_lun *lun;
 	struct se_device *dev;
 
@@ -137,27 +137,26 @@ void core_tpg_add_node_to_devs(
 		 * demo_mode_write_protect is ON, or READ_ONLY;
 		 */
 		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
-			lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
+			lun_access_ro = false;
 		} else {
 			/*
 			 * Allow only optical drives to issue R/W in default RO
 			 * demo mode.
 			 */
 			if (dev->transport->get_device_type(dev) == TYPE_DISK)
-				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+				lun_access_ro = true;
 			else
-				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
+				lun_access_ro = false;
 		}
 
 		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s"
 			" access for LUN in Demo Mode\n",
 			tpg->se_tpg_tfo->get_fabric_name(),
 			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
-			(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
-			"READ-WRITE" : "READ-ONLY");
+			lun_access_ro ? "READ-ONLY" : "READ-WRITE");
 
 		core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
-						 lun_access, acl, tpg);
+						 lun_access_ro, acl, tpg);
 		/*
 		 * Check to see if there are any existing persistent reservation
 		 * APTPL pre-registrations that need to be enabled for this dynamic
@@ -522,7 +521,7 @@ int core_tpg_register(
 			return PTR_ERR(se_tpg->tpg_virt_lun0);
 
 		ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0,
-				TRANSPORT_LUNFLAGS_READ_ONLY, g_lun0_dev);
+				true, g_lun0_dev);
 		if (ret < 0) {
 			kfree(se_tpg->tpg_virt_lun0);
 			return ret;
@@ -616,7 +615,7 @@ struct se_lun *core_tpg_alloc_lun(
 int core_tpg_add_lun(
 	struct se_portal_group *tpg,
 	struct se_lun *lun,
-	u32 lun_access,
+	bool lun_access_ro,
 	struct se_device *dev)
 {
 	int ret;
@@ -644,9 +643,9 @@ int core_tpg_add_lun(
 	spin_unlock(&dev->se_port_lock);
 
 	if (dev->dev_flags & DF_READ_ONLY)
-		lun->lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+		lun->lun_access_ro = true;
 	else
-		lun->lun_access = lun_access;
+		lun->lun_access_ro = lun_access_ro;
 	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
 		hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
 	mutex_unlock(&tpg->tpg_lun_mutex);

+ 64 - 2
drivers/target/target_core_transport.c

@@ -281,6 +281,17 @@ struct se_session *transport_init_session_tags(unsigned int tag_num,
 	struct se_session *se_sess;
 	int rc;
 
+	if (tag_num != 0 && !tag_size) {
+		pr_err("init_session_tags called with percpu-ida tag_num:"
+		       " %u, but zero tag_size\n", tag_num);
+		return ERR_PTR(-EINVAL);
+	}
+	if (!tag_num && tag_size) {
+		pr_err("init_session_tags called with percpu-ida tag_size:"
+		       " %u, but zero tag_num\n", tag_size);
+		return ERR_PTR(-EINVAL);
+	}
+
 	se_sess = transport_init_session(sup_prot_ops);
 	if (IS_ERR(se_sess))
 		return se_sess;
@@ -374,6 +385,51 @@ void transport_register_session(
 }
 EXPORT_SYMBOL(transport_register_session);
 
+struct se_session *
+target_alloc_session(struct se_portal_group *tpg,
+		     unsigned int tag_num, unsigned int tag_size,
+		     enum target_prot_op prot_op,
+		     const char *initiatorname, void *private,
+		     int (*callback)(struct se_portal_group *,
+				     struct se_session *, void *))
+{
+	struct se_session *sess;
+
+	/*
+	 * If the fabric driver is using percpu-ida based pre allocation
+	 * of I/O descriptor tags, go ahead and perform that setup now..
+	 */
+	if (tag_num != 0)
+		sess = transport_init_session_tags(tag_num, tag_size, prot_op);
+	else
+		sess = transport_init_session(prot_op);
+
+	if (IS_ERR(sess))
+		return sess;
+
+	sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg,
+					(unsigned char *)initiatorname);
+	if (!sess->se_node_acl) {
+		transport_free_session(sess);
+		return ERR_PTR(-EACCES);
+	}
+	/*
+	 * Go ahead and perform any remaining fabric setup that is
+	 * required before transport_register_session().
+	 */
+	if (callback != NULL) {
+		int rc = callback(tpg, sess, private);
+		if (rc) {
+			transport_free_session(sess);
+			return ERR_PTR(rc);
+		}
+	}
+
+	transport_register_session(tpg, sess->se_node_acl, sess, private);
+	return sess;
+}
+EXPORT_SYMBOL(target_alloc_session);
+
 static void target_release_session(struct kref *kref)
 {
 	struct se_session *se_sess = container_of(kref,
@@ -1941,6 +1997,9 @@ static void transport_complete_qf(struct se_cmd *cmd)
 
 	switch (cmd->data_direction) {
 	case DMA_FROM_DEVICE:
+		if (cmd->scsi_status)
+			goto queue_status;
+
 		trace_target_cmd_complete(cmd);
 		ret = cmd->se_tfo->queue_data_in(cmd);
 		break;
@@ -1951,6 +2010,7 @@ static void transport_complete_qf(struct se_cmd *cmd)
 		}
 		/* Fall through for DMA_TO_DEVICE */
 	case DMA_NONE:
+queue_status:
 		trace_target_cmd_complete(cmd);
 		ret = cmd->se_tfo->queue_status(cmd);
 		break;
@@ -2072,6 +2132,9 @@ static void target_complete_ok_work(struct work_struct *work)
 queue_rsp:
 	switch (cmd->data_direction) {
 	case DMA_FROM_DEVICE:
+		if (cmd->scsi_status)
+			goto queue_status;
+
 		atomic_long_add(cmd->data_length,
 				&cmd->se_lun->lun_stats.tx_data_octets);
 		/*
@@ -2111,6 +2174,7 @@ queue_rsp:
 		}
 		/* Fall through for DMA_TO_DEVICE */
 	case DMA_NONE:
+queue_status:
 		trace_target_cmd_complete(cmd);
 		ret = cmd->se_tfo->queue_status(cmd);
 		if (ret == -EAGAIN || ret == -ENOMEM)
@@ -2596,8 +2660,6 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
 
 	list_for_each_entry_safe(se_cmd, tmp_cmd,
 				&se_sess->sess_wait_list, se_cmd_list) {
-		list_del_init(&se_cmd->se_cmd_list);
-
 		pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
 			" %d\n", se_cmd, se_cmd->t_state,
 			se_cmd->se_tfo->get_cmd_state(se_cmd));

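target_alloc_session() is the helper the tree-wide conversions below build
on: it rolls optional percpu_ida tag pre-allocation, the se_node_acl lookup,
and transport_register_session() into one call, with the callback giving the
fabric a hook to finish its own setup in between. A hypothetical caller
(the demo_* names are invented for illustration) might look like:

	static int demo_alloc_sess_cb(struct se_portal_group *se_tpg,
				      struct se_session *se_sess, void *p)
	{
		struct demo_nexus *nexus = p;	/* fabric-private data */

		nexus->se_sess = se_sess;	/* publish before registration */
		return 0;	/* non-zero unwinds via transport_free_session() */
	}

	sess = target_alloc_session(&tpg->se_tpg, DEMO_DEFAULT_TAGS,
				    sizeof(struct demo_cmd), TARGET_PROT_NORMAL,
				    initiatorname, nexus, demo_alloc_sess_cb);
	if (IS_ERR(sess))
		return PTR_ERR(sess);
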
+ 155 - 112
drivers/target/target_core_user.c

@@ -26,6 +26,7 @@
 #include <linux/vmalloc.h>
 #include <linux/uio_driver.h>
 #include <linux/stringify.h>
+#include <linux/bitops.h>
 #include <net/genetlink.h>
 #include <scsi/scsi_common.h>
 #include <scsi/scsi_proto.h>
@@ -63,8 +64,11 @@
 
 #define TCMU_TIME_OUT (30 * MSEC_PER_SEC)
 
+#define DATA_BLOCK_BITS 256
+#define DATA_BLOCK_SIZE 4096
+
 #define CMDR_SIZE (16 * 4096)
-#define DATA_SIZE (257 * 4096)
+#define DATA_SIZE (DATA_BLOCK_BITS * DATA_BLOCK_SIZE)
 
 #define TCMU_RING_SIZE (CMDR_SIZE + DATA_SIZE)
 
@@ -93,12 +97,11 @@ struct tcmu_dev {
 	u32 cmdr_size;
 	u32 cmdr_last_cleaned;
 	/* Offset of data ring from start of mb */
+	/* Must add data_off and mb_addr to get the address */
 	size_t data_off;
 	size_t data_size;
-	/* Ring head + tail values. */
-	/* Must add data_off and mb_addr to get the address */
-	size_t data_head;
-	size_t data_tail;
+
+	DECLARE_BITMAP(data_bitmap, DATA_BLOCK_BITS);
 
 	wait_queue_head_t wait_cmdr;
 	/* TODO should this be a mutex? */
@@ -122,9 +125,9 @@ struct tcmu_cmd {
 
 	uint16_t cmd_id;
 
-	/* Can't use se_cmd->data_length when cleaning up expired cmds, because if
+	/* Can't use se_cmd when cleaning up expired cmds, because if
 	   cmd has been completed then accessing se_cmd is off limits */
-	size_t data_length;
+	DECLARE_BITMAP(data_bitmap, DATA_BLOCK_BITS);
 
 	unsigned long deadline;
 
@@ -168,13 +171,6 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
 
 	tcmu_cmd->se_cmd = se_cmd;
 	tcmu_cmd->tcmu_dev = udev;
-	tcmu_cmd->data_length = se_cmd->data_length;
-
-	if (se_cmd->se_cmd_flags & SCF_BIDI) {
-		BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
-		tcmu_cmd->data_length += se_cmd->t_bidi_data_sg->length;
-	}
-
 	tcmu_cmd->deadline = jiffies + msecs_to_jiffies(TCMU_TIME_OUT);
 
 	idr_preload(GFP_KERNEL);
@@ -231,105 +227,126 @@ static inline size_t head_to_end(size_t head, size_t size)
 	return size - head;
 }
 
+static inline void new_iov(struct iovec **iov, int *iov_cnt,
+			   struct tcmu_dev *udev)
+{
+	struct iovec *iovec;
+
+	if (*iov_cnt != 0)
+		(*iov)++;
+	(*iov_cnt)++;
+
+	iovec = *iov;
+	memset(iovec, 0, sizeof(struct iovec));
+}
+
 #define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)
 
+/* offset is relative to mb_addr */
+static inline size_t get_block_offset(struct tcmu_dev *dev,
+		int block, int remaining)
+{
+	return dev->data_off + block * DATA_BLOCK_SIZE +
+		DATA_BLOCK_SIZE - remaining;
+}
+
+static inline size_t iov_tail(struct tcmu_dev *udev, struct iovec *iov)
+{
+	return (size_t)iov->iov_base + iov->iov_len;
+}
+
 static void alloc_and_scatter_data_area(struct tcmu_dev *udev,
 	struct scatterlist *data_sg, unsigned int data_nents,
 	struct iovec **iov, int *iov_cnt, bool copy_data)
 {
-	int i;
+	int i, block;
+	int block_remaining = 0;
 	void *from, *to;
-	size_t copy_bytes;
+	size_t copy_bytes, to_offset;
 	struct scatterlist *sg;
 
 	for_each_sg(data_sg, sg, data_nents, i) {
-		copy_bytes = min_t(size_t, sg->length,
-				 head_to_end(udev->data_head, udev->data_size));
+		int sg_remaining = sg->length;
 		from = kmap_atomic(sg_page(sg)) + sg->offset;
-		to = (void *) udev->mb_addr + udev->data_off + udev->data_head;
-
-		if (copy_data) {
-			memcpy(to, from, copy_bytes);
-			tcmu_flush_dcache_range(to, copy_bytes);
-		}
-
-		/* Even iov_base is relative to mb_addr */
-		(*iov)->iov_len = copy_bytes;
-		(*iov)->iov_base = (void __user *) udev->data_off +
-						udev->data_head;
-		(*iov_cnt)++;
-		(*iov)++;
-
-		UPDATE_HEAD(udev->data_head, copy_bytes, udev->data_size);
-
-		/* Uh oh, we wrapped the buffer. Must split sg across 2 iovs. */
-		if (sg->length != copy_bytes) {
-			void *from_skip = from + copy_bytes;
-
-			copy_bytes = sg->length - copy_bytes;
-
-			(*iov)->iov_len = copy_bytes;
-			(*iov)->iov_base = (void __user *) udev->data_off +
-							udev->data_head;
-
+		while (sg_remaining > 0) {
+			if (block_remaining == 0) {
+				block = find_first_zero_bit(udev->data_bitmap,
+						DATA_BLOCK_BITS);
+				block_remaining = DATA_BLOCK_SIZE;
+				set_bit(block, udev->data_bitmap);
+			}
+			copy_bytes = min_t(size_t, sg_remaining,
+					block_remaining);
+			to_offset = get_block_offset(udev, block,
+					block_remaining);
+			to = (void *)udev->mb_addr + to_offset;
+			if (*iov_cnt != 0 &&
+			    to_offset == iov_tail(udev, *iov)) {
+				(*iov)->iov_len += copy_bytes;
+			} else {
+				new_iov(iov, iov_cnt, udev);
+				(*iov)->iov_base = (void __user *) to_offset;
+				(*iov)->iov_len = copy_bytes;
+			}
 			if (copy_data) {
-				to = (void *) udev->mb_addr +
-					udev->data_off + udev->data_head;
-				memcpy(to, from_skip, copy_bytes);
+				memcpy(to, from + sg->length - sg_remaining,
+					copy_bytes);
 				tcmu_flush_dcache_range(to, copy_bytes);
 			}
-
-			(*iov_cnt)++;
-			(*iov)++;
-
-			UPDATE_HEAD(udev->data_head,
-				copy_bytes, udev->data_size);
+			sg_remaining -= copy_bytes;
+			block_remaining -= copy_bytes;
 		}
-
 		kunmap_atomic(from - sg->offset);
 	}
 }
 
-static void gather_and_free_data_area(struct tcmu_dev *udev,
-	struct scatterlist *data_sg, unsigned int data_nents)
+static void free_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd)
 {
-	int i;
+	bitmap_xor(udev->data_bitmap, udev->data_bitmap, cmd->data_bitmap,
+		   DATA_BLOCK_BITS);
+}
+
+static void gather_data_area(struct tcmu_dev *udev, unsigned long *cmd_bitmap,
+		struct scatterlist *data_sg, unsigned int data_nents)
+{
+	int i, block;
+	int block_remaining = 0;
 	void *from, *to;
-	size_t copy_bytes;
+	size_t copy_bytes, from_offset;
 	struct scatterlist *sg;
 
-	/* It'd be easier to look at entry's iovec again, but UAM */
 	for_each_sg(data_sg, sg, data_nents, i) {
-		copy_bytes = min_t(size_t, sg->length,
-				 head_to_end(udev->data_tail, udev->data_size));
-
+		int sg_remaining = sg->length;
 		to = kmap_atomic(sg_page(sg)) + sg->offset;
-		WARN_ON(sg->length + sg->offset > PAGE_SIZE);
-		from = (void *) udev->mb_addr +
-			udev->data_off + udev->data_tail;
-		tcmu_flush_dcache_range(from, copy_bytes);
-		memcpy(to, from, copy_bytes);
-
-		UPDATE_HEAD(udev->data_tail, copy_bytes, udev->data_size);
-
-		/* Uh oh, wrapped the data buffer for this sg's data */
-		if (sg->length != copy_bytes) {
-			void *to_skip = to + copy_bytes;
-
-			from = (void *) udev->mb_addr +
-				udev->data_off + udev->data_tail;
-			WARN_ON(udev->data_tail);
-			copy_bytes = sg->length - copy_bytes;
+		while (sg_remaining > 0) {
+			if (block_remaining == 0) {
+				block = find_first_bit(cmd_bitmap,
+						DATA_BLOCK_BITS);
+				block_remaining = DATA_BLOCK_SIZE;
+				clear_bit(block, cmd_bitmap);
+			}
+			copy_bytes = min_t(size_t, sg_remaining,
+					block_remaining);
+			from_offset = get_block_offset(udev, block,
+					block_remaining);
+			from = (void *) udev->mb_addr + from_offset;
 			tcmu_flush_dcache_range(from, copy_bytes);
-			memcpy(to_skip, from, copy_bytes);
+			memcpy(to + sg->length - sg_remaining, from,
+					copy_bytes);
 
-			UPDATE_HEAD(udev->data_tail,
-				copy_bytes, udev->data_size);
+			sg_remaining -= copy_bytes;
+			block_remaining -= copy_bytes;
 		}
 		kunmap_atomic(to - sg->offset);
 	}
 }
 
+static inline size_t spc_bitmap_free(unsigned long *bitmap)
+{
+	return DATA_BLOCK_SIZE * (DATA_BLOCK_BITS -
+			bitmap_weight(bitmap, DATA_BLOCK_BITS));
+}
+
 /*
  * We can't queue a command until we have space available on the cmd ring *and*
  * space available on the data ring.
@@ -339,9 +356,8 @@ static void gather_and_free_data_area(struct tcmu_dev *udev,
 static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t data_needed)
 {
 	struct tcmu_mailbox *mb = udev->mb_addr;
-	size_t space;
+	size_t space, cmd_needed;
 	u32 cmd_head;
-	size_t cmd_needed;
 
 	tcmu_flush_dcache_range(mb, sizeof(*mb));
 
@@ -363,10 +379,10 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t d
 		return false;
 	}
 
-	space = spc_free(udev->data_head, udev->data_tail, udev->data_size);
+	space = spc_bitmap_free(udev->data_bitmap);
 	if (space < data_needed) {
-		pr_debug("no data space: %zu %zu %zu\n", udev->data_head,
-		       udev->data_tail, udev->data_size);
+		pr_debug("no data space: only %zu available, but ask for %zu\n",
+				space, data_needed);
 		return false;
 	}
 
@@ -385,6 +401,8 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 	uint32_t cmd_head;
 	uint64_t cdb_off;
 	bool copy_to_data_area;
+	size_t data_length;
+	DECLARE_BITMAP(old_bitmap, DATA_BLOCK_BITS);
 
 	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
 		return -EINVAL;
@@ -393,12 +411,12 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 	 * Must be a certain minimum size for response sense info, but
 	 * also may be larger if the iov array is large.
 	 *
-	 * iovs = sgl_nents+1, for end-of-ring case, plus another 1
-	 * b/c size == offsetof one-past-element.
+	 * We prepare way too many iovs for potential uses here, because it's
+	 * expensive to tell how many regions are freed in the bitmap
	*/
 	base_command_size = max(offsetof(struct tcmu_cmd_entry,
-					 req.iov[se_cmd->t_bidi_data_nents +
-						 se_cmd->t_data_nents + 2]),
+				req.iov[se_cmd->t_bidi_data_nents +
+					se_cmd->t_data_nents]),
 				sizeof(struct tcmu_cmd_entry));
 	command_size = base_command_size
 		+ round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE);
@@ -409,13 +427,18 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 
 	mb = udev->mb_addr;
 	cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
+	data_length = se_cmd->data_length;
+	if (se_cmd->se_cmd_flags & SCF_BIDI) {
+		BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
+		data_length += se_cmd->t_bidi_data_sg->length;
+	}
 	if ((command_size > (udev->cmdr_size / 2))
-	    || tcmu_cmd->data_length > (udev->data_size - 1))
+	    || data_length > udev->data_size)
 		pr_warn("TCMU: Request of size %zu/%zu may be too big for %u/%zu "
-			"cmd/data ring buffers\n", command_size, tcmu_cmd->data_length,
+			"cmd/data ring buffers\n", command_size, data_length,
 			udev->cmdr_size, udev->data_size);
 
-	while (!is_ring_space_avail(udev, command_size, tcmu_cmd->data_length)) {
+	while (!is_ring_space_avail(udev, command_size, data_length)) {
 		int ret;
 		DEFINE_WAIT(__wait);
 
@@ -462,6 +485,8 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 	entry->hdr.kflags = 0;
 	entry->hdr.uflags = 0;
 
+	bitmap_copy(old_bitmap, udev->data_bitmap, DATA_BLOCK_BITS);
+
 	/*
 	 * Fix up iovecs, and handle if allocation in data ring wrapped.
 	 */
@@ -480,6 +505,10 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 		se_cmd->t_bidi_data_nents, &iov, &iov_cnt, false);
 	entry->req.iov_bidi_cnt = iov_cnt;
 
+	/* cmd's data_bitmap is what changed in process */
+	bitmap_xor(tcmu_cmd->data_bitmap, old_bitmap, udev->data_bitmap,
+			DATA_BLOCK_BITS);
+
 	/* All offsets relative to mb_addr, not start of entry! */
 	cdb_off = CMDR_OFF + cmd_head + base_command_size;
 	memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
@@ -530,35 +559,42 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
 	struct tcmu_dev *udev = cmd->tcmu_dev;
 
 	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
-		/* cmd has been completed already from timeout, just reclaim data
-		   ring space */
-		UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
+		/*
+		 * cmd has been completed already from timeout, just reclaim
+		 * data ring space and free cmd
+		 */
+		free_data_area(udev, cmd);
+
+		kmem_cache_free(tcmu_cmd_cache, cmd);
 		return;
 	}
 
 	if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
-		UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
+		free_data_area(udev, cmd);
 		pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
 			cmd->se_cmd);
 		entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
 	} else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
 		memcpy(se_cmd->sense_buffer, entry->rsp.sense_buffer,
			       se_cmd->scsi_sense_length);
-
-		UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
+		free_data_area(udev, cmd);
 	} else if (se_cmd->se_cmd_flags & SCF_BIDI) {
-		/* Discard data_out buffer */
-		UPDATE_HEAD(udev->data_tail,
-			(size_t)se_cmd->t_data_sg->length, udev->data_size);
+		DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
 
-		/* Get Data-In buffer */
-		gather_and_free_data_area(udev,
+		/* Get Data-In buffer before clean up */
+		bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
+		gather_data_area(udev, bitmap,
 			se_cmd->t_bidi_data_sg, se_cmd->t_bidi_data_nents);
+		free_data_area(udev, cmd);
 	} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
-		gather_and_free_data_area(udev,
+		DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
+
+		bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
+		gather_data_area(udev, bitmap,
 			se_cmd->t_data_sg, se_cmd->t_data_nents);
+		free_data_area(udev, cmd);
 	} else if (se_cmd->data_direction == DMA_TO_DEVICE) {
-		UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
+		free_data_area(udev, cmd);
 	} else if (se_cmd->data_direction != DMA_NONE) {
 		pr_warn("TCMU: data direction was %d!\n",
 			se_cmd->data_direction);
@@ -894,11 +930,13 @@ static int tcmu_configure_device(struct se_device *dev)
 
 	mb = udev->mb_addr;
 	mb->version = TCMU_MAILBOX_VERSION;
+	mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC;
 	mb->cmdr_off = CMDR_OFF;
 	mb->cmdr_size = udev->cmdr_size;
 
 	WARN_ON(!PAGE_ALIGNED(udev->data_off));
 	WARN_ON(udev->data_size % PAGE_SIZE);
+	WARN_ON(udev->data_size % DATA_BLOCK_SIZE);
 
 	info->version = __stringify(TCMU_MAILBOX_VERSION);
 
@@ -942,12 +980,12 @@ err_vzalloc:
 	return ret;
 }
 
-static int tcmu_check_pending_cmd(int id, void *p, void *data)
+static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
 {
-	struct tcmu_cmd *cmd = p;
-
-	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
+	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+		kmem_cache_free(tcmu_cmd_cache, cmd);
 		return 0;
+	}
 	return -EINVAL;
 }
 
@@ -962,6 +1000,8 @@ static void tcmu_dev_call_rcu(struct rcu_head *p)
 static void tcmu_free_device(struct se_device *dev)
 {
 	struct tcmu_dev *udev = TCMU_DEV(dev);
+	struct tcmu_cmd *cmd;
+	bool all_expired = true;
 	int i;
 
 	del_timer_sync(&udev->timeout);
@@ -970,10 +1010,13 @@ static void tcmu_free_device(struct se_device *dev)
 
 	/* Upper layer should drain all requests before calling this */
 	spin_lock_irq(&udev->commands_lock);
-	i = idr_for_each(&udev->commands, tcmu_check_pending_cmd, NULL);
+	idr_for_each_entry(&udev->commands, cmd, i) {
+		if (tcmu_check_and_free_pending_cmd(cmd) != 0)
+			all_expired = false;
+	}
 	idr_destroy(&udev->commands);
 	spin_unlock_irq(&udev->commands_lock);
-	WARN_ON(i);
+	WARN_ON(!all_expired);
 
 	/* Device was configured */
 	if (udev->uio_info.uio_dev) {

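The data_bitmap rework above divides the TCMU data area into 256 fixed
4 KiB blocks (DATA_SIZE shrinks from 257 to 256 pages) and records per
command exactly which blocks it owns. Freeing becomes a bitmap XOR instead
of a tail-pointer advance, which is what makes the out-of-order completion
capability (the CAP_OOOC mailbox flag set above) safe to advertise. Free
space accounting reduces to (mirroring spc_bitmap_free() above):

	/* free bytes = block size * number of clear bits */
	free = DATA_BLOCK_SIZE * (DATA_BLOCK_BITS -
			bitmap_weight(udev->data_bitmap, DATA_BLOCK_BITS));
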
+ 16 - 4
drivers/target/tcm_fc/tfc_cmd.c

@@ -107,8 +107,7 @@ void ft_release_cmd(struct se_cmd *se_cmd)
 
 int ft_check_stop_free(struct se_cmd *se_cmd)
 {
-	transport_generic_free_cmd(se_cmd, 0);
-	return 1;
+	return transport_generic_free_cmd(se_cmd, 0);
 }
 
 /*
@@ -179,6 +178,12 @@ int ft_queue_status(struct se_cmd *se_cmd)
 		return -ENOMEM;
 	}
 	lport->tt.exch_done(cmd->seq);
+	/*
+	 * Drop the extra ACK_KREF reference taken by target_submit_cmd()
+	 * ahead of ft_check_stop_free() -> transport_generic_free_cmd()
+	 * final se_cmd->cmd_kref put.
+	 */
+	target_put_sess_cmd(&cmd->se_cmd);
 	return 0;
 }
 
@@ -387,7 +392,7 @@ static void ft_send_tm(struct ft_cmd *cmd)
 	/* FIXME: Add referenced task tag for ABORT_TASK */
 	rc = target_submit_tmr(&cmd->se_cmd, cmd->sess->se_sess,
 		&cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
-		cmd, tm_func, GFP_KERNEL, 0, 0);
+		cmd, tm_func, GFP_KERNEL, 0, TARGET_SCF_ACK_KREF);
 	if (rc < 0)
 		ft_send_resp_code_and_free(cmd, FCP_TMF_FAILED);
 }
@@ -422,6 +427,12 @@ void ft_queue_tm_resp(struct se_cmd *se_cmd)
 	pr_debug("tmr fn %d resp %d fcp code %d\n",
 		  tmr->function, tmr->response, code);
 	ft_send_resp_code(cmd, code);
+	/*
+	 * Drop the extra ACK_KREF reference taken by target_submit_tmr()
+	 * ahead of ft_check_stop_free() -> transport_generic_free_cmd()
+	 * final se_cmd->cmd_kref put.
+	 */
+	target_put_sess_cmd(&cmd->se_cmd);
 }
 
 void ft_aborted_task(struct se_cmd *se_cmd)
@@ -560,7 +571,8 @@ static void ft_send_work(struct work_struct *work)
 	 */
 	if (target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb,
			      &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
-			      ntohl(fcp->fc_dl), task_attr, data_dir, 0))
+			      ntohl(fcp->fc_dl), task_attr, data_dir,
+			      TARGET_SCF_ACK_KREF))
 		goto err;
 
 	pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl);

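The TARGET_SCF_ACK_KREF conversion leaves tcm_fc holding two references per
command: the base reference dropped by ft_check_stop_free() ->
transport_generic_free_cmd(), plus the ACK_KREF reference dropped once the
response has gone out. Roughly (a sketch; the counts assume no concurrent
abort):

	/* target_submit_cmd(..., TARGET_SCF_ACK_KREF)	cmd_kref: 2
	 * ft_queue_status() -> target_put_sess_cmd()	cmd_kref: 1
	 * ft_check_stop_free() ->
	 *	transport_generic_free_cmd()		cmd_kref: 0, release
	 */
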
+ 22 - 22
drivers/target/tcm_fc/tfc_sess.c

@@ -186,6 +186,20 @@ out:
 	return NULL;
 }
 
+static int ft_sess_alloc_cb(struct se_portal_group *se_tpg,
+			    struct se_session *se_sess, void *p)
+{
+	struct ft_sess *sess = p;
+	struct ft_tport *tport = sess->tport;
+	struct hlist_head *head = &tport->hash[ft_sess_hash(sess->port_id)];
+
+	pr_debug("port_id %x sess %p\n", sess->port_id, sess);
+	hlist_add_head_rcu(&sess->hash, head);
+	tport->sess_count++;
+
+	return 0;
+}
+
 /*
  * Allocate session and enter it in the hash for the local port.
  * Caller holds ft_lport_lock.
@@ -194,7 +208,6 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
 				      struct fc_rport_priv *rdata)
 {
 	struct se_portal_group *se_tpg = &tport->tpg->se_tpg;
-	struct se_node_acl *se_acl;
 	struct ft_sess *sess;
 	struct hlist_head *head;
 	unsigned char initiatorname[TRANSPORT_IQN_LEN];
@@ -210,31 +223,18 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
 	if (!sess)
 		return NULL;
 
-	sess->se_sess = transport_init_session_tags(TCM_FC_DEFAULT_TAGS,
-						    sizeof(struct ft_cmd),
-						    TARGET_PROT_NORMAL);
-	if (IS_ERR(sess->se_sess)) {
-		kfree(sess);
-		return NULL;
-	}
+	kref_init(&sess->kref); /* ref for table entry */
+	sess->tport = tport;
+	sess->port_id = port_id;
 
-	se_acl = core_tpg_get_initiator_node_acl(se_tpg, &initiatorname[0]);
-	if (!se_acl) {
-		transport_free_session(sess->se_sess);
+	sess->se_sess = target_alloc_session(se_tpg, TCM_FC_DEFAULT_TAGS,
+					     sizeof(struct ft_cmd),
+					     TARGET_PROT_NORMAL, &initiatorname[0],
+					     sess, ft_sess_alloc_cb);
+	if (IS_ERR(sess->se_sess)) {
 		kfree(sess);
 		return NULL;
 	}
-	sess->se_sess->se_node_acl = se_acl;
-	sess->tport = tport;
-	sess->port_id = port_id;
-	kref_init(&sess->kref);	/* ref for table entry */
-	hlist_add_head_rcu(&sess->hash, head);
-	tport->sess_count++;
-
-	pr_debug("port_id %x sess %p\n", port_id, sess);
-
-	transport_register_session(&tport->tpg->se_tpg, se_acl,
-				   sess->se_sess, sess);
 	return sess;
 }
 

+ 87 - 106
drivers/usb/gadget/function/f_tcm.c

@@ -41,13 +41,6 @@ static inline struct f_uas *to_f_uas(struct usb_function *f)
 	return container_of(f, struct f_uas, function);
 }
 
-static void usbg_cmd_release(struct kref *);
-
-static inline void usbg_cleanup_cmd(struct usbg_cmd *cmd)
-{
-	kref_put(&cmd->ref, usbg_cmd_release);
-}
-
 /* Start bot.c code */
 
 static int bot_enqueue_cmd_cbw(struct f_uas *fu)
@@ -68,7 +61,7 @@ static void bot_status_complete(struct usb_ep *ep, struct usb_request *req)
 	struct usbg_cmd *cmd = req->context;
 	struct f_uas *fu = cmd->fu;
 
-	usbg_cleanup_cmd(cmd);
+	transport_generic_free_cmd(&cmd->se_cmd, 0);
 	if (req->status < 0) {
 		pr_err("ERR %s(%d)\n", __func__, __LINE__);
 		return;
@@ -605,7 +598,7 @@ static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req)
 		break;
 
 	case UASP_QUEUE_COMMAND:
-		usbg_cleanup_cmd(cmd);
+		transport_generic_free_cmd(&cmd->se_cmd, 0);
 		usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
 		break;
 
@@ -615,7 +608,7 @@ static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req)
 	return;
 
 cleanup:
-	usbg_cleanup_cmd(cmd);
+	transport_generic_free_cmd(&cmd->se_cmd, 0);
 }
 
 static int uasp_send_status_response(struct usbg_cmd *cmd)
@@ -977,7 +970,7 @@ static void usbg_data_write_cmpl(struct usb_ep *ep, struct usb_request *req)
 	return;
 
 cleanup:
-	usbg_cleanup_cmd(cmd);
+	transport_generic_free_cmd(&cmd->se_cmd, 0);
 }
 
 static int usbg_prepare_w_request(struct usbg_cmd *cmd, struct usb_request *req)
@@ -1046,7 +1039,7 @@ static void usbg_cmd_work(struct work_struct *work)
 	struct se_cmd *se_cmd;
 	struct tcm_usbg_nexus *tv_nexus;
 	struct usbg_tpg *tpg;
-	int dir;
+	int dir, flags = (TARGET_SCF_UNKNOWN_SIZE | TARGET_SCF_ACK_KREF);
 
 	se_cmd = &cmd->se_cmd;
 	tpg = cmd->fu->tpg;
@@ -1060,9 +1053,9 @@ static void usbg_cmd_work(struct work_struct *work)
 		goto out;
 	}
 
-	if (target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess,
-			cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun,
-			0, cmd->prio_attr, dir, TARGET_SCF_UNKNOWN_SIZE) < 0)
+	if (target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess, cmd->cmd_buf,
+			      cmd->sense_iu.sense, cmd->unpacked_lun, 0,
+			      cmd->prio_attr, dir, flags) < 0)
 		goto out;
 
 	return;
@@ -1070,42 +1063,64 @@ static void usbg_cmd_work(struct work_struct *work)
 out:
 	transport_send_check_condition_and_sense(se_cmd,
 			TCM_UNSUPPORTED_SCSI_OPCODE, 1);
-	usbg_cleanup_cmd(cmd);
+	transport_generic_free_cmd(&cmd->se_cmd, 0);
 }
 
+static struct usbg_cmd *usbg_get_cmd(struct f_uas *fu,
+		struct tcm_usbg_nexus *tv_nexus, u32 scsi_tag)
+{
+	struct se_session *se_sess = tv_nexus->tvn_se_sess;
+	struct usbg_cmd *cmd;
+	int tag;
+
+	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
+	if (tag < 0)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = &((struct usbg_cmd *)se_sess->sess_cmd_map)[tag];
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->se_cmd.map_tag = tag;
+	cmd->se_cmd.tag = cmd->tag = scsi_tag;
+	cmd->fu = fu;
+
+	return cmd;
+}
+
+static void usbg_release_cmd(struct se_cmd *);
+
 static int usbg_submit_command(struct f_uas *fu,
 		void *cmdbuf, unsigned int len)
 {
 	struct command_iu *cmd_iu = cmdbuf;
 	struct usbg_cmd *cmd;
-	struct usbg_tpg *tpg;
-	struct tcm_usbg_nexus *tv_nexus;
+	struct usbg_tpg *tpg = fu->tpg;
+	struct tcm_usbg_nexus *tv_nexus = tpg->tpg_nexus;
 	u32 cmd_len;
+	u16 scsi_tag;
 
 	if (cmd_iu->iu_id != IU_ID_COMMAND) {
 		pr_err("Unsupported type %d\n", cmd_iu->iu_id);
 		return -EINVAL;
 	}
 
-	cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
-	if (!cmd)
-		return -ENOMEM;
-
-	cmd->fu = fu;
-
-	/* XXX until I figure out why I can't free in on complete */
-	kref_init(&cmd->ref);
-	kref_get(&cmd->ref);
+	tv_nexus = tpg->tpg_nexus;
+	if (!tv_nexus) {
+		pr_err("Missing nexus, ignoring command\n");
+		return -EINVAL;
+	}
 
-	tpg = fu->tpg;
 	cmd_len = (cmd_iu->len & ~0x3) + 16;
 	if (cmd_len > USBG_MAX_CMD)
-		goto err;
+		return -EINVAL;
 
+	scsi_tag = be16_to_cpup(&cmd_iu->tag);
+	cmd = usbg_get_cmd(fu, tv_nexus, scsi_tag);
+	if (IS_ERR(cmd)) {
+		pr_err("usbg_get_cmd failed\n");
+		return -ENOMEM;
+	}
 	memcpy(cmd->cmd_buf, cmd_iu->cdb, cmd_len);
 
-	cmd->tag = be16_to_cpup(&cmd_iu->tag);
-	cmd->se_cmd.tag = cmd->tag;
 	if (fu->flags & USBG_USE_STREAMS) {
 		if (cmd->tag > UASP_SS_EP_COMP_NUM_STREAMS)
 			goto err;
@@ -1117,12 +1132,6 @@ static int usbg_submit_command(struct f_uas *fu,
 		cmd->stream = &fu->stream[0];
 	}
 
-	tv_nexus = tpg->tpg_nexus;
-	if (!tv_nexus) {
-		pr_err("Missing nexus, ignoring command\n");
-		goto err;
-	}
-
 	switch (cmd_iu->prio_attr & 0x7) {
 	case UAS_HEAD_TAG:
 		cmd->prio_attr = TCM_HEAD_TAG;
@@ -1148,7 +1157,7 @@ static int usbg_submit_command(struct f_uas *fu,
 
 	return 0;
 err:
-	kfree(cmd);
+	usbg_release_cmd(&cmd->se_cmd);
 	return -EINVAL;
 }
 
@@ -1182,7 +1191,7 @@ static void bot_cmd_work(struct work_struct *work)
 out:
 	transport_send_check_condition_and_sense(se_cmd,
 				TCM_UNSUPPORTED_SCSI_OPCODE, 1);
-	usbg_cleanup_cmd(cmd);
+	transport_generic_free_cmd(&cmd->se_cmd, 0);
 }
 
 static int bot_submit_command(struct f_uas *fu,
@@ -1190,7 +1199,7 @@ static int bot_submit_command(struct f_uas *fu,
 {
 	struct bulk_cb_wrap *cbw = cmdbuf;
 	struct usbg_cmd *cmd;
-	struct usbg_tpg *tpg;
+	struct usbg_tpg *tpg = fu->tpg;
 	struct tcm_usbg_nexus *tv_nexus;
 	u32 cmd_len;
 
@@ -1207,28 +1216,20 @@ static int bot_submit_command(struct f_uas *fu,
 	if (cmd_len < 1 || cmd_len > 16)
 		return -EINVAL;
 
-	cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
-	if (!cmd)
-		return -ENOMEM;
-
-	cmd->fu = fu;
-
-	/* XXX until I figure out why I can't free in on complete */
-	kref_init(&cmd->ref);
-	kref_get(&cmd->ref);
-
-	tpg = fu->tpg;
-
-	memcpy(cmd->cmd_buf, cbw->CDB, cmd_len);
-
-	cmd->bot_tag = cbw->Tag;
-
 	tv_nexus = tpg->tpg_nexus;
 	if (!tv_nexus) {
 		pr_err("Missing nexus, ignoring command\n");
-		goto err;
+		return -ENODEV;
 	}
 
+	cmd = usbg_get_cmd(fu, tv_nexus, cbw->Tag);
+	if (IS_ERR(cmd)) {
+		pr_err("usbg_get_cmd failed\n");
+		return -ENOMEM;
+	}
+	memcpy(cmd->cmd_buf, cbw->CDB, cmd_len);
+
+	cmd->bot_tag = cbw->Tag;
 	cmd->prio_attr = TCM_SIMPLE_TAG;
 	cmd->unpacked_lun = cbw->Lun;
 	cmd->is_read = cbw->Flags & US_BULK_FLAG_IN ? 1 : 0;
@@ -1239,9 +1240,6 @@ static int bot_submit_command(struct f_uas *fu,
 	queue_work(tpg->workqueue, &cmd->work);
 
 	return 0;
-err:
-	kfree(cmd);
-	return -EINVAL;
 }
 
 /* Start fabric.c code */
@@ -1282,20 +1280,14 @@ static u32 usbg_tpg_get_inst_index(struct se_portal_group *se_tpg)
 	return 1;
 }
 
-static void usbg_cmd_release(struct kref *ref)
-{
-	struct usbg_cmd *cmd = container_of(ref, struct usbg_cmd,
-			ref);
-
-	transport_generic_free_cmd(&cmd->se_cmd, 0);
-}
-
 static void usbg_release_cmd(struct se_cmd *se_cmd)
 {
 	struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
 			se_cmd);
+	struct se_session *se_sess = se_cmd->se_sess;
+
 	kfree(cmd->data_buf);
-	kfree(cmd);
+	percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
 }
 
 static int usbg_shutdown_session(struct se_session *se_sess)
@@ -1579,55 +1571,48 @@ out:
 	return ret;
 }
 
+static int usbg_alloc_sess_cb(struct se_portal_group *se_tpg,
+			      struct se_session *se_sess, void *p)
+{
+	struct usbg_tpg *tpg = container_of(se_tpg,
+				struct usbg_tpg, se_tpg);
+
+	tpg->tpg_nexus = p;
+	return 0;
+}
+
 static int tcm_usbg_make_nexus(struct usbg_tpg *tpg, char *name)
 {
-	struct se_portal_group *se_tpg;
 	struct tcm_usbg_nexus *tv_nexus;
-	int ret;
+	int ret = 0;
 
 	mutex_lock(&tpg->tpg_mutex);
 	if (tpg->tpg_nexus) {
 		ret = -EEXIST;
 		pr_debug("tpg->tpg_nexus already exists\n");
-		goto err_unlock;
+		goto out_unlock;
 	}
-	se_tpg = &tpg->se_tpg;
 
-	ret = -ENOMEM;
 	tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
-	if (!tv_nexus)
-		goto err_unlock;
-	tv_nexus->tvn_se_sess = transport_init_session(TARGET_PROT_NORMAL);
-	if (IS_ERR(tv_nexus->tvn_se_sess))
-		goto err_free;
+	if (!tv_nexus) {
+		ret = -ENOMEM;
+		goto out_unlock;
+	}
 
-	/*
-	 * Since we are running in 'demo mode' this call with generate a
-	 * struct se_node_acl for the tcm_vhost struct se_portal_group with
-	 * the SCSI Initiator port name of the passed configfs group 'name'.
-	 */
-	tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
-			se_tpg, name);
-	if (!tv_nexus->tvn_se_sess->se_node_acl) {
+	tv_nexus->tvn_se_sess = target_alloc_session(&tpg->se_tpg,
+						     USB_G_DEFAULT_SESSION_TAGS,
+						     sizeof(struct usbg_cmd),
+						     TARGET_PROT_NORMAL, name,
+						     tv_nexus, usbg_alloc_sess_cb);
+	if (IS_ERR(tv_nexus->tvn_se_sess)) {
#define MAKE_NEXUS_MSG "core_tpg_check_initiator_node_acl() failed for %s\n"
 		pr_debug(MAKE_NEXUS_MSG, name);
#undef MAKE_NEXUS_MSG
-		goto err_session;
+		ret = PTR_ERR(tv_nexus->tvn_se_sess);
+		kfree(tv_nexus);
 	}
-	/*
-	 * Now register the TCM vHost virtual I_T Nexus as active.
-	 */
-	transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
-			tv_nexus->tvn_se_sess, tv_nexus);
-	tpg->tpg_nexus = tv_nexus;
-	mutex_unlock(&tpg->tpg_mutex);
-	return 0;
 
-err_session:
-	transport_free_session(tv_nexus->tvn_se_sess);
-err_free:
-	kfree(tv_nexus);
-err_unlock:
+out_unlock:
 	mutex_unlock(&tpg->tpg_mutex);
 	return ret;
 }
@@ -1735,11 +1720,7 @@ static void usbg_port_unlink(struct se_portal_group *se_tpg,
 
 static int usbg_check_stop_free(struct se_cmd *se_cmd)
 {
-	struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
-			se_cmd);
-
-	kref_put(&cmd->ref, usbg_cmd_release);
-	return 1;
+	return target_put_sess_cmd(se_cmd);
 }
 
 static const struct target_core_fabric_ops usbg_ops = {

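With the usb-gadget conversion, command descriptors come out of the
session's pre-allocated sess_cmd_map instead of per-I/O kzalloc(). The tag
lifecycle established by the new usbg_get_cmd()/usbg_release_cmd() pair is,
in sketch form:

	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
	cmd = &((struct usbg_cmd *)se_sess->sess_cmd_map)[tag];	/* submit */
	/* ... I/O runs; the final kref put ends in usbg_release_cmd() ... */
	percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);	/* release */
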
+ 2 - 0
drivers/usb/gadget/function/tcm.h

@@ -23,6 +23,8 @@ enum {
 #define USB_G_ALT_INT_BBB       0
 #define USB_G_ALT_INT_UAS       1
 
+#define USB_G_DEFAULT_SESSION_TAGS	128
+
 struct tcm_usbg_nexus {
 	struct se_session *tvn_se_sess;
 };

+ 41 - 58
drivers/vhost/scsi.c

@@ -1664,8 +1664,7 @@ static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
 	mutex_unlock(&vhost_scsi_mutex);
 	mutex_unlock(&vhost_scsi_mutex);
 }
 }
 
 
-static void vhost_scsi_free_cmd_map_res(struct vhost_scsi_nexus *nexus,
-				       struct se_session *se_sess)
+static void vhost_scsi_free_cmd_map_res(struct se_session *se_sess)
 {
 {
 	struct vhost_scsi_cmd *tv_cmd;
 	struct vhost_scsi_cmd *tv_cmd;
 	unsigned int i;
 	unsigned int i;
@@ -1721,98 +1720,82 @@ static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
 	NULL,
 };
 
-static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
-				const char *name)
+static int vhost_scsi_nexus_cb(struct se_portal_group *se_tpg,
+			       struct se_session *se_sess, void *p)
 {
-	struct se_portal_group *se_tpg;
-	struct se_session *se_sess;
-	struct vhost_scsi_nexus *tv_nexus;
 	struct vhost_scsi_cmd *tv_cmd;
 	unsigned int i;
 
-	mutex_lock(&tpg->tv_tpg_mutex);
-	if (tpg->tpg_nexus) {
-		mutex_unlock(&tpg->tv_tpg_mutex);
-		pr_debug("tpg->tpg_nexus already exists\n");
-		return -EEXIST;
-	}
-	se_tpg = &tpg->se_tpg;
-
-	tv_nexus = kzalloc(sizeof(struct vhost_scsi_nexus), GFP_KERNEL);
-	if (!tv_nexus) {
-		mutex_unlock(&tpg->tv_tpg_mutex);
-		pr_err("Unable to allocate struct vhost_scsi_nexus\n");
-		return -ENOMEM;
-	}
-	/*
-	 *  Initialize the struct se_session pointer and setup tagpool
-	 *  for struct vhost_scsi_cmd descriptors
-	 */
-	tv_nexus->tvn_se_sess = transport_init_session_tags(
-					VHOST_SCSI_DEFAULT_TAGS,
-					sizeof(struct vhost_scsi_cmd),
-					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS);
-	if (IS_ERR(tv_nexus->tvn_se_sess)) {
-		mutex_unlock(&tpg->tv_tpg_mutex);
-		kfree(tv_nexus);
-		return -ENOMEM;
-	}
-	se_sess = tv_nexus->tvn_se_sess;
 	for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
 		tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
 
 		tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) *
 					VHOST_SCSI_PREALLOC_SGLS, GFP_KERNEL);
 		if (!tv_cmd->tvc_sgl) {
-			mutex_unlock(&tpg->tv_tpg_mutex);
 			pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
 			goto out;
 		}
 
 		tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) *
-					VHOST_SCSI_PREALLOC_UPAGES, GFP_KERNEL);
+				VHOST_SCSI_PREALLOC_UPAGES, GFP_KERNEL);
 		if (!tv_cmd->tvc_upages) {
-			mutex_unlock(&tpg->tv_tpg_mutex);
 			pr_err("Unable to allocate tv_cmd->tvc_upages\n");
 			goto out;
 		}
 
 		tv_cmd->tvc_prot_sgl = kzalloc(sizeof(struct scatterlist) *
-					VHOST_SCSI_PREALLOC_PROT_SGLS, GFP_KERNEL);
+				VHOST_SCSI_PREALLOC_PROT_SGLS, GFP_KERNEL);
 		if (!tv_cmd->tvc_prot_sgl) {
-			mutex_unlock(&tpg->tv_tpg_mutex);
 			pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
 			goto out;
 		}
 	}
+	return 0;
+out:
+	vhost_scsi_free_cmd_map_res(se_sess);
+	return -ENOMEM;
+}
+
+static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
+				const char *name)
+{
+	struct se_portal_group *se_tpg;
+	struct vhost_scsi_nexus *tv_nexus;
+
+	mutex_lock(&tpg->tv_tpg_mutex);
+	if (tpg->tpg_nexus) {
+		mutex_unlock(&tpg->tv_tpg_mutex);
+		pr_debug("tpg->tpg_nexus already exists\n");
+		return -EEXIST;
+	}
+	se_tpg = &tpg->se_tpg;
+
+	tv_nexus = kzalloc(sizeof(struct vhost_scsi_nexus), GFP_KERNEL);
+	if (!tv_nexus) {
+		mutex_unlock(&tpg->tv_tpg_mutex);
+		pr_err("Unable to allocate struct vhost_scsi_nexus\n");
+		return -ENOMEM;
+	}
 	/*
 	 * Since we are running in 'demo mode' this call with generate a
 	 * struct se_node_acl for the vhost_scsi struct se_portal_group with
 	 * the SCSI Initiator port name of the passed configfs group 'name'.
 	 */
-	tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
-				se_tpg, (unsigned char *)name);
-	if (!tv_nexus->tvn_se_sess->se_node_acl) {
+	tv_nexus->tvn_se_sess = target_alloc_session(&tpg->se_tpg,
+					VHOST_SCSI_DEFAULT_TAGS,
+					sizeof(struct vhost_scsi_cmd),
+					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
+					(unsigned char *)name, tv_nexus,
+					vhost_scsi_nexus_cb);
+	if (IS_ERR(tv_nexus->tvn_se_sess)) {
 		mutex_unlock(&tpg->tv_tpg_mutex);
-		pr_debug("core_tpg_check_initiator_node_acl() failed"
-				" for %s\n", name);
-		goto out;
+		kfree(tv_nexus);
+		return -ENOMEM;
 	}
-	/*
-	 * Now register the TCM vhost virtual I_T Nexus as active.
-	 */
-	transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
-			tv_nexus->tvn_se_sess, tv_nexus);
 	tpg->tpg_nexus = tv_nexus;
 
 	mutex_unlock(&tpg->tv_tpg_mutex);
 	return 0;
-
-out:
-	vhost_scsi_free_cmd_map_res(tv_nexus, se_sess);
-	transport_free_session(se_sess);
-	kfree(tv_nexus);
-	return -ENOMEM;
 }
 
 static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
@@ -1853,7 +1836,7 @@ static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
 		" %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
 		tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
 
-	vhost_scsi_free_cmd_map_res(tv_nexus, se_sess);
+	vhost_scsi_free_cmd_map_res(se_sess);
 	/*
 	 * Release the SCSI I_T Nexus to the emulated vhost Target Port
 	 */
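The vhost-scsi conversion above is the template for the tree-wide target_alloc_session() changes in this pull: per-command setup moves into a callback that the core invokes after allocating the session and its tag pool, but before registering the I_T nexus, so one error path can unwind everything. A minimal sketch of the same pattern for a hypothetical fabric driver — my_tpg, my_nexus, my_cmd, MY_DEFAULT_TAGS and the callback names are illustrative, not code from this commit:

	/* Sketch only: runs with se_sess and sess_cmd_map already
	 * allocated; a nonzero return makes target_alloc_session()
	 * unwind the session on the caller's behalf. */
	static int my_sess_cb(struct se_portal_group *se_tpg,
			      struct se_session *se_sess, void *p)
	{
		struct my_nexus *nexus = p;	/* private pointer from the caller */

		nexus->se_sess = se_sess;
		return 0;
	}

	static int my_make_nexus(struct my_tpg *tpg, const char *name)
	{
		struct my_nexus *nexus;

		nexus = kzalloc(sizeof(*nexus), GFP_KERNEL);
		if (!nexus)
			return -ENOMEM;

		nexus->se_sess = target_alloc_session(&tpg->se_tpg,
						      MY_DEFAULT_TAGS,		/* tag pool depth */
						      sizeof(struct my_cmd),	/* per-tag descriptor */
						      TARGET_PROT_NORMAL, name,
						      nexus, my_sess_cb);
		if (IS_ERR(nexus->se_sess)) {
			kfree(nexus);
			return PTR_ERR(nexus->se_sess);
		}
		tpg->nexus = nexus;
		return 0;
	}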

+ 139 - 142
drivers/xen/xen-scsiback.c

@@ -141,6 +141,8 @@ struct scsiback_tmr {
 	wait_queue_head_t tmr_wait;
 };
 
+#define VSCSI_DEFAULT_SESSION_TAGS	128
+
 struct scsiback_nexus {
 	/* Pointer to TCM session for I_T Nexus */
 	struct se_session *tvn_se_sess;
@@ -190,7 +192,6 @@ module_param_named(max_buffer_pages, scsiback_max_buffer_pages, int, 0644);
 MODULE_PARM_DESC(max_buffer_pages,
 "Maximum number of free pages to keep in backend buffer");
 
-static struct kmem_cache *scsiback_cachep;
 static DEFINE_SPINLOCK(free_pages_lock);
 static int free_pages_num;
 static LIST_HEAD(scsiback_free_pages);
@@ -321,11 +322,11 @@ static void scsiback_free_translation_entry(struct kref *kref)
 	kfree(entry);
 }
 
-static void scsiback_do_resp_with_sense(char *sense_buffer, int32_t result,
-			uint32_t resid, struct vscsibk_pend *pending_req)
+static void scsiback_send_response(struct vscsibk_info *info,
+			char *sense_buffer, int32_t result, uint32_t resid,
+			uint16_t rqid)
 {
 	struct vscsiif_response *ring_res;
-	struct vscsibk_info *info = pending_req->info;
 	int notify;
 	struct scsi_sense_hdr sshdr;
 	unsigned long flags;
@@ -337,7 +338,7 @@ static void scsiback_do_resp_with_sense(char *sense_buffer, int32_t result,
 	info->ring.rsp_prod_pvt++;
 
 	ring_res->rslt   = result;
-	ring_res->rqid   = pending_req->rqid;
+	ring_res->rqid   = rqid;
 
 	if (sense_buffer != NULL &&
 	    scsi_normalize_sense(sense_buffer, VSCSIIF_SENSE_BUFFERSIZE,
@@ -357,6 +358,13 @@ static void scsiback_do_resp_with_sense(char *sense_buffer, int32_t result,
 
 	if (notify)
 		notify_remote_via_irq(info->irq);
+}
+
+static void scsiback_do_resp_with_sense(char *sense_buffer, int32_t result,
+			uint32_t resid, struct vscsibk_pend *pending_req)
+{
+	scsiback_send_response(pending_req->info, sense_buffer, result,
+			       resid, pending_req->rqid);
 
 	if (pending_req->v2p)
 		kref_put(&pending_req->v2p->kref,
@@ -380,6 +388,12 @@ static void scsiback_cmd_done(struct vscsibk_pend *pending_req)
 	scsiback_fast_flush_area(pending_req);
 	scsiback_do_resp_with_sense(sense_buffer, errors, resid, pending_req);
 	scsiback_put(info);
+	/*
+	 * Drop the extra KREF_ACK reference taken by target_submit_cmd_map_sgls()
+	 * ahead of scsiback_check_stop_free() ->  transport_generic_free_cmd()
+	 * final se_cmd->cmd_kref put.
+	 */
+	target_put_sess_cmd(&pending_req->se_cmd);
 }
 
 static void scsiback_cmd_exec(struct vscsibk_pend *pending_req)
@@ -388,16 +402,12 @@ static void scsiback_cmd_exec(struct vscsibk_pend *pending_req)
 	struct se_session *sess = pending_req->v2p->tpg->tpg_nexus->tvn_se_sess;
 	int rc;
 
-	memset(pending_req->sense_buffer, 0, VSCSIIF_SENSE_BUFFERSIZE);
-
-	memset(se_cmd, 0, sizeof(*se_cmd));
-
 	scsiback_get(pending_req->info);
 	se_cmd->tag = pending_req->rqid;
 	rc = target_submit_cmd_map_sgls(se_cmd, sess, pending_req->cmnd,
 			pending_req->sense_buffer, pending_req->v2p->lun,
 			pending_req->data_len, 0,
-			pending_req->sc_data_direction, 0,
+			pending_req->sc_data_direction, TARGET_SCF_ACK_KREF,
 			pending_req->sgl, pending_req->n_sg,
 			NULL, 0, NULL, 0);
 	if (rc < 0) {
@@ -586,45 +596,40 @@ static void scsiback_disconnect(struct vscsibk_info *info)
 static void scsiback_device_action(struct vscsibk_pend *pending_req,
 	enum tcm_tmreq_table act, int tag)
 {
-	int rc, err = FAILED;
 	struct scsiback_tpg *tpg = pending_req->v2p->tpg;
+	struct scsiback_nexus *nexus = tpg->tpg_nexus;
 	struct se_cmd *se_cmd = &pending_req->se_cmd;
 	struct scsiback_tmr *tmr;
+	u64 unpacked_lun = pending_req->v2p->lun;
+	int rc, err = FAILED;
 
 	tmr = kzalloc(sizeof(struct scsiback_tmr), GFP_KERNEL);
-	if (!tmr)
-		goto out;
+	if (!tmr) {
+		target_put_sess_cmd(se_cmd);
+		goto err;
+	}
 
 	init_waitqueue_head(&tmr->tmr_wait);
 
-	transport_init_se_cmd(se_cmd, tpg->se_tpg.se_tpg_tfo,
-		tpg->tpg_nexus->tvn_se_sess, 0, DMA_NONE, TCM_SIMPLE_TAG,
-		&pending_req->sense_buffer[0]);
-
-	rc = core_tmr_alloc_req(se_cmd, tmr, act, GFP_KERNEL);
-	if (rc < 0)
-		goto out;
-
-	se_cmd->se_tmr_req->ref_task_tag = tag;
-
-	if (transport_lookup_tmr_lun(se_cmd, pending_req->v2p->lun) < 0)
-		goto out;
+	rc = target_submit_tmr(&pending_req->se_cmd, nexus->tvn_se_sess,
+			       &pending_req->sense_buffer[0],
+			       unpacked_lun, tmr, act, GFP_KERNEL,
+			       tag, TARGET_SCF_ACK_KREF);
+	if (rc)
+		goto err;
 
-	transport_generic_handle_tmr(se_cmd);
 	wait_event(tmr->tmr_wait, atomic_read(&tmr->tmr_complete));
 
 	err = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ?
 		SUCCESS : FAILED;
 
-out:
-	if (tmr) {
-		transport_generic_free_cmd(&pending_req->se_cmd, 1);
+	scsiback_do_resp_with_sense(NULL, err, 0, pending_req);
+	transport_generic_free_cmd(&pending_req->se_cmd, 1);
+	return;
+err:
+	if (tmr)
 		kfree(tmr);
-	}
-
 	scsiback_do_resp_with_sense(NULL, err, 0, pending_req);
-
-	kmem_cache_free(scsiback_cachep, pending_req);
 }
 
 /*
@@ -653,15 +658,53 @@ out:
 	return entry;
 }
 
-static int prepare_pending_reqs(struct vscsibk_info *info,
-				struct vscsiif_request *ring_req,
-				struct vscsibk_pend *pending_req)
+static struct vscsibk_pend *scsiback_get_pend_req(struct vscsiif_back_ring *ring,
+				struct v2p_entry *v2p)
+{
+	struct scsiback_tpg *tpg = v2p->tpg;
+	struct scsiback_nexus *nexus = tpg->tpg_nexus;
+	struct se_session *se_sess = nexus->tvn_se_sess;
+	struct vscsibk_pend *req;
+	int tag, i;
+
+	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+	if (tag < 0) {
+		pr_err("Unable to obtain tag for vscsiif_request\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	req = &((struct vscsibk_pend *)se_sess->sess_cmd_map)[tag];
+	memset(req, 0, sizeof(*req));
+	req->se_cmd.map_tag = tag;
+
+	for (i = 0; i < VSCSI_MAX_GRANTS; i++)
+		req->grant_handles[i] = SCSIBACK_INVALID_HANDLE;
+
+	return req;
+}
+
+static struct vscsibk_pend *prepare_pending_reqs(struct vscsibk_info *info,
+				struct vscsiif_back_ring *ring,
+				struct vscsiif_request *ring_req)
 {
+	struct vscsibk_pend *pending_req;
 	struct v2p_entry *v2p;
 	struct ids_tuple vir;
 
-	pending_req->rqid       = ring_req->rqid;
-	pending_req->info       = info;
+	/* request range check from frontend */
+	if ((ring_req->sc_data_direction != DMA_BIDIRECTIONAL) &&
+		(ring_req->sc_data_direction != DMA_TO_DEVICE) &&
+		(ring_req->sc_data_direction != DMA_FROM_DEVICE) &&
+		(ring_req->sc_data_direction != DMA_NONE)) {
+		pr_debug("invalid parameter data_dir = %d\n",
+			ring_req->sc_data_direction);
+		return ERR_PTR(-EINVAL);
+	}
+	if (ring_req->cmd_len > VSCSIIF_MAX_COMMAND_SIZE) {
+		pr_debug("invalid parameter cmd_len = %d\n",
+			ring_req->cmd_len);
+		return ERR_PTR(-EINVAL);
+	}
 
 	vir.chn = ring_req->channel;
 	vir.tgt = ring_req->id;
@@ -669,33 +712,24 @@ static int prepare_pending_reqs(struct vscsibk_info *info,
 
 	v2p = scsiback_do_translation(info, &vir);
 	if (!v2p) {
-		pending_req->v2p = NULL;
 		pr_debug("the v2p of (chn:%d, tgt:%d, lun:%d) doesn't exist.\n",
-			vir.chn, vir.tgt, vir.lun);
-		return -ENODEV;
+			 vir.chn, vir.tgt, vir.lun);
+		return ERR_PTR(-ENODEV);
 	}
-	pending_req->v2p = v2p;
 
-	/* request range check from frontend */
-	pending_req->sc_data_direction = ring_req->sc_data_direction;
-	if ((pending_req->sc_data_direction != DMA_BIDIRECTIONAL) &&
-		(pending_req->sc_data_direction != DMA_TO_DEVICE) &&
-		(pending_req->sc_data_direction != DMA_FROM_DEVICE) &&
-		(pending_req->sc_data_direction != DMA_NONE)) {
-		pr_debug("invalid parameter data_dir = %d\n",
-			pending_req->sc_data_direction);
-		return -EINVAL;
+	pending_req = scsiback_get_pend_req(ring, v2p);
+	if (IS_ERR(pending_req)) {
+		kref_put(&v2p->kref, scsiback_free_translation_entry);
+		return ERR_PTR(-ENOMEM);
 	}
-
+	pending_req->rqid = ring_req->rqid;
+	pending_req->info = info;
+	pending_req->v2p = v2p;
+	pending_req->sc_data_direction = ring_req->sc_data_direction;
 	pending_req->cmd_len = ring_req->cmd_len;
-	if (pending_req->cmd_len > VSCSIIF_MAX_COMMAND_SIZE) {
-		pr_debug("invalid parameter cmd_len = %d\n",
-			pending_req->cmd_len);
-		return -EINVAL;
-	}
 	memcpy(pending_req->cmnd, ring_req->cmnd, pending_req->cmd_len);
 
-	return 0;
+	return pending_req;
 }
 
 static int scsiback_do_cmd_fn(struct vscsibk_info *info)
@@ -704,7 +738,7 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
 	struct vscsiif_request ring_req;
 	struct vscsibk_pend *pending_req;
 	RING_IDX rc, rp;
-	int err, more_to_do;
+	int more_to_do;
 	uint32_t result;
 
 	rc = ring->req_cons;
@@ -722,16 +756,13 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
 	while ((rc != rp)) {
 		if (RING_REQUEST_CONS_OVERFLOW(ring, rc))
 			break;
-		pending_req = kmem_cache_alloc(scsiback_cachep, GFP_KERNEL);
-		if (!pending_req)
-			return 1;
 
 		RING_COPY_REQUEST(ring, rc, &ring_req);
 		ring->req_cons = ++rc;
 
-		err = prepare_pending_reqs(info, &ring_req, pending_req);
-		if (err) {
-			switch (err) {
+		pending_req = prepare_pending_reqs(info, ring, &ring_req);
+		if (IS_ERR(pending_req)) {
+			switch (PTR_ERR(pending_req)) {
 			case -ENODEV:
 				result = DID_NO_CONNECT;
 				break;
@@ -739,9 +770,8 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
 				result = DRIVER_ERROR;
 				break;
 			}
-			scsiback_do_resp_with_sense(NULL, result << 24, 0,
-						    pending_req);
-			kmem_cache_free(scsiback_cachep, pending_req);
+			scsiback_send_response(info, NULL, result << 24, 0,
+					       ring_req.rqid);
 			return 1;
 		}
 
@@ -750,8 +780,8 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
 			if (scsiback_gnttab_data_map(&ring_req, pending_req)) {
 				scsiback_fast_flush_area(pending_req);
 				scsiback_do_resp_with_sense(NULL,
-					DRIVER_ERROR << 24, 0, pending_req);
-				kmem_cache_free(scsiback_cachep, pending_req);
+						DRIVER_ERROR << 24, 0, pending_req);
+				transport_generic_free_cmd(&pending_req->se_cmd, 0);
 			} else {
 				scsiback_cmd_exec(pending_req);
 			}
@@ -765,9 +795,9 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
 			break;
 		default:
 			pr_err_ratelimited("invalid request\n");
-			scsiback_do_resp_with_sense(NULL, DRIVER_ERROR << 24,
-						    0, pending_req);
-			kmem_cache_free(scsiback_cachep, pending_req);
+			scsiback_do_resp_with_sense(NULL, DRIVER_ERROR << 24, 0,
+						    pending_req);
+			transport_generic_free_cmd(&pending_req->se_cmd, 0);
 			break;
 		}
 
@@ -1353,24 +1383,20 @@ static u32 scsiback_tpg_get_inst_index(struct se_portal_group *se_tpg)
 
 static int scsiback_check_stop_free(struct se_cmd *se_cmd)
 {
-	/*
-	 * Do not release struct se_cmd's containing a valid TMR pointer.
-	 * These will be released directly in scsiback_device_action()
-	 * with transport_generic_free_cmd().
-	 */
-	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
-		return 0;
-
-	transport_generic_free_cmd(se_cmd, 0);
-	return 1;
+	return transport_generic_free_cmd(se_cmd, 0);
 }
 
 static void scsiback_release_cmd(struct se_cmd *se_cmd)
 {
-	struct vscsibk_pend *pending_req = container_of(se_cmd,
-				struct vscsibk_pend, se_cmd);
+	struct se_session *se_sess = se_cmd->se_sess;
+	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
+
+	if (se_tmr && se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
+		struct scsiback_tmr *tmr = se_tmr->fabric_tmr_ptr;
+		kfree(tmr);
+	}
 
-	kmem_cache_free(scsiback_cachep, pending_req);
+	percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
 }
 
 static int scsiback_shutdown_session(struct se_session *se_sess)
@@ -1494,61 +1520,49 @@ static struct configfs_attribute *scsiback_param_attrs[] = {
 	NULL,
 };
 
+static int scsiback_alloc_sess_cb(struct se_portal_group *se_tpg,
+				  struct se_session *se_sess, void *p)
+{
+	struct scsiback_tpg *tpg = container_of(se_tpg,
+				struct scsiback_tpg, se_tpg);
+
+	tpg->tpg_nexus = p;
+	return 0;
+}
+
 static int scsiback_make_nexus(struct scsiback_tpg *tpg,
 				const char *name)
 {
-	struct se_portal_group *se_tpg;
-	struct se_session *se_sess;
 	struct scsiback_nexus *tv_nexus;
+	int ret = 0;
 
 	mutex_lock(&tpg->tv_tpg_mutex);
 	if (tpg->tpg_nexus) {
-		mutex_unlock(&tpg->tv_tpg_mutex);
 		pr_debug("tpg->tpg_nexus already exists\n");
-		return -EEXIST;
+		ret = -EEXIST;
+		goto out_unlock;
 	}
-	se_tpg = &tpg->se_tpg;
 
 	tv_nexus = kzalloc(sizeof(struct scsiback_nexus), GFP_KERNEL);
 	if (!tv_nexus) {
-		mutex_unlock(&tpg->tv_tpg_mutex);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto out_unlock;
 	}
-	/*
-	 * Initialize the struct se_session pointer
-	 */
-	tv_nexus->tvn_se_sess = transport_init_session(TARGET_PROT_NORMAL);
+
+	tv_nexus->tvn_se_sess = target_alloc_session(&tpg->se_tpg,
						     VSCSI_DEFAULT_SESSION_TAGS,
						     sizeof(struct vscsibk_pend),
						     TARGET_PROT_NORMAL, name,
						     tv_nexus, scsiback_alloc_sess_cb);
 	if (IS_ERR(tv_nexus->tvn_se_sess)) {
-		mutex_unlock(&tpg->tv_tpg_mutex);
 		kfree(tv_nexus);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto out_unlock;
 	}
-	se_sess = tv_nexus->tvn_se_sess;
-	/*
-	 * Since we are running in 'demo mode' this call with generate a
-	 * struct se_node_acl for the scsiback struct se_portal_group with
-	 * the SCSI Initiator port name of the passed configfs group 'name'.
-	 */
-	tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
-				se_tpg, (unsigned char *)name);
-	if (!tv_nexus->tvn_se_sess->se_node_acl) {
-		mutex_unlock(&tpg->tv_tpg_mutex);
-		pr_debug("core_tpg_check_initiator_node_acl() failed for %s\n",
-			 name);
-		goto out;
-	}
-	/* Now register the TCM pvscsi virtual I_T Nexus as active. */
-	transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
-			tv_nexus->tvn_se_sess, tv_nexus);
-	tpg->tpg_nexus = tv_nexus;
 
+out_unlock:
 	mutex_unlock(&tpg->tv_tpg_mutex);
-	return 0;
-
-out:
-	transport_free_session(se_sess);
-	kfree(tv_nexus);
-	return -ENOMEM;
+	return ret;
 }
 
 static int scsiback_drop_nexus(struct scsiback_tpg *tpg)
@@ -1866,16 +1880,6 @@ static struct xenbus_driver scsiback_driver = {
 	.otherend_changed	= scsiback_frontend_changed
 };
 
-static void scsiback_init_pend(void *p)
-{
-	struct vscsibk_pend *pend = p;
-	int i;
-
-	memset(pend, 0, sizeof(*pend));
-	for (i = 0; i < VSCSI_MAX_GRANTS; i++)
-		pend->grant_handles[i] = SCSIBACK_INVALID_HANDLE;
-}
-
 static int __init scsiback_init(void)
 {
 	int ret;
@@ -1886,14 +1890,9 @@ static int __init scsiback_init(void)
 	pr_debug("xen-pvscsi: fabric module %s on %s/%s on "UTS_RELEASE"\n",
 		 VSCSI_VERSION, utsname()->sysname, utsname()->machine);
 
-	scsiback_cachep = kmem_cache_create("vscsiif_cache",
-		sizeof(struct vscsibk_pend), 0, 0, scsiback_init_pend);
-	if (!scsiback_cachep)
-		return -ENOMEM;
-
 	ret = xenbus_register_backend(&scsiback_driver);
 	if (ret)
-		goto out_cache_destroy;
+		goto out;
 
 	ret = target_register_template(&scsiback_ops);
 	if (ret)
@@ -1903,8 +1902,7 @@ static int __init scsiback_init(void)
 
 out_unregister_xenbus:
 	xenbus_unregister_driver(&scsiback_driver);
-out_cache_destroy:
-	kmem_cache_destroy(scsiback_cachep);
+out:
 	pr_err("%s: error %d\n", __func__, ret);
 	return ret;
 }
@@ -1920,7 +1918,6 @@ static void __exit scsiback_exit(void)
 	}
 	target_unregister_template(&scsiback_ops);
 	xenbus_unregister_driver(&scsiback_driver);
-	kmem_cache_destroy(scsiback_cachep);
 }
 
 module_init(scsiback_init);
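With the session-wide tag pool in place, xen-scsiback drops its private kmem_cache entirely: request descriptors live in se_sess->sess_cmd_map, addressed by percpu_ida tags, and TARGET_SCF_ACK_KREF keeps an extra reference on each command until the backend has put its response on the ring. The allocate and release halves of that cycle, condensed from the hunks above — my_fabric_cmd stands in for the driver's per-request struct (here, struct vscsibk_pend):

	static struct my_fabric_cmd *my_get_cmd(struct se_session *se_sess)
	{
		struct my_fabric_cmd *cmd;
		int tag;

		/* TASK_RUNNING: fail instead of sleeping when the pool is empty */
		tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
		if (tag < 0)
			return NULL;

		cmd = &((struct my_fabric_cmd *)se_sess->sess_cmd_map)[tag];
		memset(cmd, 0, sizeof(*cmd));
		cmd->se_cmd.map_tag = tag;	/* remembered for the free side */
		return cmd;
	}

	/* ->release_cmd() counterpart: runs on the final cmd_kref put, after
	 * the fabric has dropped its TARGET_SCF_ACK_KREF reference with
	 * target_put_sess_cmd() in its completion path. */
	static void my_release_cmd(struct se_cmd *se_cmd)
	{
		percpu_ida_free(&se_cmd->se_sess->sess_tag_pool, se_cmd->map_tag);
	}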

+ 2 - 9
include/target/target_core_base.h

@@ -144,12 +144,6 @@ enum se_cmd_flags_table {
 	SCF_USE_CPUID			= 0x00800000,
 };
 
-/* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
-enum transport_lunflags_table {
-	TRANSPORT_LUNFLAGS_READ_ONLY		= 0x01,
-	TRANSPORT_LUNFLAGS_READ_WRITE		= 0x02,
-};
-
 /*
  * Used by transport_send_check_condition_and_sense()
  * to signal which ASC/ASCQ sense payload should be built.
@@ -633,11 +627,10 @@ struct se_lun_acl {
 };
 
 struct se_dev_entry {
-	/* See transport_lunflags_table */
 	u64			mapped_lun;
 	u64			pr_res_key;
 	u64			creation_time;
-	u32			lun_flags;
+	bool			lun_access_ro;
 	u32			attach_count;
 	atomic_long_t		total_cmds;
 	atomic_long_t		read_bytes;
@@ -711,7 +704,7 @@ struct se_lun {
 	u64			unpacked_lun;
#define SE_LUN_LINK_MAGIC			0xffff7771
 	u32			lun_link_magic;
-	u32			lun_access;
+	bool			lun_access_ro;
 	u32			lun_index;
 
 	/* RELATIVE TARGET PORT IDENTIFER */
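The removed enum encoded one bit of information — a LUN mapping is either read-only or read-write — in two flag values; the replacement stores the state directly. An illustrative before/after of a call-site test (not a hunk from this commit; the TCM_WRITE_PROTECTED reaction is a hypothetical consumer):

	/* before: mask test against the now-removed flag values */
	if (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)
		return TCM_WRITE_PROTECTED;

	/* after: the boolean is stored directly */
	if (deve->lun_access_ro)
		return TCM_WRITE_PROTECTED;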

+ 6 - 0
include/target/target_core_fabric.h

@@ -108,6 +108,12 @@ void target_unregister_template(const struct target_core_fabric_ops *fo);
 int target_depend_item(struct config_item *item);
 void target_undepend_item(struct config_item *item);
 
+struct se_session *target_alloc_session(struct se_portal_group *,
+		unsigned int, unsigned int, enum target_prot_op prot_op,
+		const char *, void *,
+		int (*callback)(struct se_portal_group *,
+				struct se_session *, void *));
+
 struct se_session *transport_init_session(enum target_prot_op);
 int transport_alloc_session_tags(struct se_session *, unsigned int,
 		unsigned int);
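The new helper bundles the boilerplate each fabric previously open-coded — exactly the sequence the vhost-scsi and xen-scsiback hunks above delete: session init, optional tag-pool sizing, demo-mode node-ACL lookup by initiator name, a fabric callback, and session registration. A sketch of the helper's contract, reconstructed from its call sites in this commit rather than copied from its actual body:

	struct se_session *
	target_alloc_session(struct se_portal_group *tpg,
			     unsigned int tag_num, unsigned int tag_size,
			     enum target_prot_op prot_op,
			     const char *initiatorname, void *private,
			     int (*callback)(struct se_portal_group *,
					     struct se_session *, void *))
	{
		struct se_session *sess;
		int rc;

		sess = transport_init_session(prot_op);
		if (IS_ERR(sess))
			return sess;

		/* tag_num == 0 is how non-tagged fabrics skip the pool */
		if (tag_num) {
			rc = transport_alloc_session_tags(sess, tag_num, tag_size);
			if (rc < 0)
				goto free_sess;
		}

		sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg,
					(unsigned char *)initiatorname);
		if (!sess->se_node_acl) {
			rc = -EACCES;
			goto free_sess;
		}

		/* fabric-private setup; a nonzero return unwinds the session */
		if (callback) {
			rc = callback(tpg, sess, private);
			if (rc < 0)
				goto free_sess;
		}

		transport_register_session(tpg, sess->se_node_acl, sess, private);
		return sess;

	free_sess:
		transport_free_session(sess);
		return ERR_PTR(rc);
	}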

+ 1 - 0
include/uapi/linux/target_core_user.h

@@ -41,6 +41,7 @@
 
 #define TCMU_MAILBOX_VERSION 2
 #define ALIGN_SIZE 64 /* Should be enough for most CPUs */
+#define TCMU_MAILBOX_FLAG_CAP_OOOC (1 << 0) /* Out-of-order completions */
 
 struct tcmu_mailbox {
 	__u16 version;
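Userspace pairs the new capability bit with the version check it already performs on the shared mailbox; only when the bit is set may a handler complete ring entries in a different order than they were queued. A plausible handler-side check — the mmap() details and the exact position of the flags field are assumptions here, not part of this hunk:

	#include <stdbool.h>
	#include <stddef.h>
	#include <sys/mman.h>
	#include <linux/target_core_user.h>

	/* dev_fd: an open TCMU uio device; map_len: size of its shared map */
	static int tcmu_check_caps(int dev_fd, size_t map_len, bool *oooc)
	{
		struct tcmu_mailbox *mb;

		mb = mmap(NULL, map_len, PROT_READ | PROT_WRITE, MAP_SHARED,
			  dev_fd, 0);
		if (mb == MAP_FAILED)
			return -1;

		if (mb->version != TCMU_MAILBOX_VERSION)
			return -1;	/* incompatible kernel; handler should abort */

		/* Out-of-order completion is opt-in: without the bit, commands
		 * must be completed strictly in submission order. */
		*oooc = !!(mb->flags & TCMU_MAILBOX_FLAG_CAP_OOOC);
		return 0;
	}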