@@ -76,12 +76,12 @@ isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
- struct isert_conn *isert_conn = (struct isert_conn *)context;
+ struct isert_conn *isert_conn = context;

isert_err("conn %p event: %d\n", isert_conn, e->event);
switch (e->event) {
case IB_EVENT_COMM_EST:
- rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
+ rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST);
break;
case IB_EVENT_QP_LAST_WQE_REACHED:
isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n");
@@ -107,13 +107,12 @@ isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
return 0;
}

-static int
-isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
+static struct isert_comp *
+isert_comp_get(struct isert_conn *isert_conn)
{
- struct isert_device *device = isert_conn->conn_device;
- struct ib_qp_init_attr attr;
+ struct isert_device *device = isert_conn->device;
struct isert_comp *comp;
- int ret, i, min = 0;
+ int i, min = 0;

mutex_lock(&device_list_mutex);
for (i = 0; i < device->comps_used; i++)
@@ -122,9 +121,30 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
min = i;
comp = &device->comps[min];
comp->active_qps++;
+ mutex_unlock(&device_list_mutex);
+
isert_info("conn %p, using comp %p min_index: %d\n",
isert_conn, comp, min);
+
+ return comp;
+}
+
+static void
+isert_comp_put(struct isert_comp *comp)
+{
+ mutex_lock(&device_list_mutex);
+ comp->active_qps--;
mutex_unlock(&device_list_mutex);
+}
+
+static struct ib_qp *
+isert_create_qp(struct isert_conn *isert_conn,
+ struct isert_comp *comp,
+ struct rdma_cm_id *cma_id)
+{
+ struct isert_device *device = isert_conn->device;
+ struct ib_qp_init_attr attr;
+ int ret;

memset(&attr, 0, sizeof(struct ib_qp_init_attr));
attr.event_handler = isert_qp_event_callback;
@@ -149,19 +169,31 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
if (device->pi_capable)
attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;

- ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
+ ret = rdma_create_qp(cma_id, device->pd, &attr);
if (ret) {
isert_err("rdma_create_qp failed for cma_id %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ return cma_id->qp;
+}
+
+static int
+isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
+{
+ struct isert_comp *comp;
+ int ret;
+
+ comp = isert_comp_get(isert_conn);
+ isert_conn->qp = isert_create_qp(isert_conn, comp, cma_id);
+ if (IS_ERR(isert_conn->qp)) {
+ ret = PTR_ERR(isert_conn->qp);
goto err;
}
- isert_conn->conn_qp = cma_id->qp;

return 0;
err:
- mutex_lock(&device_list_mutex);
- comp->active_qps--;
- mutex_unlock(&device_list_mutex);
-
+ isert_comp_put(comp);
return ret;
}

@@ -174,18 +206,19 @@ isert_cq_event_callback(struct ib_event *e, void *context)
static int
isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
{
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct isert_device *device = isert_conn->device;
+ struct ib_device *ib_dev = device->ib_device;
struct iser_rx_desc *rx_desc;
struct ib_sge *rx_sg;
u64 dma_addr;
int i, j;

- isert_conn->conn_rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
+ isert_conn->rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
sizeof(struct iser_rx_desc), GFP_KERNEL);
- if (!isert_conn->conn_rx_descs)
+ if (!isert_conn->rx_descs)
goto fail;

- rx_desc = isert_conn->conn_rx_descs;
+ rx_desc = isert_conn->rx_descs;

for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
@@ -198,21 +231,21 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
rx_sg = &rx_desc->rx_sg;
rx_sg->addr = rx_desc->dma_addr;
rx_sg->length = ISER_RX_PAYLOAD_SIZE;
- rx_sg->lkey = isert_conn->conn_mr->lkey;
+ rx_sg->lkey = device->mr->lkey;
}

- isert_conn->conn_rx_desc_head = 0;
+ isert_conn->rx_desc_head = 0;

return 0;

dma_map_fail:
- rx_desc = isert_conn->conn_rx_descs;
+ rx_desc = isert_conn->rx_descs;
for (j = 0; j < i; j++, rx_desc++) {
ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
}
- kfree(isert_conn->conn_rx_descs);
- isert_conn->conn_rx_descs = NULL;
+ kfree(isert_conn->rx_descs);
+ isert_conn->rx_descs = NULL;
fail:
isert_err("conn %p failed to allocate rx descriptors\n", isert_conn);

@@ -222,59 +255,51 @@ fail:
static void
isert_free_rx_descriptors(struct isert_conn *isert_conn)
{
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct ib_device *ib_dev = isert_conn->device->ib_device;
struct iser_rx_desc *rx_desc;
int i;

- if (!isert_conn->conn_rx_descs)
+ if (!isert_conn->rx_descs)
return;

- rx_desc = isert_conn->conn_rx_descs;
+ rx_desc = isert_conn->rx_descs;
for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
}

- kfree(isert_conn->conn_rx_descs);
- isert_conn->conn_rx_descs = NULL;
+ kfree(isert_conn->rx_descs);
+ isert_conn->rx_descs = NULL;
}

static void isert_cq_work(struct work_struct *);
static void isert_cq_callback(struct ib_cq *, void *);

-static int
-isert_create_device_ib_res(struct isert_device *device)
+static void
+isert_free_comps(struct isert_device *device)
{
- struct ib_device *ib_dev = device->ib_device;
- struct ib_device_attr *dev_attr;
- int ret = 0, i;
- int max_cqe;
-
- dev_attr = &device->dev_attr;
- ret = isert_query_device(ib_dev, dev_attr);
- if (ret)
- return ret;
+ int i;

- max_cqe = min(ISER_MAX_CQ_LEN, dev_attr->max_cqe);
+ for (i = 0; i < device->comps_used; i++) {
+ struct isert_comp *comp = &device->comps[i];

- /* asign function handlers */
- if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
- dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) {
- device->use_fastreg = 1;
- device->reg_rdma_mem = isert_reg_rdma;
- device->unreg_rdma_mem = isert_unreg_rdma;
- } else {
- device->use_fastreg = 0;
- device->reg_rdma_mem = isert_map_rdma;
- device->unreg_rdma_mem = isert_unmap_cmd;
+ if (comp->cq) {
+ cancel_work_sync(&comp->work);
+ ib_destroy_cq(comp->cq);
+ }
}
+ kfree(device->comps);
+}

- /* Check signature cap */
- device->pi_capable = dev_attr->device_cap_flags &
- IB_DEVICE_SIGNATURE_HANDOVER ? true : false;
+static int
+isert_alloc_comps(struct isert_device *device,
+ struct ib_device_attr *attr)
+{
+ int i, max_cqe, ret = 0;

device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(),
- device->ib_device->num_comp_vectors));
+ device->ib_device->num_comp_vectors));
+
isert_info("Using %d CQs, %s supports %d vectors support "
"Fast registration %d pi_capable %d\n",
device->comps_used, device->ib_device->name,
@@ -288,6 +313,8 @@ isert_create_device_ib_res(struct isert_device *device)
return -ENOMEM;
}

+ max_cqe = min(ISER_MAX_CQ_LEN, attr->max_cqe);
+
for (i = 0; i < device->comps_used; i++) {
struct isert_comp *comp = &device->comps[i];

@@ -299,6 +326,7 @@ isert_create_device_ib_res(struct isert_device *device)
(void *)comp,
max_cqe, i);
if (IS_ERR(comp->cq)) {
+ isert_err("Unable to allocate cq\n");
ret = PTR_ERR(comp->cq);
comp->cq = NULL;
goto out_cq;
@@ -310,40 +338,79 @@ isert_create_device_ib_res(struct isert_device *device)
}

return 0;
-
out_cq:
- for (i = 0; i < device->comps_used; i++) {
- struct isert_comp *comp = &device->comps[i];
+ isert_free_comps(device);
+ return ret;
+}

- if (comp->cq) {
- cancel_work_sync(&comp->work);
- ib_destroy_cq(comp->cq);
- }
+static int
+isert_create_device_ib_res(struct isert_device *device)
+{
+ struct ib_device_attr *dev_attr;
+ int ret;
+
+ dev_attr = &device->dev_attr;
+ ret = isert_query_device(device->ib_device, dev_attr);
+ if (ret)
+ return ret;
+
+ /* asign function handlers */
+ if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
+ dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) {
+ device->use_fastreg = 1;
+ device->reg_rdma_mem = isert_reg_rdma;
+ device->unreg_rdma_mem = isert_unreg_rdma;
+ } else {
+ device->use_fastreg = 0;
+ device->reg_rdma_mem = isert_map_rdma;
+ device->unreg_rdma_mem = isert_unmap_cmd;
}
- kfree(device->comps);

+ ret = isert_alloc_comps(device, dev_attr);
+ if (ret)
+ return ret;
+
+ device->pd = ib_alloc_pd(device->ib_device);
+ if (IS_ERR(device->pd)) {
+ ret = PTR_ERR(device->pd);
+ isert_err("failed to allocate pd, device %p, ret=%d\n",
+ device, ret);
+ goto out_cq;
+ }
+
+ device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE);
+ if (IS_ERR(device->mr)) {
+ ret = PTR_ERR(device->mr);
+ isert_err("failed to create dma mr, device %p, ret=%d\n",
+ device, ret);
+ goto out_mr;
+ }
+
+ /* Check signature cap */
+ device->pi_capable = dev_attr->device_cap_flags &
+ IB_DEVICE_SIGNATURE_HANDOVER ? true : false;
+
+ return 0;
+
+out_mr:
+ ib_dealloc_pd(device->pd);
+out_cq:
+ isert_free_comps(device);
return ret;
}

static void
isert_free_device_ib_res(struct isert_device *device)
{
- int i;
-
isert_info("device %p\n", device);

- for (i = 0; i < device->comps_used; i++) {
- struct isert_comp *comp = &device->comps[i];
-
- cancel_work_sync(&comp->work);
- ib_destroy_cq(comp->cq);
- comp->cq = NULL;
- }
- kfree(device->comps);
+ ib_dereg_mr(device->mr);
+ ib_dealloc_pd(device->pd);
+ isert_free_comps(device);
}

static void
-isert_device_try_release(struct isert_device *device)
+isert_device_put(struct isert_device *device)
{
mutex_lock(&device_list_mutex);
device->refcount--;
@@ -357,7 +424,7 @@ isert_device_try_release(struct isert_device *device)
}

static struct isert_device *
-isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
+isert_device_get(struct rdma_cm_id *cma_id)
{
struct isert_device *device;
int ret;
@@ -404,13 +471,13 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
struct fast_reg_descriptor *fr_desc, *tmp;
int i = 0;

- if (list_empty(&isert_conn->conn_fr_pool))
+ if (list_empty(&isert_conn->fr_pool))
return;

isert_info("Freeing conn %p fastreg pool", isert_conn);

list_for_each_entry_safe(fr_desc, tmp,
- &isert_conn->conn_fr_pool, list) {
+ &isert_conn->fr_pool, list) {
list_del(&fr_desc->list);
ib_free_fast_reg_page_list(fr_desc->data_frpl);
ib_dereg_mr(fr_desc->data_mr);
@@ -424,9 +491,9 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
++i;
}

- if (i < isert_conn->conn_fr_pool_size)
+ if (i < isert_conn->fr_pool_size)
isert_warn("Pool still has %d regions registered\n",
- isert_conn->conn_fr_pool_size - i);
+ isert_conn->fr_pool_size - i);
}

static int
@@ -526,7 +593,7 @@ static int
isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
{
struct fast_reg_descriptor *fr_desc;
- struct isert_device *device = isert_conn->conn_device;
+ struct isert_device *device = isert_conn->device;
struct se_session *se_sess = isert_conn->conn->sess->se_sess;
struct se_node_acl *se_nacl = se_sess->se_node_acl;
int i, ret, tag_num;
@@ -537,7 +604,7 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth);
tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;

- isert_conn->conn_fr_pool_size = 0;
+ isert_conn->fr_pool_size = 0;
for (i = 0; i < tag_num; i++) {
fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
if (!fr_desc) {
@@ -547,7 +614,7 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
}

ret = isert_create_fr_desc(device->ib_device,
- isert_conn->conn_pd, fr_desc);
+ device->pd, fr_desc);
if (ret) {
isert_err("Failed to create fastreg descriptor err=%d\n",
ret);
@@ -555,12 +622,12 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
goto err;
}

- list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
- isert_conn->conn_fr_pool_size++;
+ list_add_tail(&fr_desc->list, &isert_conn->fr_pool);
+ isert_conn->fr_pool_size++;
}

isert_dbg("Creating conn %p fastreg pool size=%d",
- isert_conn, isert_conn->conn_fr_pool_size);
+ isert_conn, isert_conn->fr_pool_size);

return 0;

@@ -569,55 +636,50 @@ err:
return ret;
}

-static int
-isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+static void
+isert_init_conn(struct isert_conn *isert_conn)
{
- struct isert_np *isert_np = cma_id->context;
- struct iscsi_np *np = isert_np->np;
- struct isert_conn *isert_conn;
- struct isert_device *device;
- struct ib_device *ib_dev = cma_id->device;
- int ret = 0;
-
- spin_lock_bh(&np->np_thread_lock);
- if (!np->enabled) {
- spin_unlock_bh(&np->np_thread_lock);
- isert_dbg("iscsi_np is not enabled, reject connect request\n");
- return rdma_reject(cma_id, NULL, 0);
- }
- spin_unlock_bh(&np->np_thread_lock);
-
- isert_dbg("cma_id: %p, portal: %p\n",
- cma_id, cma_id->context);
-
- isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
- if (!isert_conn) {
- isert_err("Unable to allocate isert_conn\n");
- return -ENOMEM;
- }
isert_conn->state = ISER_CONN_INIT;
- INIT_LIST_HEAD(&isert_conn->conn_accept_node);
- init_completion(&isert_conn->conn_login_comp);
+ INIT_LIST_HEAD(&isert_conn->accept_node);
+ init_completion(&isert_conn->login_comp);
init_completion(&isert_conn->login_req_comp);
- init_completion(&isert_conn->conn_wait);
- kref_init(&isert_conn->conn_kref);
- mutex_init(&isert_conn->conn_mutex);
- spin_lock_init(&isert_conn->conn_lock);
- INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
+ init_completion(&isert_conn->wait);
+ kref_init(&isert_conn->kref);
+ mutex_init(&isert_conn->mutex);
+ spin_lock_init(&isert_conn->pool_lock);
+ INIT_LIST_HEAD(&isert_conn->fr_pool);
+}
+
+static void
+isert_free_login_buf(struct isert_conn *isert_conn)
+{
+ struct ib_device *ib_dev = isert_conn->device->ib_device;

- isert_conn->conn_cm_id = cma_id;
+ ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
+ ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
+ ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
+ ISCSI_DEF_MAX_RECV_SEG_LEN,
+ DMA_FROM_DEVICE);
+ kfree(isert_conn->login_buf);
+}
+
+static int
+isert_alloc_login_buf(struct isert_conn *isert_conn,
+ struct ib_device *ib_dev)
+{
+ int ret;

isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
ISER_RX_LOGIN_SIZE, GFP_KERNEL);
if (!isert_conn->login_buf) {
isert_err("Unable to allocate isert_conn->login_buf\n");
- ret = -ENOMEM;
- goto out;
+ return -ENOMEM;
}

isert_conn->login_req_buf = isert_conn->login_buf;
isert_conn->login_rsp_buf = isert_conn->login_buf +
ISCSI_DEF_MAX_RECV_SEG_LEN;
+
isert_dbg("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
isert_conn->login_buf, isert_conn->login_req_buf,
isert_conn->login_rsp_buf);
@@ -628,8 +690,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)

ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
if (ret) {
- isert_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
- ret);
+ isert_err("login_req_dma mapping error: %d\n", ret);
isert_conn->login_req_dma = 0;
goto out_login_buf;
}
@@ -640,17 +701,58 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)

ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
if (ret) {
- isert_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
- ret);
+ isert_err("login_rsp_dma mapping error: %d\n", ret);
isert_conn->login_rsp_dma = 0;
goto out_req_dma_map;
}

- device = isert_device_find_by_ib_dev(cma_id);
+ return 0;
+
+out_req_dma_map:
+ ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
+ ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
+out_login_buf:
+ kfree(isert_conn->login_buf);
+ return ret;
+}
+
+static int
+isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+{
+ struct isert_np *isert_np = cma_id->context;
+ struct iscsi_np *np = isert_np->np;
+ struct isert_conn *isert_conn;
+ struct isert_device *device;
+ int ret = 0;
+
+ spin_lock_bh(&np->np_thread_lock);
+ if (!np->enabled) {
+ spin_unlock_bh(&np->np_thread_lock);
+ isert_dbg("iscsi_np is not enabled, reject connect request\n");
+ return rdma_reject(cma_id, NULL, 0);
+ }
+ spin_unlock_bh(&np->np_thread_lock);
+
+ isert_dbg("cma_id: %p, portal: %p\n",
+ cma_id, cma_id->context);
+
+ isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
+ if (!isert_conn)
+ return -ENOMEM;
+
+ isert_init_conn(isert_conn);
+ isert_conn->cm_id = cma_id;
+
+ ret = isert_alloc_login_buf(isert_conn, cma_id->device);
+ if (ret)
+ goto out;
+
+ device = isert_device_get(cma_id);
if (IS_ERR(device)) {
ret = PTR_ERR(device);
goto out_rsp_dma_map;
}
+ isert_conn->device = device;

/* Set max inflight RDMA READ requests */
isert_conn->initiator_depth = min_t(u8,
@@ -658,24 +760,6 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
device->dev_attr.max_qp_init_rd_atom);
isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth);

- isert_conn->conn_device = device;
- isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device);
- if (IS_ERR(isert_conn->conn_pd)) {
- ret = PTR_ERR(isert_conn->conn_pd);
- isert_err("ib_alloc_pd failed for conn %p: ret=%d\n",
- isert_conn, ret);
- goto out_pd;
- }
-
- isert_conn->conn_mr = ib_get_dma_mr(isert_conn->conn_pd,
- IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(isert_conn->conn_mr)) {
- ret = PTR_ERR(isert_conn->conn_mr);
- isert_err("ib_get_dma_mr failed for conn %p: ret=%d\n",
- isert_conn, ret);
- goto out_mr;
- }
-
ret = isert_conn_setup_qp(isert_conn, cma_id);
if (ret)
goto out_conn_dev;
@@ -689,7 +773,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
goto out_conn_dev;

mutex_lock(&isert_np->np_accept_mutex);
- list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
+ list_add_tail(&isert_conn->accept_node, &isert_np->np_accept_list);
mutex_unlock(&isert_np->np_accept_mutex);

isert_info("np %p: Allow accept_np to continue\n", np);
@@ -697,19 +781,9 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
return 0;

out_conn_dev:
- ib_dereg_mr(isert_conn->conn_mr);
-out_mr:
- ib_dealloc_pd(isert_conn->conn_pd);
-out_pd:
- isert_device_try_release(device);
+ isert_device_put(device);
out_rsp_dma_map:
- ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
- ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
-out_req_dma_map:
- ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
- ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
-out_login_buf:
- kfree(isert_conn->login_buf);
+ isert_free_login_buf(isert_conn);
out:
kfree(isert_conn);
rdma_reject(cma_id, NULL, 0);
@@ -719,43 +793,32 @@ out:
static void
isert_connect_release(struct isert_conn *isert_conn)
{
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
- struct isert_device *device = isert_conn->conn_device;
+ struct isert_device *device = isert_conn->device;

isert_dbg("conn %p\n", isert_conn);

- if (device && device->use_fastreg)
+ BUG_ON(!device);
+
+ if (device->use_fastreg)
isert_conn_free_fastreg_pool(isert_conn);

isert_free_rx_descriptors(isert_conn);
- rdma_destroy_id(isert_conn->conn_cm_id);
+ if (isert_conn->cm_id)
+ rdma_destroy_id(isert_conn->cm_id);

- if (isert_conn->conn_qp) {
- struct isert_comp *comp = isert_conn->conn_qp->recv_cq->cq_context;
+ if (isert_conn->qp) {
+ struct isert_comp *comp = isert_conn->qp->recv_cq->cq_context;

- isert_dbg("dec completion context %p active_qps\n", comp);
- mutex_lock(&device_list_mutex);
- comp->active_qps--;
- mutex_unlock(&device_list_mutex);
-
- ib_destroy_qp(isert_conn->conn_qp);
+ isert_comp_put(comp);
+ ib_destroy_qp(isert_conn->qp);
}

- ib_dereg_mr(isert_conn->conn_mr);
- ib_dealloc_pd(isert_conn->conn_pd);
+ if (isert_conn->login_buf)
+ isert_free_login_buf(isert_conn);

- if (isert_conn->login_buf) {
- ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
- ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
- ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
- ISCSI_DEF_MAX_RECV_SEG_LEN,
- DMA_FROM_DEVICE);
- kfree(isert_conn->login_buf);
- }
- kfree(isert_conn);
+ isert_device_put(device);

- if (device)
- isert_device_try_release(device);
+ kfree(isert_conn);
}

static void
@@ -765,22 +828,22 @@ isert_connected_handler(struct rdma_cm_id *cma_id)

isert_info("conn %p\n", isert_conn);

- if (!kref_get_unless_zero(&isert_conn->conn_kref)) {
+ if (!kref_get_unless_zero(&isert_conn->kref)) {
isert_warn("conn %p connect_release is running\n", isert_conn);
return;
}

- mutex_lock(&isert_conn->conn_mutex);
+ mutex_lock(&isert_conn->mutex);
if (isert_conn->state != ISER_CONN_FULL_FEATURE)
isert_conn->state = ISER_CONN_UP;
- mutex_unlock(&isert_conn->conn_mutex);
+ mutex_unlock(&isert_conn->mutex);
}

static void
-isert_release_conn_kref(struct kref *kref)
+isert_release_kref(struct kref *kref)
{
struct isert_conn *isert_conn = container_of(kref,
- struct isert_conn, conn_kref);
+ struct isert_conn, kref);

isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm,
current->pid);
@@ -791,7 +854,7 @@ isert_release_conn_kref(struct kref *kref)
static void
isert_put_conn(struct isert_conn *isert_conn)
{
- kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
+ kref_put(&isert_conn->kref, isert_release_kref);
}

/**
@@ -803,7 +866,7 @@ isert_put_conn(struct isert_conn *isert_conn)
* to TEMINATING and start teardown sequence (rdma_disconnect).
* In case the connection state is UP, complete flush as well.
*
- * This routine must be called with conn_mutex held. Thus it is
+ * This routine must be called with mutex held. Thus it is
* safe to call multiple times.
*/
static void
@@ -819,7 +882,7 @@ isert_conn_terminate(struct isert_conn *isert_conn)
isert_info("Terminating conn %p state %d\n",
isert_conn, isert_conn->state);
isert_conn->state = ISER_CONN_TERMINATING;
- err = rdma_disconnect(isert_conn->conn_cm_id);
+ err = rdma_disconnect(isert_conn->cm_id);
if (err)
isert_warn("Failed rdma_disconnect isert_conn %p\n",
isert_conn);
@@ -868,22 +931,25 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id,

isert_conn = cma_id->qp->qp_context;

- mutex_lock(&isert_conn->conn_mutex);
+ mutex_lock(&isert_conn->mutex);
isert_conn_terminate(isert_conn);
- mutex_unlock(&isert_conn->conn_mutex);
+ mutex_unlock(&isert_conn->mutex);

- isert_info("conn %p completing conn_wait\n", isert_conn);
- complete(&isert_conn->conn_wait);
+ isert_info("conn %p completing wait\n", isert_conn);
+ complete(&isert_conn->wait);

return 0;
}

-static void
+static int
isert_connect_error(struct rdma_cm_id *cma_id)
{
struct isert_conn *isert_conn = cma_id->qp->qp_context;

+ isert_conn->cm_id = NULL;
isert_put_conn(isert_conn);
+
+ return -1;
}

static int
@@ -912,7 +978,7 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */
case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */
case RDMA_CM_EVENT_CONNECT_ERROR:
- isert_connect_error(cma_id);
+ ret = isert_connect_error(cma_id);
break;
default:
isert_err("Unhandled RDMA CMA event: %d\n", event->event);
@@ -927,11 +993,11 @@ isert_post_recv(struct isert_conn *isert_conn, u32 count)
{
struct ib_recv_wr *rx_wr, *rx_wr_failed;
int i, ret;
- unsigned int rx_head = isert_conn->conn_rx_desc_head;
+ unsigned int rx_head = isert_conn->rx_desc_head;
struct iser_rx_desc *rx_desc;

- for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) {
- rx_desc = &isert_conn->conn_rx_descs[rx_head];
+ for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
+ rx_desc = &isert_conn->rx_descs[rx_head];
rx_wr->wr_id = (uintptr_t)rx_desc;
rx_wr->sg_list = &rx_desc->rx_sg;
rx_wr->num_sge = 1;
@@ -943,14 +1009,14 @@ isert_post_recv(struct isert_conn *isert_conn, u32 count)
rx_wr->next = NULL; /* mark end of work requests list */

isert_conn->post_recv_buf_count += count;
- ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr,
+ ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr,
&rx_wr_failed);
if (ret) {
isert_err("ib_post_recv() failed with ret: %d\n", ret);
isert_conn->post_recv_buf_count -= count;
} else {
isert_dbg("Posted %d RX buffers\n", count);
- isert_conn->conn_rx_desc_head = rx_head;
+ isert_conn->rx_desc_head = rx_head;
}
return ret;
}
@@ -958,7 +1024,7 @@ isert_post_recv(struct isert_conn *isert_conn, u32 count)
static int
isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
{
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct ib_device *ib_dev = isert_conn->cm_id->device;
struct ib_send_wr send_wr, *send_wr_failed;
int ret;

@@ -972,7 +1038,7 @@ isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
send_wr.opcode = IB_WR_SEND;
send_wr.send_flags = IB_SEND_SIGNALED;

- ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed);
+ ret = ib_post_send(isert_conn->qp, &send_wr, &send_wr_failed);
if (ret)
isert_err("ib_post_send() failed, ret: %d\n", ret);

@@ -984,7 +1050,8 @@ isert_create_send_desc(struct isert_conn *isert_conn,
struct isert_cmd *isert_cmd,
struct iser_tx_desc *tx_desc)
{
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct isert_device *device = isert_conn->device;
+ struct ib_device *ib_dev = device->ib_device;

ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
ISER_HEADERS_LEN, DMA_TO_DEVICE);
@@ -995,8 +1062,8 @@ isert_create_send_desc(struct isert_conn *isert_conn,
tx_desc->num_sge = 1;
tx_desc->isert_cmd = isert_cmd;

- if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) {
- tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
+ if (tx_desc->tx_sg[0].lkey != device->mr->lkey) {
+ tx_desc->tx_sg[0].lkey = device->mr->lkey;
isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc);
}
}
@@ -1005,7 +1072,8 @@ static int
isert_init_tx_hdrs(struct isert_conn *isert_conn,
struct iser_tx_desc *tx_desc)
{
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct isert_device *device = isert_conn->device;
+ struct ib_device *ib_dev = device->ib_device;
u64 dma_addr;

dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
@@ -1018,7 +1086,7 @@ isert_init_tx_hdrs(struct isert_conn *isert_conn,
tx_desc->dma_addr = dma_addr;
tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
- tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
+ tx_desc->tx_sg[0].lkey = device->mr->lkey;

isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n",
tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length,
@@ -1051,7 +1119,7 @@ isert_rdma_post_recvl(struct isert_conn *isert_conn)
memset(&sge, 0, sizeof(struct ib_sge));
sge.addr = isert_conn->login_req_dma;
sge.length = ISER_RX_LOGIN_SIZE;
- sge.lkey = isert_conn->conn_mr->lkey;
+ sge.lkey = isert_conn->device->mr->lkey;

isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
sge.addr, sge.length, sge.lkey);
@@ -1062,7 +1130,7 @@ isert_rdma_post_recvl(struct isert_conn *isert_conn)
rx_wr.num_sge = 1;

isert_conn->post_recv_buf_count++;
- ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail);
+ ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_fail);
if (ret) {
isert_err("ib_post_recv() failed: %d\n", ret);
isert_conn->post_recv_buf_count--;
@@ -1076,8 +1144,9 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
u32 length)
{
struct isert_conn *isert_conn = conn->context;
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
- struct iser_tx_desc *tx_desc = &isert_conn->conn_login_tx_desc;
+ struct isert_device *device = isert_conn->device;
+ struct ib_device *ib_dev = device->ib_device;
+ struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc;
int ret;

isert_create_send_desc(isert_conn, NULL, tx_desc);
@@ -1100,13 +1169,13 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,

tx_dsg->addr = isert_conn->login_rsp_dma;
tx_dsg->length = length;
- tx_dsg->lkey = isert_conn->conn_mr->lkey;
+ tx_dsg->lkey = isert_conn->device->mr->lkey;
tx_desc->num_sge = 2;
}
if (!login->login_failed) {
if (login->login_complete) {
if (!conn->sess->sess_ops->SessionType &&
- isert_conn->conn_device->use_fastreg) {
+ isert_conn->device->use_fastreg) {
ret = isert_conn_create_fastreg_pool(isert_conn);
if (ret) {
isert_err("Conn: %p failed to create"
@@ -1124,9 +1193,9 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
return ret;

/* Now we are in FULL_FEATURE phase */
- mutex_lock(&isert_conn->conn_mutex);
+ mutex_lock(&isert_conn->mutex);
isert_conn->state = ISER_CONN_FULL_FEATURE;
- mutex_unlock(&isert_conn->conn_mutex);
+ mutex_unlock(&isert_conn->mutex);
goto post_send;
}

@@ -1185,7 +1254,7 @@ isert_rx_login_req(struct isert_conn *isert_conn)
memcpy(login->req_buf, &rx_desc->data[0], size);

if (login->first_request) {
- complete(&isert_conn->conn_login_comp);
+ complete(&isert_conn->login_comp);
return;
}
schedule_delayed_work(&conn->login_work, 0);
@@ -1194,7 +1263,7 @@ isert_rx_login_req(struct isert_conn *isert_conn)
static struct iscsi_cmd
*isert_allocate_cmd(struct iscsi_conn *conn)
{
- struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+ struct isert_conn *isert_conn = conn->context;
struct isert_cmd *isert_cmd;
struct iscsi_cmd *cmd;

@@ -1379,13 +1448,12 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
{
struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
struct iscsi_conn *conn = isert_conn->conn;
- struct iscsi_session *sess = conn->sess;
struct iscsi_cmd *cmd;
struct isert_cmd *isert_cmd;
int ret = -EINVAL;
u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);

- if (sess->sess_ops->SessionType &&
+ if (conn->sess->sess_ops->SessionType &&
(!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
" ignoring\n", opcode);
@@ -1497,10 +1565,11 @@ isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
}

static void
-isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
- u32 xfer_len)
+isert_rcv_completion(struct iser_rx_desc *desc,
+ struct isert_conn *isert_conn,
+ u32 xfer_len)
{
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct ib_device *ib_dev = isert_conn->cm_id->device;
struct iscsi_hdr *hdr;
u64 rx_dma;
int rx_buflen, outstanding;
@@ -1532,9 +1601,9 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
if (login && !login->first_request)
isert_rx_login_req(isert_conn);
}
- mutex_lock(&isert_conn->conn_mutex);
+ mutex_lock(&isert_conn->mutex);
complete(&isert_conn->login_req_comp);
- mutex_unlock(&isert_conn->conn_mutex);
+ mutex_unlock(&isert_conn->mutex);
} else {
isert_rx_do_work(desc, isert_conn);
}
@@ -1566,7 +1635,7 @@ isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
struct scatterlist *sg, u32 nents, u32 length, u32 offset,
enum iser_ib_op_code op, struct isert_data_buf *data)
{
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct ib_device *ib_dev = isert_conn->cm_id->device;

data->dma_dir = op == ISER_IB_RDMA_WRITE ?
DMA_TO_DEVICE : DMA_FROM_DEVICE;
@@ -1597,7 +1666,7 @@ isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
static void
isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data)
{
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct ib_device *ib_dev = isert_conn->cm_id->device;

ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir);
memset(data, 0, sizeof(*data));
@@ -1634,7 +1703,6 @@ static void
isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
- LIST_HEAD(unmap_list);

isert_dbg("Cmd %p\n", isert_cmd);

@@ -1644,9 +1712,9 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
isert_unmap_data_buf(isert_conn, &wr->prot);
wr->fr_desc->ind &= ~ISERT_PROTECTED;
}
- spin_lock_bh(&isert_conn->conn_lock);
- list_add_tail(&wr->fr_desc->list, &isert_conn->conn_fr_pool);
- spin_unlock_bh(&isert_conn->conn_lock);
+ spin_lock_bh(&isert_conn->pool_lock);
+ list_add_tail(&wr->fr_desc->list, &isert_conn->fr_pool);
+ spin_unlock_bh(&isert_conn->pool_lock);
wr->fr_desc = NULL;
}

@@ -1665,7 +1733,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
struct isert_conn *isert_conn = isert_cmd->conn;
struct iscsi_conn *conn = isert_conn->conn;
- struct isert_device *device = isert_conn->conn_device;
+ struct isert_device *device = isert_conn->device;
struct iscsi_text_rsp *hdr;

isert_dbg("Cmd %p\n", isert_cmd);
@@ -1815,7 +1883,7 @@ isert_completion_rdma_write(struct iser_tx_desc *tx_desc,
struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
struct se_cmd *se_cmd = &cmd->se_cmd;
struct isert_conn *isert_conn = isert_cmd->conn;
- struct isert_device *device = isert_conn->conn_device;
+ struct isert_device *device = isert_conn->device;
int ret = 0;

if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
@@ -1841,7 +1909,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
struct se_cmd *se_cmd = &cmd->se_cmd;
struct isert_conn *isert_conn = isert_cmd->conn;
- struct isert_device *device = isert_conn->conn_device;
+ struct isert_device *device = isert_conn->device;
int ret = 0;

if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
@@ -1861,11 +1929,13 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
spin_unlock_bh(&cmd->istate_lock);

- if (ret)
+ if (ret) {
+ target_put_sess_cmd(se_cmd->se_sess, se_cmd);
transport_send_check_condition_and_sense(se_cmd,
se_cmd->pi_err, 0);
- else
+ } else {
target_execute_cmd(se_cmd);
+ }
}

static void
@@ -1874,7 +1944,7 @@ isert_do_control_comp(struct work_struct *work)
struct isert_cmd *isert_cmd = container_of(work,
struct isert_cmd, comp_work);
struct isert_conn *isert_conn = isert_cmd->conn;
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct ib_device *ib_dev = isert_conn->cm_id->device;
struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;

isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state);
@@ -1922,10 +1992,10 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
}

static void
-isert_send_completion(struct iser_tx_desc *tx_desc,
+isert_snd_completion(struct iser_tx_desc *tx_desc,
struct isert_conn *isert_conn)
{
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct ib_device *ib_dev = isert_conn->cm_id->device;
struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
struct isert_rdma_wr *wr;

@@ -1938,10 +2008,6 @@ isert_send_completion(struct iser_tx_desc *tx_desc,
isert_dbg("Cmd %p iser_ib_op %d\n", isert_cmd, wr->iser_ib_op);

switch (wr->iser_ib_op) {
- case ISER_IB_RECV:
- isert_err("Got ISER_IB_RECV\n");
- dump_stack();
- break;
case ISER_IB_SEND:
isert_response_completion(tx_desc, isert_cmd,
isert_conn, ib_dev);
@@ -1973,8 +2039,8 @@ isert_send_completion(struct iser_tx_desc *tx_desc,
static inline bool
is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id)
{
- void *start = isert_conn->conn_rx_descs;
- int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->conn_rx_descs);
+ void *start = isert_conn->rx_descs;
+ int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->rx_descs);

if (wr_id >= start && wr_id < start + len)
return false;
@@ -1986,11 +2052,11 @@ static void
isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc)
{
if (wc->wr_id == ISER_BEACON_WRID) {
- isert_info("conn %p completing conn_wait_comp_err\n",
+ isert_info("conn %p completing wait_comp_err\n",
isert_conn);
- complete(&isert_conn->conn_wait_comp_err);
+ complete(&isert_conn->wait_comp_err);
} else if (is_isert_tx_desc(isert_conn, (void *)(uintptr_t)wc->wr_id)) {
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct ib_device *ib_dev = isert_conn->cm_id->device;
struct isert_cmd *isert_cmd;
struct iser_tx_desc *desc;

@@ -2018,10 +2084,10 @@ isert_handle_wc(struct ib_wc *wc)
if (likely(wc->status == IB_WC_SUCCESS)) {
if (wc->opcode == IB_WC_RECV) {
rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id;
- isert_rx_completion(rx_desc, isert_conn, wc->byte_len);
+ isert_rcv_completion(rx_desc, isert_conn, wc->byte_len);
} else {
tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
- isert_send_completion(tx_desc, isert_conn);
+ isert_snd_completion(tx_desc, isert_conn);
}
} else {
if (wc->status != IB_WC_WR_FLUSH_ERR)
@@ -2070,7 +2136,7 @@ isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
struct ib_send_wr *wr_failed;
int ret;

- ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr,
+ ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr,
&wr_failed);
if (ret) {
isert_err("ib_post_send failed with %d\n", ret);
@@ -2083,7 +2149,7 @@ static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
- struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+ struct isert_conn *isert_conn = conn->context;
struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
&isert_cmd->tx_desc.iscsi_header;
@@ -2097,7 +2163,8 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
if (cmd->se_cmd.sense_buffer &&
((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
(cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct isert_device *device = isert_conn->device;
+ struct ib_device *ib_dev = device->ib_device;
struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
u32 padding, pdu_len;

@@ -2116,7 +2183,7 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
isert_cmd->pdu_buf_len = pdu_len;
tx_dsg->addr = isert_cmd->pdu_buf_dma;
tx_dsg->length = pdu_len;
- tx_dsg->lkey = isert_conn->conn_mr->lkey;
+ tx_dsg->lkey = device->mr->lkey;
isert_cmd->tx_desc.num_sge = 2;
}

@@ -2131,8 +2198,8 @@ static void
isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
- struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
- struct isert_device *device = isert_conn->conn_device;
+ struct isert_conn *isert_conn = conn->context;
+ struct isert_device *device = isert_conn->device;

spin_lock_bh(&conn->cmd_lock);
if (!list_empty(&cmd->i_conn_node))
@@ -2148,8 +2215,8 @@ isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
static enum target_prot_op
isert_get_sup_prot_ops(struct iscsi_conn *conn)
{
- struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
- struct isert_device *device = isert_conn->conn_device;
+ struct isert_conn *isert_conn = conn->context;
+ struct isert_device *device = isert_conn->device;

if (conn->tpg->tpg_attrib.t10_pi) {
if (device->pi_capable) {
@@ -2170,7 +2237,7 @@ isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
bool nopout_response)
{
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
- struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+ struct isert_conn *isert_conn = conn->context;
struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
@@ -2189,7 +2256,7 @@ static int
isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
- struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+ struct isert_conn *isert_conn = conn->context;
struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
@@ -2207,7 +2274,7 @@ static int
isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
- struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+ struct isert_conn *isert_conn = conn->context;
struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
@@ -2225,9 +2292,10 @@ static int
isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
- struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+ struct isert_conn *isert_conn = conn->context;
struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct isert_device *device = isert_conn->device;
+ struct ib_device *ib_dev = device->ib_device;
struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
struct iscsi_reject *hdr =
(struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;
@@ -2243,7 +2311,7 @@ isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
tx_dsg->addr = isert_cmd->pdu_buf_dma;
tx_dsg->length = ISCSI_HDR_LEN;
- tx_dsg->lkey = isert_conn->conn_mr->lkey;
+ tx_dsg->lkey = device->mr->lkey;
isert_cmd->tx_desc.num_sge = 2;

isert_init_send_wr(isert_conn, isert_cmd, send_wr);
@@ -2257,7 +2325,7 @@ static int
isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
- struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+ struct isert_conn *isert_conn = conn->context;
struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
struct iscsi_text_rsp *hdr =
(struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
@@ -2273,7 +2341,8 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

if (txt_rsp_len) {
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct isert_device *device = isert_conn->device;
+ struct ib_device *ib_dev = device->ib_device;
struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
void *txt_rsp_buf = cmd->buf_ptr;

@@ -2283,7 +2352,7 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
isert_cmd->pdu_buf_len = txt_rsp_len;
tx_dsg->addr = isert_cmd->pdu_buf_dma;
tx_dsg->length = txt_rsp_len;
- tx_dsg->lkey = isert_conn->conn_mr->lkey;
+ tx_dsg->lkey = device->mr->lkey;
isert_cmd->tx_desc.num_sge = 2;
}
isert_init_send_wr(isert_conn, isert_cmd, send_wr);
@@ -2300,7 +2369,8 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
{
struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
struct scatterlist *sg_start, *tmp_sg;
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct isert_device *device = isert_conn->device;
+ struct ib_device *ib_dev = device->ib_device;
u32 sg_off, page_off;
int i = 0, sg_nents;

@@ -2324,7 +2394,7 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
ib_sge->length = min_t(u32, data_left,
ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
- ib_sge->lkey = isert_conn->conn_mr->lkey;
+ ib_sge->lkey = device->mr->lkey;

isert_dbg("RDMA ib_sge: addr: 0x%llx length: %u lkey: %x\n",
ib_sge->addr, ib_sge->length, ib_sge->lkey);
@@ -2346,7 +2416,7 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
{
struct se_cmd *se_cmd = &cmd->se_cmd;
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
- struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+ struct isert_conn *isert_conn = conn->context;
struct isert_data_buf *data = &wr->data;
struct ib_send_wr *send_wr;
struct ib_sge *ib_sge;
@@ -2485,7 +2555,8 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
enum isert_indicator ind,
struct ib_sge *sge)
{
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct isert_device *device = isert_conn->device;
+ struct ib_device *ib_dev = device->ib_device;
struct ib_mr *mr;
struct ib_fast_reg_page_list *frpl;
struct ib_send_wr fr_wr, inv_wr;
@@ -2494,7 +2565,7 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
u32 page_off;

if (mem->dma_nents == 1) {
- sge->lkey = isert_conn->conn_mr->lkey;
+ sge->lkey = device->mr->lkey;
sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
@@ -2542,7 +2613,7 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
else
wr->next = &fr_wr;

- ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
+ ret = ib_post_send(isert_conn->qp, wr, &bad_wr);
if (ret) {
isert_err("fast registration failed, ret:%d\n", ret);
return ret;
@@ -2655,7 +2726,7 @@ isert_reg_sig_mr(struct isert_conn *isert_conn,
else
wr->next = &sig_wr;

- ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
+ ret = ib_post_send(isert_conn->qp, wr, &bad_wr);
if (ret) {
isert_err("fast registration failed, ret:%d\n", ret);
goto err;
@@ -2685,14 +2756,14 @@ isert_handle_prot_cmd(struct isert_conn *isert_conn,
struct isert_cmd *isert_cmd,
struct isert_rdma_wr *wr)
{
- struct isert_device *device = isert_conn->conn_device;
+ struct isert_device *device = isert_conn->device;
struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
int ret;

if (!wr->fr_desc->pi_ctx) {
ret = isert_create_pi_ctx(wr->fr_desc,
device->ib_device,
- isert_conn->conn_pd);
+ device->pd);
if (ret) {
isert_err("conn %p failed to allocate pi_ctx\n",
isert_conn);
@@ -2763,11 +2834,11 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
return ret;

if (wr->data.dma_nents != 1 || isert_prot_cmd(isert_conn, se_cmd)) {
- spin_lock_irqsave(&isert_conn->conn_lock, flags);
- fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
+ spin_lock_irqsave(&isert_conn->pool_lock, flags);
+ fr_desc = list_first_entry(&isert_conn->fr_pool,
struct fast_reg_descriptor, list);
list_del(&fr_desc->list);
- spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
+ spin_unlock_irqrestore(&isert_conn->pool_lock, flags);
wr->fr_desc = fr_desc;
}

@@ -2814,9 +2885,9 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,

unmap_cmd:
if (fr_desc) {
- spin_lock_irqsave(&isert_conn->conn_lock, flags);
- list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
- spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
+ spin_lock_irqsave(&isert_conn->pool_lock, flags);
+ list_add_tail(&fr_desc->list, &isert_conn->fr_pool);
+ spin_unlock_irqrestore(&isert_conn->pool_lock, flags);
}
isert_unmap_data_buf(isert_conn, &wr->data);

@@ -2829,8 +2900,8 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
struct se_cmd *se_cmd = &cmd->se_cmd;
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
- struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
- struct isert_device *device = isert_conn->conn_device;
+ struct isert_conn *isert_conn = conn->context;
+ struct isert_device *device = isert_conn->device;
struct ib_send_wr *wr_failed;
int rc;

@@ -2859,7 +2930,7 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
wr->send_wr_num += 1;
}

- rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
+ rc = ib_post_send(isert_conn->qp, wr->send_wr, &wr_failed);
if (rc)
isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");

@@ -2879,8 +2950,8 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
struct se_cmd *se_cmd = &cmd->se_cmd;
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
- struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
- struct isert_device *device = isert_conn->conn_device;
+ struct isert_conn *isert_conn = conn->context;
+ struct isert_device *device = isert_conn->device;
struct ib_send_wr *wr_failed;
int rc;

@@ -2893,7 +2964,7 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
return rc;
}

- rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
+ rc = ib_post_send(isert_conn->qp, wr->send_wr, &wr_failed);
if (rc)
isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");

@@ -2987,7 +3058,7 @@ isert_setup_id(struct isert_np *isert_np)
goto out_id;
}

- ret = rdma_listen(id, ISERT_RDMA_LISTEN_BACKLOG);
+ ret = rdma_listen(id, 0);
if (ret) {
isert_err("rdma_listen() failed: %d\n", ret);
goto out_id;
@@ -3046,7 +3117,7 @@ out:
static int
isert_rdma_accept(struct isert_conn *isert_conn)
{
- struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
+ struct rdma_cm_id *cm_id = isert_conn->cm_id;
struct rdma_conn_param cp;
int ret;

@@ -3067,7 +3138,7 @@ isert_rdma_accept(struct isert_conn *isert_conn)
static int
isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
{
- struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+ struct isert_conn *isert_conn = conn->context;
int ret;

isert_info("before login_req comp conn: %p\n", isert_conn);
@@ -3090,8 +3161,8 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)

isert_rx_login_req(isert_conn);

- isert_info("before conn_login_comp conn: %p\n", conn);
- ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
+ isert_info("before login_comp conn: %p\n", conn);
+ ret = wait_for_completion_interruptible(&isert_conn->login_comp);
if (ret)
return ret;

@@ -3104,7 +3175,7 @@ static void
isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
struct isert_conn *isert_conn)
{
- struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
+ struct rdma_cm_id *cm_id = isert_conn->cm_id;
struct rdma_route *cm_route = &cm_id->route;
struct sockaddr_in *sock_in;
struct sockaddr_in6 *sock_in6;
@@ -3137,13 +3208,13 @@ isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
static int
isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
- struct isert_np *isert_np = (struct isert_np *)np->np_context;
+ struct isert_np *isert_np = np->np_context;
struct isert_conn *isert_conn;
- int max_accept = 0, ret;
+ int ret;

accept_wait:
ret = down_interruptible(&isert_np->np_sem);
- if (ret || max_accept > 5)
+ if (ret)
return -ENODEV;

spin_lock_bh(&np->np_thread_lock);
@@ -3162,17 +3233,15 @@ accept_wait:
mutex_lock(&isert_np->np_accept_mutex);
if (list_empty(&isert_np->np_accept_list)) {
mutex_unlock(&isert_np->np_accept_mutex);
- max_accept++;
goto accept_wait;
}
isert_conn = list_first_entry(&isert_np->np_accept_list,
- struct isert_conn, conn_accept_node);
- list_del_init(&isert_conn->conn_accept_node);
+ struct isert_conn, accept_node);
+ list_del_init(&isert_conn->accept_node);
mutex_unlock(&isert_np->np_accept_mutex);

conn->context = isert_conn;
isert_conn->conn = conn;
- max_accept = 0;

isert_set_conn_info(np, conn, isert_conn);

@@ -3184,7 +3253,7 @@ accept_wait:
static void
isert_free_np(struct iscsi_np *np)
{
- struct isert_np *isert_np = (struct isert_np *)np->np_context;
+ struct isert_np *isert_np = np->np_context;
struct isert_conn *isert_conn, *n;

if (isert_np->np_cm_id)
@@ -3202,7 +3271,7 @@ isert_free_np(struct iscsi_np *np)
isert_info("Still have isert connections, cleaning up...\n");
list_for_each_entry_safe(isert_conn, n,
&isert_np->np_accept_list,
- conn_accept_node) {
+ accept_node) {
isert_info("cleaning isert_conn %p state (%d)\n",
isert_conn, isert_conn->state);
isert_connect_release(isert_conn);
@@ -3222,11 +3291,11 @@ static void isert_release_work(struct work_struct *work)

isert_info("Starting release conn %p\n", isert_conn);

- wait_for_completion(&isert_conn->conn_wait);
+ wait_for_completion(&isert_conn->wait);

- mutex_lock(&isert_conn->conn_mutex);
+ mutex_lock(&isert_conn->mutex);
isert_conn->state = ISER_CONN_DOWN;
- mutex_unlock(&isert_conn->conn_mutex);
+ mutex_unlock(&isert_conn->mutex);

isert_info("Destroying conn %p\n", isert_conn);
isert_put_conn(isert_conn);
@@ -3264,15 +3333,15 @@ isert_wait4flush(struct isert_conn *isert_conn)

isert_info("conn %p\n", isert_conn);

- init_completion(&isert_conn->conn_wait_comp_err);
+ init_completion(&isert_conn->wait_comp_err);
isert_conn->beacon.wr_id = ISER_BEACON_WRID;
/* post an indication that all flush errors were consumed */
- if (ib_post_recv(isert_conn->conn_qp, &isert_conn->beacon, &bad_wr)) {
+ if (ib_post_recv(isert_conn->qp, &isert_conn->beacon, &bad_wr)) {
isert_err("conn %p failed to post beacon", isert_conn);
return;
}

- wait_for_completion(&isert_conn->conn_wait_comp_err);
+ wait_for_completion(&isert_conn->wait_comp_err);
}

static void isert_wait_conn(struct iscsi_conn *conn)
@@ -3281,17 +3350,17 @@ static void isert_wait_conn(struct iscsi_conn *conn)

isert_info("Starting conn %p\n", isert_conn);

- mutex_lock(&isert_conn->conn_mutex);
+ mutex_lock(&isert_conn->mutex);
/*
- * Only wait for conn_wait_comp_err if the isert_conn made it
+ * Only wait for wait_comp_err if the isert_conn made it
* into full feature phase..
*/
if (isert_conn->state == ISER_CONN_INIT) {
- mutex_unlock(&isert_conn->conn_mutex);
+ mutex_unlock(&isert_conn->mutex);
return;
}
isert_conn_terminate(isert_conn);
- mutex_unlock(&isert_conn->conn_mutex);
+ mutex_unlock(&isert_conn->mutex);

isert_wait4cmds(conn);
isert_wait4flush(isert_conn);
@@ -3370,7 +3439,7 @@ static void __exit isert_exit(void)
}

MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
-MODULE_VERSION("0.1");
+MODULE_VERSION("1.0");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");