@@ -38,6 +38,10 @@
 #define ISER_MAX_CQ_LEN	(ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
				 ISERT_MAX_CONN)

+int isert_debug_level = 0;
+module_param_named(debug_level, isert_debug_level, int, 0644);
+MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");
+
 static DEFINE_MUTEX(device_list_mutex);
 static LIST_HEAD(device_list);
 static struct workqueue_struct *isert_comp_wq;
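Note: the isert_err()/isert_warn()/isert_info()/isert_dbg() wrappers used throughout the hunks below come from the companion ib_isert.h change, which is not part of this excerpt. The sketch below shows roughly how such wrappers can be keyed off the isert_debug_level parameter added above; the PFX string and the exact verbosity thresholds are illustrative assumptions, not the upstream definitions.

/* Illustrative sketch only -- the real macros live in ib_isert.h. */
#define PFX "isert: "		/* assumed message prefix */

extern int isert_debug_level;

/* Errors always print. */
#define isert_err(fmt, arg...)	pr_err(PFX "%s: " fmt, __func__, ##arg)

#define isert_warn(fmt, arg...)					\
	do {							\
		if (unlikely(isert_debug_level > 0))		\
			pr_warn(PFX "%s: " fmt, __func__, ##arg); \
	} while (0)

#define isert_info(fmt, arg...)					\
	do {							\
		if (unlikely(isert_debug_level > 1))		\
			pr_info(PFX "%s: " fmt, __func__, ##arg); \
	} while (0)

/* Use printk(KERN_DEBUG ...) so the print is not compiled out like pr_debug(). */
#define isert_dbg(fmt, arg...)					\
	do {							\
		if (unlikely(isert_debug_level > 2))		\
			printk(KERN_DEBUG PFX "%s: " fmt, __func__, ##arg); \
	} while (0)

With wrappers along these lines, errors always print, while warning/info/debug output can be raised at runtime through the debug_level module parameter instead of rebuilding or relying on dynamic debug.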
@@ -74,13 +78,13 @@ isert_qp_event_callback(struct ib_event *e, void *context)
 {
	struct isert_conn *isert_conn = (struct isert_conn *)context;

-	pr_err("isert_qp_event_callback event: %d\n", e->event);
+	isert_err("isert_qp_event_callback event: %d\n", e->event);
	switch (e->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
		break;
	case IB_EVENT_QP_LAST_WQE_REACHED:
-		pr_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED:\n");
+		isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED:\n");
		break;
	default:
		break;
@@ -94,11 +98,11 @@ isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)

	ret = ib_query_device(ib_dev, devattr);
	if (ret) {
-		pr_err("ib_query_device() failed: %d\n", ret);
+		isert_err("ib_query_device() failed: %d\n", ret);
		return ret;
	}
-	pr_debug("devattr->max_sge: %d\n", devattr->max_sge);
-	pr_debug("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);
+	isert_dbg("devattr->max_sge: %d\n", devattr->max_sge);
+	isert_dbg("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);

	return 0;
 }
@@ -118,7 +122,7 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
			min = i;
	comp = &device->comps[min];
	comp->active_qps++;
-	pr_info("conn %p, using comp %p min_index: %d\n",
+	isert_info("conn %p, using comp %p min_index: %d\n",
		   isert_conn, comp, min);
	mutex_unlock(&device_list_mutex);

@@ -145,18 +149,18 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
	if (device->pi_capable)
		attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;

-	pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
+	isert_dbg("isert_conn_setup_qp cma_id->device: %p\n",
		 cma_id->device);
-	pr_debug("isert_conn_setup_qp conn_pd->device: %p\n",
+	isert_dbg("isert_conn_setup_qp conn_pd->device: %p\n",
		 isert_conn->conn_pd->device);

	ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
	if (ret) {
-		pr_err("rdma_create_qp failed for cma_id %d\n", ret);
+		isert_err("rdma_create_qp failed for cma_id %d\n", ret);
		goto err;
	}
	isert_conn->conn_qp = cma_id->qp;
-	pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");
+	isert_dbg("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");

	return 0;
 err:
@@ -170,7 +174,7 @@ err:
 static void
 isert_cq_event_callback(struct ib_event *e, void *context)
 {
-	pr_debug("isert_cq_event_callback event: %d\n", e->event);
+	isert_dbg("isert_cq_event_callback event: %d\n", e->event);
 }

 static int
@@ -274,7 +278,7 @@ isert_create_device_ib_res(struct isert_device *device)

	device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(),
					device->ib_device->num_comp_vectors));
-	pr_info("Using %d CQs, %s supports %d vectors support "
+	isert_info("Using %d CQs, %s supports %d vectors support "
		"Fast registration %d pi_capable %d\n",
		device->comps_used, device->ib_device->name,
		device->ib_device->num_comp_vectors, device->use_fastreg,
@@ -283,7 +287,7 @@ isert_create_device_ib_res(struct isert_device *device)
	device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp),
				GFP_KERNEL);
	if (!device->comps) {
-		pr_err("Unable to allocate completion contexts\n");
+		isert_err("Unable to allocate completion contexts\n");
		return -ENOMEM;
	}

@@ -329,7 +333,7 @@ isert_free_device_ib_res(struct isert_device *device)
 {
	int i;

-	pr_info("device %p\n", device);
+	isert_info("device %p\n", device);

	for (i = 0; i < device->comps_used; i++) {
		struct isert_comp *comp = &device->comps[i];
@@ -401,7 +405,7 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
	if (list_empty(&isert_conn->conn_fr_pool))
		return;

-	pr_debug("Freeing conn %p fastreg pool", isert_conn);
+	isert_dbg("Freeing conn %p fastreg pool", isert_conn);

	list_for_each_entry_safe(fr_desc, tmp,
				 &isert_conn->conn_fr_pool, list) {
@@ -419,7 +423,7 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
	}

	if (i < isert_conn->conn_fr_pool_size)
-		pr_warn("Pool still has %d regions registered\n",
+		isert_warn("Pool still has %d regions registered\n",
			isert_conn->conn_fr_pool_size - i);
 }

@@ -434,14 +438,14 @@ isert_create_pi_ctx(struct fast_reg_descriptor *desc,

	pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
	if (!pi_ctx) {
-		pr_err("Failed to allocate pi context\n");
+		isert_err("Failed to allocate pi context\n");
		return -ENOMEM;
	}

	pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(device,
					    ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(pi_ctx->prot_frpl)) {
-		pr_err("Failed to allocate prot frpl err=%ld\n",
+		isert_err("Failed to allocate prot frpl err=%ld\n",
		       PTR_ERR(pi_ctx->prot_frpl));
		ret = PTR_ERR(pi_ctx->prot_frpl);
		goto err_pi_ctx;
@@ -449,7 +453,7 @@ isert_create_pi_ctx(struct fast_reg_descriptor *desc,

	pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(pi_ctx->prot_mr)) {
-		pr_err("Failed to allocate prot frmr err=%ld\n",
+		isert_err("Failed to allocate prot frmr err=%ld\n",
		       PTR_ERR(pi_ctx->prot_mr));
		ret = PTR_ERR(pi_ctx->prot_mr);
		goto err_prot_frpl;
@@ -461,7 +465,7 @@ isert_create_pi_ctx(struct fast_reg_descriptor *desc,
	mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
	pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
	if (IS_ERR(pi_ctx->sig_mr)) {
-		pr_err("Failed to allocate signature enabled mr err=%ld\n",
+		isert_err("Failed to allocate signature enabled mr err=%ld\n",
		       PTR_ERR(pi_ctx->sig_mr));
		ret = PTR_ERR(pi_ctx->sig_mr);
		goto err_prot_mr;
@@ -492,21 +496,21 @@ isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
	fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
							 ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(fr_desc->data_frpl)) {
-		pr_err("Failed to allocate data frpl err=%ld\n",
+		isert_err("Failed to allocate data frpl err=%ld\n",
		       PTR_ERR(fr_desc->data_frpl));
		return PTR_ERR(fr_desc->data_frpl);
	}

	fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(fr_desc->data_mr)) {
-		pr_err("Failed to allocate data frmr err=%ld\n",
+		isert_err("Failed to allocate data frmr err=%ld\n",
		       PTR_ERR(fr_desc->data_mr));
		ret = PTR_ERR(fr_desc->data_mr);
		goto err_data_frpl;
	}
	fr_desc->ind |= ISERT_DATA_KEY_VALID;

-	pr_debug("Created fr_desc %p\n", fr_desc);
+	isert_dbg("Created fr_desc %p\n", fr_desc);

	return 0;

@@ -535,7 +539,7 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
	for (i = 0; i < tag_num; i++) {
		fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
		if (!fr_desc) {
-			pr_err("Failed to allocate fast_reg descriptor\n");
+			isert_err("Failed to allocate fast_reg descriptor\n");
			ret = -ENOMEM;
			goto err;
		}
@@ -543,7 +547,7 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
		ret = isert_create_fr_desc(device->ib_device,
					   isert_conn->conn_pd, fr_desc);
		if (ret) {
-			pr_err("Failed to create fastreg descriptor err=%d\n",
+			isert_err("Failed to create fastreg descriptor err=%d\n",
			       ret);
			kfree(fr_desc);
			goto err;
@@ -553,7 +557,7 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
		isert_conn->conn_fr_pool_size++;
	}

-	pr_debug("Creating conn %p fastreg pool size=%d",
+	isert_dbg("Creating conn %p fastreg pool size=%d",
		 isert_conn, isert_conn->conn_fr_pool_size);

	return 0;
@@ -576,17 +580,17 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
	spin_lock_bh(&np->np_thread_lock);
	if (!np->enabled) {
		spin_unlock_bh(&np->np_thread_lock);
-		pr_debug("iscsi_np is not enabled, reject connect request\n");
+		isert_dbg("iscsi_np is not enabled, reject connect request\n");
		return rdma_reject(cma_id, NULL, 0);
	}
	spin_unlock_bh(&np->np_thread_lock);

-	pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
+	isert_dbg("Entering isert_connect_request cma_id: %p, context: %p\n",
		 cma_id, cma_id->context);

	isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
	if (!isert_conn) {
-		pr_err("Unable to allocate isert_conn\n");
+		isert_err("Unable to allocate isert_conn\n");
		return -ENOMEM;
	}
	isert_conn->state = ISER_CONN_INIT;
@@ -604,7 +608,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
	isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
					ISER_RX_LOGIN_SIZE, GFP_KERNEL);
	if (!isert_conn->login_buf) {
-		pr_err("Unable to allocate isert_conn->login_buf\n");
+		isert_err("Unable to allocate isert_conn->login_buf\n");
		ret = -ENOMEM;
		goto out;
	}
@@ -612,7 +616,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
	isert_conn->login_req_buf = isert_conn->login_buf;
	isert_conn->login_rsp_buf = isert_conn->login_buf +
				    ISCSI_DEF_MAX_RECV_SEG_LEN;
-	pr_debug("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
+	isert_dbg("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
		 isert_conn->login_buf, isert_conn->login_req_buf,
		 isert_conn->login_rsp_buf);

@@ -622,7 +626,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
	if (ret) {
-		pr_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
+		isert_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
		       ret);
		isert_conn->login_req_dma = 0;
		goto out_login_buf;
@@ -634,7 +638,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
	if (ret) {
-		pr_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
+		isert_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
		       ret);
		isert_conn->login_rsp_dma = 0;
		goto out_req_dma_map;
@@ -650,13 +654,13 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
	isert_conn->initiator_depth = min_t(u8,
				event->param.conn.initiator_depth,
				device->dev_attr.max_qp_init_rd_atom);
-	pr_debug("Using initiator_depth: %u\n", isert_conn->initiator_depth);
+	isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth);

	isert_conn->conn_device = device;
	isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device);
	if (IS_ERR(isert_conn->conn_pd)) {
		ret = PTR_ERR(isert_conn->conn_pd);
-		pr_err("ib_alloc_pd failed for conn %p: ret=%d\n",
+		isert_err("ib_alloc_pd failed for conn %p: ret=%d\n",
		       isert_conn, ret);
		goto out_pd;
	}
@@ -665,7 +669,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
					   IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(isert_conn->conn_mr)) {
		ret = PTR_ERR(isert_conn->conn_mr);
-		pr_err("ib_get_dma_mr failed for conn %p: ret=%d\n",
+		isert_err("ib_get_dma_mr failed for conn %p: ret=%d\n",
		       isert_conn, ret);
		goto out_mr;
	}
@@ -686,7 +690,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
	list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
	mutex_unlock(&isert_np->np_accept_mutex);

-	pr_debug("isert_connect_request() up np_sem np: %p\n", np);
+	isert_info("np %p: Allow accept_np to continue\n", np);
	up(&isert_np->np_sem);
	return 0;

@@ -716,7 +720,7 @@ isert_connect_release(struct isert_conn *isert_conn)
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct isert_device *device = isert_conn->conn_device;

-	pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
+	isert_dbg("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");

	if (device && device->use_fastreg)
		isert_conn_free_fastreg_pool(isert_conn);
@@ -727,7 +731,7 @@ isert_connect_release(struct isert_conn *isert_conn)
	if (isert_conn->conn_qp) {
		struct isert_comp *comp = isert_conn->conn_qp->recv_cq->cq_context;

-		pr_debug("dec completion context %p active_qps\n", comp);
+		isert_dbg("dec completion context %p active_qps\n", comp);
		mutex_lock(&device_list_mutex);
		comp->active_qps--;
		mutex_unlock(&device_list_mutex);
@@ -751,7 +755,7 @@ isert_connect_release(struct isert_conn *isert_conn)
	if (device)
		isert_device_try_release(device);

-	pr_debug("Leaving isert_connect_release >>>>>>>>>>>>\n");
+	isert_dbg("Leaving isert_connect_release >>>>>>>>>>>>\n");
 }

 static void
@@ -759,10 +763,10 @@ isert_connected_handler(struct rdma_cm_id *cma_id)
 {
	struct isert_conn *isert_conn = cma_id->qp->qp_context;

-	pr_info("conn %p\n", isert_conn);
+	isert_info("conn %p\n", isert_conn);

	if (!kref_get_unless_zero(&isert_conn->conn_kref)) {
-		pr_warn("conn %p connect_release is running\n", isert_conn);
+		isert_warn("conn %p connect_release is running\n", isert_conn);
		return;
	}

@@ -778,7 +782,7 @@ isert_release_conn_kref(struct kref *kref)
	struct isert_conn *isert_conn = container_of(kref,
				struct isert_conn, conn_kref);

-	pr_debug("Calling isert_connect_release for final kref %s/%d\n",
+	isert_dbg("Calling isert_connect_release for final kref %s/%d\n",
		 current->comm, current->pid);

	isert_connect_release(isert_conn);
@@ -812,16 +816,16 @@ isert_conn_terminate(struct isert_conn *isert_conn)
		break;
	case ISER_CONN_UP:
	case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
-		pr_info("Terminating conn %p state %d\n",
+		isert_info("Terminating conn %p state %d\n",
			   isert_conn, isert_conn->state);
		isert_conn->state = ISER_CONN_TERMINATING;
		err = rdma_disconnect(isert_conn->conn_cm_id);
		if (err)
-			pr_warn("Failed rdma_disconnect isert_conn %p\n",
+			isert_warn("Failed rdma_disconnect isert_conn %p\n",
				   isert_conn);
		break;
	default:
-		pr_warn("conn %p teminating in state %d\n",
+		isert_warn("conn %p teminating in state %d\n",
			   isert_conn, isert_conn->state);
	}
 }
@@ -830,7 +834,7 @@ static int
 isert_np_cma_handler(struct isert_np *isert_np,
		     enum rdma_cm_event_type event)
 {
-	pr_debug("isert np %p, handling event %d\n", isert_np, event);
+	isert_dbg("isert np %p, handling event %d\n", isert_np, event);

	switch (event) {
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
@@ -839,13 +843,13 @@ isert_np_cma_handler(struct isert_np *isert_np,
	case RDMA_CM_EVENT_ADDR_CHANGE:
		isert_np->np_cm_id = isert_setup_id(isert_np);
		if (IS_ERR(isert_np->np_cm_id)) {
-			pr_err("isert np %p setup id failed: %ld\n",
-				 isert_np, PTR_ERR(isert_np->np_cm_id));
+			isert_err("isert np %p setup id failed: %ld\n",
+				  isert_np, PTR_ERR(isert_np->np_cm_id));
			isert_np->np_cm_id = NULL;
		}
		break;
	default:
-		pr_err("isert np %p Unexpected event %d\n",
+		isert_err("isert np %p Unexpected event %d\n",
		       isert_np, event);
	}

@@ -868,7 +872,7 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id,
	isert_conn_terminate(isert_conn);
	mutex_unlock(&isert_conn->conn_mutex);

-	pr_info("conn %p completing conn_wait\n", isert_conn);
+	isert_info("conn %p completing conn_wait\n", isert_conn);
	complete(&isert_conn->conn_wait);

	return 0;
@@ -887,14 +891,14 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 {
	int ret = 0;

-	pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
+	isert_dbg("isert_cma_handler: event %d status %d conn %p id %p\n",
		 event->event, event->status, cma_id->context, cma_id);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		ret = isert_connect_request(cma_id, event);
		if (ret)
-			pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
+			isert_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
			       event->event, ret);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
@@ -912,7 +916,7 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
		isert_connect_error(cma_id);
		break;
	default:
-		pr_err("Unhandled RDMA CMA event: %d\n", event->event);
+		isert_err("Unhandled RDMA CMA event: %d\n", event->event);
		break;
	}

@@ -943,10 +947,10 @@ isert_post_recv(struct isert_conn *isert_conn, u32 count)
	ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr,
			   &rx_wr_failed);
	if (ret) {
-		pr_err("ib_post_recv() failed with ret: %d\n", ret);
+		isert_err("ib_post_recv() failed with ret: %d\n", ret);
		isert_conn->post_recv_buf_count -= count;
	} else {
-		pr_debug("isert_post_recv(): Posted %d RX buffers\n", count);
+		isert_dbg("isert_post_recv(): Posted %d RX buffers\n", count);
		isert_conn->conn_rx_desc_head = rx_head;
	}
	return ret;
@@ -971,7 +975,7 @@ isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)

	ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed);
	if (ret)
-		pr_err("ib_post_send() failed, ret: %d\n", ret);
+		isert_err("ib_post_send() failed, ret: %d\n", ret);

	return ret;
 }
@@ -994,7 +998,7 @@ isert_create_send_desc(struct isert_conn *isert_conn,

	if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) {
		tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
-		pr_debug("tx_desc %p lkey mismatch, fixing\n", tx_desc);
+		isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc);
	}
 }

@@ -1008,7 +1012,7 @@ isert_init_tx_hdrs(struct isert_conn *isert_conn,
	dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
				     ISER_HEADERS_LEN, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ib_dev, dma_addr)) {
-		pr_err("ib_dma_mapping_error() failed\n");
+		isert_err("ib_dma_mapping_error() failed\n");
		return -ENOMEM;
	}

@@ -1017,7 +1021,7 @@ isert_init_tx_hdrs(struct isert_conn *isert_conn,
	tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
	tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;

-	pr_debug("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx length: %u"
+	isert_dbg("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx length: %u"
		 " lkey: 0x%08x\n", tx_desc->tx_sg[0].addr,
		 tx_desc->tx_sg[0].length, tx_desc->tx_sg[0].lkey);

@@ -1050,7 +1054,7 @@ isert_rdma_post_recvl(struct isert_conn *isert_conn)
	sge.length = ISER_RX_LOGIN_SIZE;
	sge.lkey = isert_conn->conn_mr->lkey;

-	pr_debug("Setup sge: addr: %llx length: %d 0x%08x\n",
+	isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
		 sge.addr, sge.length, sge.lkey);

	memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
@@ -1061,11 +1065,11 @@ isert_rdma_post_recvl(struct isert_conn *isert_conn)
	isert_conn->post_recv_buf_count++;
	ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail);
	if (ret) {
-		pr_err("ib_post_recv() failed: %d\n", ret);
+		isert_err("ib_post_recv() failed: %d\n", ret);
		isert_conn->post_recv_buf_count--;
	}

-	pr_debug("ib_post_recv(): returned success >>>>>>>>>>>>>>>>>>>>>>>>\n");
+	isert_dbg("ib_post_recv(): returned success >>>>>>>>>>>>>>>>>>>>>>>>\n");
	return ret;
 }

@@ -1107,7 +1111,7 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
	    isert_conn->conn_device->use_fastreg) {
		ret = isert_conn_create_fastreg_pool(isert_conn);
		if (ret) {
-			pr_err("Conn: %p failed to create"
+			isert_err("Conn: %p failed to create"
			       " fastreg pool\n", isert_conn);
			return ret;
		}
@@ -1149,7 +1153,7 @@ isert_rx_login_req(struct isert_conn *isert_conn)
	struct iscsi_login *login = conn->conn_login;
	int size;

-	pr_info("conn %p\n", isert_conn);
+	isert_info("conn %p\n", isert_conn);

	WARN_ON_ONCE(!login);

@@ -1177,7 +1181,7 @@ isert_rx_login_req(struct isert_conn *isert_conn)
	memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);

	size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
-	pr_debug("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n",
+	isert_dbg("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n",
		 size, rx_buflen, MAX_KEY_VALUE_PAIRS);
	memcpy(login->req_buf, &rx_desc->data[0], size);

@@ -1197,7 +1201,7 @@ static struct iscsi_cmd

	cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
	if (!cmd) {
-		pr_err("Unable to allocate iscsi_cmd + isert_cmd\n");
+		isert_err("Unable to allocate iscsi_cmd + isert_cmd\n");
		return NULL;
	}
	isert_cmd = iscsit_priv_cmd(cmd);
@@ -1240,7 +1244,7 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn,
	sg = &cmd->se_cmd.t_data_sg[0];
	sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));

-	pr_debug("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
+	isert_dbg("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
		 sg, sg_nents, &rx_desc->data[0], imm_data_len);

	sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);
@@ -1285,12 +1289,12 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
	 * FIXME: Unexpected unsolicited_data out
	 */
	if (!cmd->unsolicited_data) {
-		pr_err("Received unexpected solicited data payload\n");
+		isert_err("Received unexpected solicited data payload\n");
		dump_stack();
		return -1;
	}

-	pr_debug("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n",
+	isert_dbg("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n",
		 unsol_data_len, cmd->write_data_done, cmd->se_cmd.data_length);

	sg_off = cmd->write_data_done / PAGE_SIZE;
@@ -1301,11 +1305,11 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
	 * FIXME: Non page-aligned unsolicited_data out
	 */
	if (page_off) {
-		pr_err("Received unexpected non-page aligned data payload\n");
+		isert_err("Received unexpected non-page aligned data payload\n");
		dump_stack();
		return -1;
	}
-	pr_debug("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n",
+	isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n",
		 sg_start, sg_off, sg_nents, &rx_desc->data[0], unsol_data_len);

	sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
@@ -1353,7 +1357,7 @@ isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd

	text_in = kzalloc(payload_length, GFP_KERNEL);
	if (!text_in) {
-		pr_err("Unable to allocate text_in of payload_length: %u\n",
+		isert_err("Unable to allocate text_in of payload_length: %u\n",
		       payload_length);
		return -ENOMEM;
	}
@@ -1379,7 +1383,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,

	if (sess->sess_ops->SessionType &&
	   (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
-		pr_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
+		isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
		       " ignoring\n", opcode);
		return 0;
	}
@@ -1437,7 +1441,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
					    rx_desc, (struct iscsi_text *)hdr);
		break;
	default:
-		pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
+		isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
		dump_stack();
		break;
	}
@@ -1458,23 +1462,23 @@ isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
		if (iser_hdr->flags & ISER_RSV) {
			read_stag = be32_to_cpu(iser_hdr->read_stag);
			read_va = be64_to_cpu(iser_hdr->read_va);
-			pr_debug("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n",
+			isert_dbg("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n",
				 read_stag, (unsigned long long)read_va);
		}
		if (iser_hdr->flags & ISER_WSV) {
			write_stag = be32_to_cpu(iser_hdr->write_stag);
			write_va = be64_to_cpu(iser_hdr->write_va);
-			pr_debug("ISER_WSV: write__stag: 0x%08x write_va: 0x%16llx\n",
+			isert_dbg("ISER_WSV: write__stag: 0x%08x write_va: 0x%16llx\n",
				 write_stag, (unsigned long long)write_va);
		}

-		pr_debug("ISER ISCSI_CTRL PDU\n");
+		isert_dbg("ISER ISCSI_CTRL PDU\n");
		break;
	case ISER_HELLO:
-		pr_err("iSER Hello message\n");
+		isert_err("iSER Hello message\n");
		break;
	default:
-		pr_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
+		isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
		break;
	}

@@ -1494,19 +1498,19 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
	if ((char *)desc == isert_conn->login_req_buf) {
		rx_dma = isert_conn->login_req_dma;
		rx_buflen = ISER_RX_LOGIN_SIZE;
-		pr_debug("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
+		isert_dbg("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			 rx_dma, rx_buflen);
	} else {
		rx_dma = desc->dma_addr;
		rx_buflen = ISER_RX_PAYLOAD_SIZE;
-		pr_debug("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
+		isert_dbg("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			 rx_dma, rx_buflen);
	}

	ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);

	hdr = &desc->iscsi_header;
-	pr_debug("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
+	isert_dbg("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
		 hdr->opcode, hdr->itt, hdr->flags,
		 (int)(xfer_len - ISER_HEADERS_LEN));

@@ -1529,7 +1533,7 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
				      DMA_FROM_DEVICE);

	isert_conn->post_recv_buf_count--;
-	pr_debug("iSERT: Decremented post_recv_buf_count: %d\n",
+	isert_dbg("iSERT: Decremented post_recv_buf_count: %d\n",
		 isert_conn->post_recv_buf_count);

	if ((char *)desc == isert_conn->login_req_buf)
@@ -1541,7 +1545,7 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
					ISERT_MIN_POSTED_RX);
		err = isert_post_recv(isert_conn, count);
		if (err) {
-			pr_err("isert_post_recv() count: %d failed, %d\n",
+			isert_err("isert_post_recv() count: %d failed, %d\n",
			       count, err);
		}
	}
@@ -1570,11 +1574,11 @@ isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
	data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents,
					data->dma_dir);
	if (unlikely(!data->dma_nents)) {
-		pr_err("Cmd: unable to dma map SGs %p\n", sg);
+		isert_err("Cmd: unable to dma map SGs %p\n", sg);
		return -EINVAL;
	}

-	pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
+	isert_dbg("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
		 isert_cmd, data->dma_nents, data->sg, data->nents, data->len);

	return 0;
@@ -1596,21 +1600,21 @@ isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
 {
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;

-	pr_debug("isert_unmap_cmd: %p\n", isert_cmd);
+	isert_dbg("isert_unmap_cmd: %p\n", isert_cmd);

	if (wr->data.sg) {
-		pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd);
+		isert_dbg("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd);
		isert_unmap_data_buf(isert_conn, &wr->data);
	}

	if (wr->send_wr) {
-		pr_debug("isert_unmap_cmd: %p free send_wr\n", isert_cmd);
+		isert_dbg("isert_unmap_cmd: %p free send_wr\n", isert_cmd);
		kfree(wr->send_wr);
		wr->send_wr = NULL;
	}

	if (wr->ib_sge) {
-		pr_debug("isert_unmap_cmd: %p free ib_sge\n", isert_cmd);
+		isert_dbg("isert_unmap_cmd: %p free ib_sge\n", isert_cmd);
		kfree(wr->ib_sge);
		wr->ib_sge = NULL;
	}
@@ -1622,10 +1626,10 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	LIST_HEAD(unmap_list);

-	pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd);
+	isert_dbg("unreg_fastreg_cmd: %p\n", isert_cmd);

	if (wr->fr_desc) {
-		pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n",
+		isert_dbg("unreg_fastreg_cmd: %p free fr_desc %p\n",
			 isert_cmd, wr->fr_desc);
		if (wr->fr_desc->ind & ISERT_PROTECTED) {
			isert_unmap_data_buf(isert_conn, &wr->prot);
@@ -1638,7 +1642,7 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
	}

	if (wr->data.sg) {
-		pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd);
+		isert_dbg("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd);
		isert_unmap_data_buf(isert_conn, &wr->data);
	}

@@ -1654,7 +1658,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
	struct iscsi_conn *conn = isert_conn->conn;
	struct isert_device *device = isert_conn->conn_device;

-	pr_debug("Entering isert_put_cmd: %p\n", isert_cmd);
+	isert_dbg("Entering isert_put_cmd: %p\n", isert_cmd);

	switch (cmd->iscsi_opcode) {
	case ISCSI_OP_SCSI_CMD:
@@ -1704,7 +1708,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
		 * associated cmd->se_cmd needs to be released.
		 */
		if (cmd->se_cmd.se_tfo != NULL) {
-			pr_debug("Calling transport_generic_free_cmd from"
+			isert_dbg("Calling transport_generic_free_cmd from"
				 " isert_put_cmd for 0x%02x\n",
				 cmd->iscsi_opcode);
			transport_generic_free_cmd(&cmd->se_cmd, 0);
@@ -1723,7 +1727,7 @@ static void
 isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
 {
	if (tx_desc->dma_addr != 0) {
-		pr_debug("Calling ib_dma_unmap_single for tx_desc->dma_addr\n");
+		isert_dbg("Calling ib_dma_unmap_single for tx_desc->dma_addr\n");
		ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
				    ISER_HEADERS_LEN, DMA_TO_DEVICE);
		tx_desc->dma_addr = 0;
@@ -1735,7 +1739,7 @@ isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
		     struct ib_device *ib_dev, bool comp_err)
 {
	if (isert_cmd->pdu_buf_dma != 0) {
-		pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n");
+		isert_dbg("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n");
		ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
				    isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
		isert_cmd->pdu_buf_dma = 0;
@@ -1753,7 +1757,7 @@ isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)

	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret) {
-		pr_err("ib_check_mr_status failed, ret %d\n", ret);
+		isert_err("ib_check_mr_status failed, ret %d\n", ret);
		goto fail_mr_status;
	}

@@ -1776,7 +1780,7 @@ isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
		do_div(sec_offset_err, block_size);
		se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;

-		pr_err("isert: PI error found type %d at sector 0x%llx "
+		isert_err("isert: PI error found type %d at sector 0x%llx "
		       "expected 0x%x vs actual 0x%x\n",
		       mr_status.sig_err.err_type,
		       (unsigned long long)se_cmd->bad_sector,
@@ -1837,7 +1841,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
	cmd->write_data_done = wr->data.len;
	wr->send_wr_num = 0;

-	pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
+	isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
	spin_lock_bh(&cmd->istate_lock);
	cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
	cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
@@ -1861,7 +1865,7 @@ isert_do_control_comp(struct work_struct *work)

	switch (cmd->i_state) {
	case ISTATE_SEND_TASKMGTRSP:
-		pr_debug("Calling iscsit_tmr_post_handler >>>>>>>>>>>>>>>>>\n");
+		isert_dbg("Calling iscsit_tmr_post_handler >>>>>>>>>>>>>>>>>\n");

		iscsit_tmr_post_handler(cmd, cmd->conn);

@@ -1869,13 +1873,13 @@ isert_do_control_comp(struct work_struct *work)
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
		break;
	case ISTATE_SEND_REJECT:
-		pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
+		isert_dbg("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");

		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
		break;
	case ISTATE_SEND_LOGOUTRSP:
-		pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
+		isert_dbg("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");

		iscsit_logout_post_handler(cmd, cmd->conn);
		break;
@@ -1884,7 +1888,7 @@ isert_do_control_comp(struct work_struct *work)
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
		break;
	default:
-		pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
+		isert_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
		dump_stack();
		break;
	}
@@ -1929,25 +1933,25 @@ isert_send_completion(struct iser_tx_desc *tx_desc,

	switch (wr->iser_ib_op) {
	case ISER_IB_RECV:
-		pr_err("isert_send_completion: Got ISER_IB_RECV\n");
+		isert_err("isert_send_completion: Got ISER_IB_RECV\n");
		dump_stack();
		break;
	case ISER_IB_SEND:
-		pr_debug("isert_send_completion: Got ISER_IB_SEND\n");
+		isert_dbg("isert_send_completion: Got ISER_IB_SEND\n");
		isert_response_completion(tx_desc, isert_cmd,
					  isert_conn, ib_dev);
		break;
	case ISER_IB_RDMA_WRITE:
-		pr_debug("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
+		isert_dbg("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
		isert_completion_rdma_write(tx_desc, isert_cmd);
		break;
	case ISER_IB_RDMA_READ:
-		pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");
+		isert_dbg("isert_send_completion: Got ISER_IB_RDMA_READ:\n");

		isert_completion_rdma_read(tx_desc, isert_cmd);
		break;
	default:
-		pr_err("Unknown wr->iser_ib_op: 0x%02x\n", wr->iser_ib_op);
+		isert_err("Unknown wr->iser_ib_op: 0x%02x\n", wr->iser_ib_op);
		dump_stack();
		break;
	}
@@ -1980,7 +1984,7 @@ static void
 isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc)
 {
	if (wc->wr_id == ISER_BEACON_WRID) {
-		pr_info("conn %p completing conn_wait_comp_err\n",
+		isert_info("conn %p completing conn_wait_comp_err\n",
			   isert_conn);
		complete(&isert_conn->conn_wait_comp_err);
	} else
@@ -2020,10 +2024,10 @@ isert_handle_wc(struct ib_wc *wc)
		}
	} else {
		if (wc->status != IB_WC_WR_FLUSH_ERR)
-			pr_err("wr id %llx status %d vend_err %x\n",
+			isert_err("wr id %llx status %d vend_err %x\n",
				  wc->wr_id, wc->status, wc->vendor_err);
		else
-			pr_debug("flush error: wr id %llx\n", wc->wr_id);
+			isert_dbg("flush error: wr id %llx\n", wc->wr_id);

		if (wc->wr_id != ISER_FASTREG_LI_WRID)
			isert_cq_comp_err(isert_conn, wc);
@@ -2068,7 +2072,7 @@ isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
	ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr,
			   &wr_failed);
	if (ret) {
-		pr_err("ib_post_send failed with %d\n", ret);
+		isert_err("ib_post_send failed with %d\n", ret);
		return ret;
	}
	return ret;
@@ -2117,7 +2121,7 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)

	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

-	pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+	isert_dbg("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
 }
@@ -2148,13 +2152,13 @@ isert_get_sup_prot_ops(struct iscsi_conn *conn)

	if (conn->tpg->tpg_attrib.t10_pi) {
		if (device->pi_capable) {
-			pr_info("conn %p PI offload enabled\n", isert_conn);
+			isert_info("conn %p PI offload enabled\n", isert_conn);
			isert_conn->pi_support = true;
			return TARGET_PROT_ALL;
		}
	}

-	pr_info("conn %p PI offload disabled\n", isert_conn);
+	isert_info("conn %p PI offload disabled\n", isert_conn);
	isert_conn->pi_support = false;

	return TARGET_PROT_NORMAL;
@@ -2175,7 +2179,7 @@ isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

-	pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+	isert_dbg("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
 }
@@ -2193,7 +2197,7 @@ isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

-	pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+	isert_dbg("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
 }
@@ -2211,7 +2215,7 @@ isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

-	pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+	isert_dbg("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
 }
@@ -2243,7 +2247,7 @@ isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)

	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

-	pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+	isert_dbg("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
 }
@@ -2283,7 +2287,7 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
	}
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

-	pr_debug("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+	isert_dbg("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
 }
@@ -2311,7 +2315,7 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
	 * Perform mapping of TCM scatterlist memory ib_sge dma_addr.
	 */
	for_each_sg(sg_start, tmp_sg, sg_nents, i) {
-		pr_debug("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n",
+		isert_dbg("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n",
			 (unsigned long long)tmp_sg->dma_address,
			 tmp_sg->length, page_off);

@@ -2320,15 +2324,15 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
				  ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
		ib_sge->lkey = isert_conn->conn_mr->lkey;

-		pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n",
+		isert_dbg("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n",
			 ib_sge->addr, ib_sge->length, ib_sge->lkey);
		page_off = 0;
		data_left -= ib_sge->length;
		ib_sge++;
-		pr_debug("Incrementing ib_sge pointer to %p\n", ib_sge);
+		isert_dbg("Incrementing ib_sge pointer to %p\n", ib_sge);
	}

-	pr_debug("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
+	isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
		 send_wr->sg_list, send_wr->num_sge);

	return sg_nents;
@@ -2361,7 +2365,7 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,

	ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL);
	if (!ib_sge) {
-		pr_warn("Unable to allocate ib_sge\n");
+		isert_warn("Unable to allocate ib_sge\n");
		ret = -ENOMEM;
		goto unmap_cmd;
	}
@@ -2371,7 +2375,7 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
				GFP_KERNEL);
	if (!wr->send_wr) {
-		pr_debug("Unable to allocate wr->send_wr\n");
+		isert_dbg("Unable to allocate wr->send_wr\n");
		ret = -ENOMEM;
		goto unmap_cmd;
	}
@@ -2435,7 +2439,7 @@ isert_map_fr_pagelist(struct ib_device *ib_dev,
|
|
chunk_start = start_addr;
|
|
chunk_start = start_addr;
|
|
end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg);
|
|
end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg);
|
|
|
|
|
|
- pr_debug("SGL[%d] dma_addr: 0x%16llx len: %u\n",
|
|
|
|
|
|
+ isert_dbg("SGL[%d] dma_addr: 0x%16llx len: %u\n",
|
|
i, (unsigned long long)tmp_sg->dma_address,
|
|
i, (unsigned long long)tmp_sg->dma_address,
|
|
tmp_sg->length);
|
|
tmp_sg->length);
|
|
|
|
|
|
@@ -2448,7 +2452,7 @@ isert_map_fr_pagelist(struct ib_device *ib_dev,
|
|
page = chunk_start & PAGE_MASK;
|
|
page = chunk_start & PAGE_MASK;
|
|
do {
|
|
do {
|
|
fr_pl[n_pages++] = page;
|
|
fr_pl[n_pages++] = page;
|
|
- pr_debug("Mapped page_list[%d] page_addr: 0x%16llx\n",
|
|
|
|
|
|
+ isert_dbg("Mapped page_list[%d] page_addr: 0x%16llx\n",
|
|
n_pages - 1, page);
|
|
n_pages - 1, page);
|
|
page += PAGE_SIZE;
|
|
page += PAGE_SIZE;
|
|
} while (page < end_addr);
|
|
} while (page < end_addr);
|
|
@@ -2477,7 +2481,7 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
|
|
sge->lkey = isert_conn->conn_mr->lkey;
|
|
sge->lkey = isert_conn->conn_mr->lkey;
|
|
sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
|
|
sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
|
|
sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
|
|
sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
|
|
- pr_debug("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n",
|
|
|
|
|
|
+ isert_dbg("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n",
|
|
__func__, __LINE__, sge->addr, sge->length,
|
|
__func__, __LINE__, sge->addr, sge->length,
|
|
sge->lkey);
|
|
sge->lkey);
|
|
return 0;
|
|
return 0;
|
|
@@ -2495,7 +2499,7 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
|
|
|
|
|
|
page_off = mem->offset % PAGE_SIZE;
|
|
page_off = mem->offset % PAGE_SIZE;
|
|
|
|
|
|
- pr_debug("Use fr_desc %p sg_nents %d offset %u\n",
|
|
|
|
|
|
+ isert_dbg("Use fr_desc %p sg_nents %d offset %u\n",
|
|
fr_desc, mem->nents, mem->offset);
|
|
fr_desc, mem->nents, mem->offset);
|
|
|
|
|
|
pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents,
|
|
pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents,
|
|
@@ -2531,7 +2535,7 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
|
|
|
|
|
|
ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
|
|
ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
|
|
if (ret) {
|
|
if (ret) {
|
|
- pr_err("fast registration failed, ret:%d\n", ret);
|
|
|
|
|
|
+ isert_err("fast registration failed, ret:%d\n", ret);
|
|
return ret;
|
|
return ret;
|
|
}
|
|
}
|
|
fr_desc->ind &= ~ind;
|
|
fr_desc->ind &= ~ind;
|
|
@@ -2540,7 +2544,7 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
|
|
sge->addr = frpl->page_list[0] + page_off;
|
|
sge->addr = frpl->page_list[0] + page_off;
|
|
sge->length = mem->len;
|
|
sge->length = mem->len;
|
|
|
|
|
|
- pr_debug("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n",
|
|
|
|
|
|
+ isert_dbg("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n",
|
|
__func__, __LINE__, sge->addr, sge->length,
|
|
__func__, __LINE__, sge->addr, sge->length,
|
|
sge->lkey);
|
|
sge->lkey);
|
|
|
|
|
|
@@ -2588,7 +2592,7 @@ isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
|
|
isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
|
|
isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
|
|
break;
|
|
break;
|
|
default:
|
|
default:
|
|
- pr_err("Unsupported PI operation %d\n", se_cmd->prot_op);
|
|
|
|
|
|
+ isert_err("Unsupported PI operation %d\n", se_cmd->prot_op);
|
|
return -EINVAL;
|
|
return -EINVAL;
|
|
}
|
|
}
|
|
|
|
|
|
@@ -2652,7 +2656,7 @@ isert_reg_sig_mr(struct isert_conn *isert_conn,
|
|
|
|
|
|
ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
|
|
ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
|
|
if (ret) {
|
|
if (ret) {
|
|
- pr_err("fast registration failed, ret:%d\n", ret);
|
|
|
|
|
|
+ isert_err("fast registration failed, ret:%d\n", ret);
|
|
goto err;
|
|
goto err;
|
|
}
|
|
}
|
|
fr_desc->ind &= ~ISERT_SIG_KEY_VALID;
|
|
fr_desc->ind &= ~ISERT_SIG_KEY_VALID;
|
|
@@ -2668,7 +2672,7 @@ isert_reg_sig_mr(struct isert_conn *isert_conn,
|
|
*/
|
|
*/
|
|
rdma_wr->ib_sg[SIG].length += se_cmd->prot_length;
|
|
rdma_wr->ib_sg[SIG].length += se_cmd->prot_length;
|
|
|
|
|
|
- pr_debug("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
|
|
|
|
|
|
+ isert_dbg("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
|
|
rdma_wr->ib_sg[SIG].addr, rdma_wr->ib_sg[SIG].length,
|
|
rdma_wr->ib_sg[SIG].addr, rdma_wr->ib_sg[SIG].length,
|
|
rdma_wr->ib_sg[SIG].lkey);
|
|
rdma_wr->ib_sg[SIG].lkey);
|
|
err:
|
|
err:
|
|
@@ -2689,7 +2693,7 @@ isert_handle_prot_cmd(struct isert_conn *isert_conn,
|
|
device->ib_device,
|
|
device->ib_device,
|
|
isert_conn->conn_pd);
|
|
isert_conn->conn_pd);
|
|
if (ret) {
|
|
if (ret) {
|
|
- pr_err("conn %p failed to allocate pi_ctx\n",
|
|
|
|
|
|
+ isert_err("conn %p failed to allocate pi_ctx\n",
|
|
isert_conn);
|
|
isert_conn);
|
|
return ret;
|
|
return ret;
|
|
}
|
|
}
|
|
@@ -2702,7 +2706,7 @@ isert_handle_prot_cmd(struct isert_conn *isert_conn,
|
|
se_cmd->prot_length,
|
|
se_cmd->prot_length,
|
|
0, wr->iser_ib_op, &wr->prot);
|
|
0, wr->iser_ib_op, &wr->prot);
|
|
if (ret) {
|
|
if (ret) {
|
|
- pr_err("conn %p failed to map protection buffer\n",
|
|
|
|
|
|
+ isert_err("conn %p failed to map protection buffer\n",
|
|
isert_conn);
|
|
isert_conn);
|
|
return ret;
|
|
return ret;
|
|
}
|
|
}
|
|
@@ -2711,7 +2715,7 @@ isert_handle_prot_cmd(struct isert_conn *isert_conn,
|
|
ret = isert_fast_reg_mr(isert_conn, wr->fr_desc, &wr->prot,
|
|
ret = isert_fast_reg_mr(isert_conn, wr->fr_desc, &wr->prot,
|
|
ISERT_PROT_KEY_VALID, &wr->ib_sg[PROT]);
|
|
ISERT_PROT_KEY_VALID, &wr->ib_sg[PROT]);
|
|
if (ret) {
|
|
if (ret) {
|
|
- pr_err("conn %p failed to fast reg mr\n",
|
|
|
|
|
|
+ isert_err("conn %p failed to fast reg mr\n",
|
|
isert_conn);
|
|
isert_conn);
|
|
goto unmap_prot_cmd;
|
|
goto unmap_prot_cmd;
|
|
}
|
|
}
|
|
@@ -2719,7 +2723,7 @@ isert_handle_prot_cmd(struct isert_conn *isert_conn,
|
|
|
|
|
|
ret = isert_reg_sig_mr(isert_conn, se_cmd, wr, wr->fr_desc);
|
|
ret = isert_reg_sig_mr(isert_conn, se_cmd, wr, wr->fr_desc);
|
|
if (ret) {
|
|
if (ret) {
|
|
- pr_err("conn %p failed to fast reg mr\n",
|
|
|
|
|
|
+ isert_err("conn %p failed to fast reg mr\n",
|
|
isert_conn);
|
|
isert_conn);
|
|
goto unmap_prot_cmd;
|
|
goto unmap_prot_cmd;
|
|
}
|
|
}
|
|
@@ -2829,12 +2833,12 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
|
|
struct ib_send_wr *wr_failed;
|
|
struct ib_send_wr *wr_failed;
|
|
int rc;
|
|
int rc;
|
|
|
|
|
|
- pr_debug("Cmd: %p RDMA_WRITE data_length: %u\n",
|
|
|
|
|
|
+ isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
|
|
isert_cmd, se_cmd->data_length);
|
|
isert_cmd, se_cmd->data_length);
|
|
wr->iser_ib_op = ISER_IB_RDMA_WRITE;
|
|
wr->iser_ib_op = ISER_IB_RDMA_WRITE;
|
|
rc = device->reg_rdma_mem(conn, cmd, wr);
|
|
rc = device->reg_rdma_mem(conn, cmd, wr);
|
|
if (rc) {
|
|
if (rc) {
|
|
- pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
|
|
|
|
|
|
+ isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
|
|
return rc;
|
|
return rc;
|
|
}
|
|
}
|
|
|
|
|
|
@@ -2855,13 +2859,13 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
|
|
|
|
|
|
rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
|
|
rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
|
|
if (rc)
|
|
if (rc)
|
|
- pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
|
|
|
|
|
|
+ isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
|
|
|
|
|
|
if (!isert_prot_cmd(isert_conn, se_cmd))
|
|
if (!isert_prot_cmd(isert_conn, se_cmd))
|
|
- pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
|
|
|
|
|
|
+ isert_dbg("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
|
|
"READ\n", isert_cmd);
|
|
"READ\n", isert_cmd);
|
|
else
|
|
else
|
|
- pr_debug("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
|
|
|
|
|
|
+ isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
|
|
isert_cmd);
|
|
isert_cmd);
|
|
|
|
|
|
return 1;
|
|
return 1;
|
|
@@ -2878,20 +2882,20 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
|
|
struct ib_send_wr *wr_failed;
|
|
struct ib_send_wr *wr_failed;
|
|
int rc;
|
|
int rc;
|
|
|
|
|
|
- pr_debug("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
|
|
|
|
|
|
+ isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
|
|
isert_cmd, se_cmd->data_length, cmd->write_data_done);
|
|
isert_cmd, se_cmd->data_length, cmd->write_data_done);
|
|
wr->iser_ib_op = ISER_IB_RDMA_READ;
|
|
wr->iser_ib_op = ISER_IB_RDMA_READ;
|
|
rc = device->reg_rdma_mem(conn, cmd, wr);
|
|
rc = device->reg_rdma_mem(conn, cmd, wr);
|
|
if (rc) {
|
|
if (rc) {
|
|
- pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
|
|
|
|
|
|
+ isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
|
|
return rc;
|
|
return rc;
|
|
}
|
|
}
|
|
|
|
|
|
rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
|
|
rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
|
|
if (rc)
|
|
if (rc)
|
|
- pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
|
|
|
|
|
|
+ isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
|
|
|
|
|
|
- pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
|
|
|
|
|
|
+ isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
|
|
isert_cmd);
|
|
isert_cmd);
|
|
|
|
|
|
return 0;
|
|
return 0;
|
|
@@ -2907,7 +2911,7 @@ isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
|
|
ret = isert_put_nopin(cmd, conn, false);
|
|
ret = isert_put_nopin(cmd, conn, false);
|
|
break;
|
|
break;
|
|
default:
|
|
default:
|
|
- pr_err("Unknown immediate state: 0x%02x\n", state);
|
|
|
|
|
|
+ isert_err("Unknown immediate state: 0x%02x\n", state);
|
|
ret = -EINVAL;
|
|
ret = -EINVAL;
|
|
break;
|
|
break;
|
|
}
|
|
}
|
|
@@ -2947,7 +2951,7 @@ isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
|
|
ret = isert_put_response(conn, cmd);
|
|
ret = isert_put_response(conn, cmd);
|
|
break;
|
|
break;
|
|
default:
|
|
default:
|
|
- pr_err("Unknown response state: 0x%02x\n", state);
|
|
|
|
|
|
+ isert_err("Unknown response state: 0x%02x\n", state);
|
|
ret = -EINVAL;
|
|
ret = -EINVAL;
|
|
break;
|
|
break;
|
|
}
|
|
}
|
|
@@ -2964,26 +2968,26 @@ isert_setup_id(struct isert_np *isert_np)
|
|
int ret;
|
|
int ret;
|
|
|
|
|
|
sa = (struct sockaddr *)&np->np_sockaddr;
|
|
sa = (struct sockaddr *)&np->np_sockaddr;
|
|
- pr_debug("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);
|
|
|
|
|
|
+ isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);
|
|
|
|
|
|
id = rdma_create_id(isert_cma_handler, isert_np,
|
|
id = rdma_create_id(isert_cma_handler, isert_np,
|
|
RDMA_PS_TCP, IB_QPT_RC);
|
|
RDMA_PS_TCP, IB_QPT_RC);
|
|
if (IS_ERR(id)) {
|
|
if (IS_ERR(id)) {
|
|
- pr_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
|
|
|
|
|
|
+ isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
|
|
ret = PTR_ERR(id);
|
|
ret = PTR_ERR(id);
|
|
goto out;
|
|
goto out;
|
|
}
|
|
}
|
|
- pr_debug("id %p context %p\n", id, id->context);
|
|
|
|
|
|
+ isert_dbg("id %p context %p\n", id, id->context);
|
|
|
|
|
|
ret = rdma_bind_addr(id, sa);
|
|
ret = rdma_bind_addr(id, sa);
|
|
if (ret) {
|
|
if (ret) {
|
|
- pr_err("rdma_bind_addr() failed: %d\n", ret);
|
|
|
|
|
|
+ isert_err("rdma_bind_addr() failed: %d\n", ret);
|
|
goto out_id;
|
|
goto out_id;
|
|
}
|
|
}
|
|
|
|
|
|
ret = rdma_listen(id, ISERT_RDMA_LISTEN_BACKLOG);
|
|
ret = rdma_listen(id, ISERT_RDMA_LISTEN_BACKLOG);
|
|
if (ret) {
|
|
if (ret) {
|
|
- pr_err("rdma_listen() failed: %d\n", ret);
|
|
|
|
|
|
+ isert_err("rdma_listen() failed: %d\n", ret);
|
|
goto out_id;
|
|
goto out_id;
|
|
}
|
|
}
|
|
|
|
|
|
@@ -3004,7 +3008,7 @@ isert_setup_np(struct iscsi_np *np,
|
|
|
|
|
|
isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
|
|
isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
|
|
if (!isert_np) {
|
|
if (!isert_np) {
|
|
- pr_err("Unable to allocate struct isert_np\n");
|
|
|
|
|
|
+ isert_err("Unable to allocate struct isert_np\n");
|
|
return -ENOMEM;
|
|
return -ENOMEM;
|
|
}
|
|
}
|
|
sema_init(&isert_np->np_sem, 0);
|
|
sema_init(&isert_np->np_sem, 0);
|
|
@@ -3049,15 +3053,15 @@ isert_rdma_accept(struct isert_conn *isert_conn)
|
|
cp.retry_count = 7;
|
|
cp.retry_count = 7;
|
|
cp.rnr_retry_count = 7;
|
|
cp.rnr_retry_count = 7;
|
|
|
|
|
|
- pr_debug("Before rdma_accept >>>>>>>>>>>>>>>>>>>>.\n");
|
|
|
|
|
|
+ isert_dbg("Before rdma_accept >>>>>>>>>>>>>>>>>>>>.\n");
|
|
|
|
|
|
ret = rdma_accept(cm_id, &cp);
|
|
ret = rdma_accept(cm_id, &cp);
|
|
if (ret) {
|
|
if (ret) {
|
|
- pr_err("rdma_accept() failed with: %d\n", ret);
|
|
|
|
|
|
+ isert_err("rdma_accept() failed with: %d\n", ret);
|
|
return ret;
|
|
return ret;
|
|
}
|
|
}
|
|
|
|
|
|
- pr_debug("After rdma_accept >>>>>>>>>>>>>>>>>>>>>.\n");
|
|
|
|
|
|
+ isert_dbg("After rdma_accept >>>>>>>>>>>>>>>>>>>>>.\n");
|
|
|
|
|
|
return 0;
|
|
return 0;
|
|
}
|
|
}
|
|
@@ -3068,10 +3072,10 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
|
|
struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
|
|
struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
|
|
int ret;
|
|
int ret;
|
|
|
|
|
|
- pr_info("before login_req comp conn: %p\n", isert_conn);
|
|
|
|
|
|
+ isert_info("before login_req comp conn: %p\n", isert_conn);
|
|
ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
|
|
ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
|
|
if (ret) {
|
|
if (ret) {
|
|
- pr_err("isert_conn %p interrupted before got login req\n",
|
|
|
|
|
|
+ isert_err("isert_conn %p interrupted before got login req\n",
|
|
isert_conn);
|
|
isert_conn);
|
|
return ret;
|
|
return ret;
|
|
}
|
|
}
|
|
@@ -3088,12 +3092,12 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
|
|
|
|
|
|
isert_rx_login_req(isert_conn);
|
|
isert_rx_login_req(isert_conn);
|
|
|
|
|
|
- pr_info("before conn_login_comp conn: %p\n", conn);
|
|
|
|
|
|
+ isert_info("before conn_login_comp conn: %p\n", conn);
|
|
ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
|
|
ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
|
|
if (ret)
|
|
if (ret)
|
|
return ret;
|
|
return ret;
|
|
|
|
|
|
- pr_info("processing login->req: %p\n", login->req);
|
|
|
|
|
|
+ isert_info("processing login->req: %p\n", login->req);
|
|
|
|
|
|
return 0;
|
|
return 0;
|
|
}
|
|
}
|
|
@@ -3147,7 +3151,7 @@ accept_wait:
|
|
spin_lock_bh(&np->np_thread_lock);
|
|
spin_lock_bh(&np->np_thread_lock);
|
|
if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
|
|
if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
|
|
spin_unlock_bh(&np->np_thread_lock);
|
|
spin_unlock_bh(&np->np_thread_lock);
|
|
- pr_debug("np_thread_state %d for isert_accept_np\n",
|
|
|
|
|
|
+ isert_dbg("np_thread_state %d for isert_accept_np\n",
|
|
np->np_thread_state);
|
|
np->np_thread_state);
|
|
/**
|
|
/**
|
|
* No point in stalling here when np_thread
|
|
* No point in stalling here when np_thread
|
|
@@ -3174,7 +3178,7 @@ accept_wait:
|
|
|
|
|
|
isert_set_conn_info(np, conn, isert_conn);
|
|
isert_set_conn_info(np, conn, isert_conn);
|
|
|
|
|
|
- pr_debug("Processing isert_conn: %p\n", isert_conn);
|
|
|
|
|
|
+ isert_dbg("Processing isert_conn: %p\n", isert_conn);
|
|
|
|
|
|
return 0;
|
|
return 0;
|
|
}
|
|
}
|
|
@@ -3197,11 +3201,11 @@ isert_free_np(struct iscsi_np *np)
|
|
*/
|
|
*/
|
|
mutex_lock(&isert_np->np_accept_mutex);
|
|
mutex_lock(&isert_np->np_accept_mutex);
|
|
if (!list_empty(&isert_np->np_accept_list)) {
|
|
if (!list_empty(&isert_np->np_accept_list)) {
|
|
- pr_info("Still have isert connections, cleaning up...\n");
|
|
|
|
|
|
+ isert_info("Still have isert connections, cleaning up...\n");
|
|
list_for_each_entry_safe(isert_conn, n,
|
|
list_for_each_entry_safe(isert_conn, n,
|
|
&isert_np->np_accept_list,
|
|
&isert_np->np_accept_list,
|
|
conn_accept_node) {
|
|
conn_accept_node) {
|
|
- pr_info("cleaning isert_conn %p state (%d)\n",
|
|
|
|
|
|
+ isert_info("cleaning isert_conn %p state (%d)\n",
|
|
isert_conn, isert_conn->state);
|
|
isert_conn, isert_conn->state);
|
|
isert_connect_release(isert_conn);
|
|
isert_connect_release(isert_conn);
|
|
}
|
|
}
|
|
@@ -3218,7 +3222,7 @@ static void isert_release_work(struct work_struct *work)
|
|
struct isert_conn,
|
|
struct isert_conn,
|
|
release_work);
|
|
release_work);
|
|
|
|
|
|
- pr_info("Starting release conn %p\n", isert_conn);
|
|
|
|
|
|
+ isert_info("Starting release conn %p\n", isert_conn);
|
|
|
|
|
|
wait_for_completion(&isert_conn->conn_wait);
|
|
wait_for_completion(&isert_conn->conn_wait);
|
|
|
|
|
|
@@ -3226,7 +3230,7 @@ static void isert_release_work(struct work_struct *work)
|
|
isert_conn->state = ISER_CONN_DOWN;
|
|
isert_conn->state = ISER_CONN_DOWN;
|
|
mutex_unlock(&isert_conn->conn_mutex);
|
|
mutex_unlock(&isert_conn->conn_mutex);
|
|
|
|
|
|
- pr_info("Destroying conn %p\n", isert_conn);
|
|
|
|
|
|
+ isert_info("Destroying conn %p\n", isert_conn);
|
|
isert_put_conn(isert_conn);
|
|
isert_put_conn(isert_conn);
|
|
}
|
|
}
|
|
|
|
|
|
@@ -3236,7 +3240,7 @@ isert_wait4logout(struct isert_conn *isert_conn)
|
|
struct iscsi_conn *conn = isert_conn->conn;
|
|
struct iscsi_conn *conn = isert_conn->conn;
|
|
|
|
|
|
if (isert_conn->logout_posted) {
|
|
if (isert_conn->logout_posted) {
|
|
- pr_info("conn %p wait for conn_logout_comp\n", isert_conn);
|
|
|
|
|
|
+ isert_info("conn %p wait for conn_logout_comp\n", isert_conn);
|
|
wait_for_completion_timeout(&conn->conn_logout_comp,
|
|
wait_for_completion_timeout(&conn->conn_logout_comp,
|
|
SECONDS_FOR_LOGOUT_COMP * HZ);
|
|
SECONDS_FOR_LOGOUT_COMP * HZ);
|
|
}
|
|
}
|
|
@@ -3260,7 +3264,7 @@ isert_wait4flush(struct isert_conn *isert_conn)
|
|
isert_conn->beacon.wr_id = ISER_BEACON_WRID;
|
|
isert_conn->beacon.wr_id = ISER_BEACON_WRID;
|
|
/* post an indication that all flush errors were consumed */
|
|
/* post an indication that all flush errors were consumed */
|
|
if (ib_post_recv(isert_conn->conn_qp, &isert_conn->beacon, &bad_wr)) {
|
|
if (ib_post_recv(isert_conn->conn_qp, &isert_conn->beacon, &bad_wr)) {
|
|
- pr_err("conn %p failed to post beacon", isert_conn);
|
|
|
|
|
|
+ isert_err("conn %p failed to post beacon", isert_conn);
|
|
return;
|
|
return;
|
|
}
|
|
}
|
|
|
|
|
|
@@ -3271,7 +3275,7 @@ static void isert_wait_conn(struct iscsi_conn *conn)
|
|
{
|
|
{
|
|
struct isert_conn *isert_conn = conn->context;
|
|
struct isert_conn *isert_conn = conn->context;
|
|
|
|
|
|
- pr_debug("isert_wait_conn: Starting \n");
|
|
|
|
|
|
+ isert_dbg("isert_wait_conn: Starting\n");
|
|
|
|
|
|
mutex_lock(&isert_conn->conn_mutex);
|
|
mutex_lock(&isert_conn->conn_mutex);
|
|
/*
|
|
/*
|
|
@@ -3327,7 +3331,7 @@ static int __init isert_init(void)
|
|
|
|
|
|
isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
|
|
isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
|
|
if (!isert_comp_wq) {
|
|
if (!isert_comp_wq) {
|
|
- pr_err("Unable to allocate isert_comp_wq\n");
|
|
|
|
|
|
+ isert_err("Unable to allocate isert_comp_wq\n");
|
|
ret = -ENOMEM;
|
|
ret = -ENOMEM;
|
|
return -ENOMEM;
|
|
return -ENOMEM;
|
|
}
|
|
}
|
|
@@ -3335,13 +3339,13 @@ static int __init isert_init(void)
|
|
isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
|
|
isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
|
|
WQ_UNBOUND_MAX_ACTIVE);
|
|
WQ_UNBOUND_MAX_ACTIVE);
|
|
if (!isert_release_wq) {
|
|
if (!isert_release_wq) {
|
|
- pr_err("Unable to allocate isert_release_wq\n");
|
|
|
|
|
|
+ isert_err("Unable to allocate isert_release_wq\n");
|
|
ret = -ENOMEM;
|
|
ret = -ENOMEM;
|
|
goto destroy_comp_wq;
|
|
goto destroy_comp_wq;
|
|
}
|
|
}
|
|
|
|
|
|
iscsit_register_transport(&iser_target_transport);
|
|
iscsit_register_transport(&iser_target_transport);
|
|
- pr_info("iSER_TARGET[0] - Loaded iser_target_transport\n");
|
|
|
|
|
|
+ isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n");
|
|
|
|
|
|
return 0;
|
|
return 0;
|
|
|
|
|
|
@@ -3357,7 +3361,7 @@ static void __exit isert_exit(void)
|
|
destroy_workqueue(isert_release_wq);
|
|
destroy_workqueue(isert_release_wq);
|
|
destroy_workqueue(isert_comp_wq);
|
|
destroy_workqueue(isert_comp_wq);
|
|
iscsit_unregister_transport(&iser_target_transport);
|
|
iscsit_unregister_transport(&iser_target_transport);
|
|
- pr_debug("iSER_TARGET[0] - Released iser_target_transport\n");
|
|
|
|
|
|
+ isert_dbg("iSER_TARGET[0] - Released iser_target_transport\n");
|
|
}
|
|
}
|
|
|
|
|
|
MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
|
|
MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
|