@@ -1787,7 +1787,7 @@ static int cma_ib_handler(struct ib_cm_id *cm_id,
 			 const struct ib_cm_event *ib_event)
 {
 	struct rdma_id_private *id_priv = cm_id->context;
-	struct rdma_cm_event event;
+	struct rdma_cm_event event = {};
 	int ret = 0;
 
 	mutex_lock(&id_priv->handler_mutex);
@@ -1797,7 +1797,6 @@ static int cma_ib_handler(struct ib_cm_id *cm_id,
 		     id_priv->state != RDMA_CM_DISCONNECT))
 		goto out;
 
-	memset(&event, 0, sizeof event);
 	switch (ib_event->event) {
 	case IB_CM_REQ_ERROR:
 	case IB_CM_REP_ERROR:
@@ -2003,7 +2002,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id,
 			   const struct ib_cm_event *ib_event)
 {
 	struct rdma_id_private *listen_id, *conn_id = NULL;
-	struct rdma_cm_event event;
+	struct rdma_cm_event event = {};
 	struct net_device *net_dev;
 	u8 offset;
 	int ret;
@@ -2023,7 +2022,6 @@ static int cma_req_handler(struct ib_cm_id *cm_id,
 		goto err1;
 	}
 
-	memset(&event, 0, sizeof event);
 	offset = cma_user_data_offset(listen_id);
 	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
 	if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
@@ -2132,7 +2130,7 @@ EXPORT_SYMBOL(rdma_read_gids);
 static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
 {
 	struct rdma_id_private *id_priv = iw_id->context;
-	struct rdma_cm_event event;
+	struct rdma_cm_event event = {};
 	int ret = 0;
 	struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
 	struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
@@ -2141,7 +2139,6 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
 	if (id_priv->state != RDMA_CM_CONNECT)
 		goto out;
 
-	memset(&event, 0, sizeof event);
 	switch (iw_event->event) {
 	case IW_CM_EVENT_CLOSE:
 		event.event = RDMA_CM_EVENT_DISCONNECTED;
@@ -2201,11 +2198,17 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 {
 	struct rdma_cm_id *new_cm_id;
 	struct rdma_id_private *listen_id, *conn_id;
-	struct rdma_cm_event event;
+	struct rdma_cm_event event = {};
 	int ret = -ECONNABORTED;
 	struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
 	struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
 
+	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
+	event.param.conn.private_data = iw_event->private_data;
+	event.param.conn.private_data_len = iw_event->private_data_len;
+	event.param.conn.initiator_depth = iw_event->ird;
+	event.param.conn.responder_resources = iw_event->ord;
+
 	listen_id = cm_id->context;
 
 	mutex_lock(&listen_id->handler_mutex);
@@ -2247,13 +2250,6 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr));
 	memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr));
 
-	memset(&event, 0, sizeof event);
-	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
-	event.param.conn.private_data = iw_event->private_data;
-	event.param.conn.private_data_len = iw_event->private_data_len;
-	event.param.conn.initiator_depth = iw_event->ird;
-	event.param.conn.responder_resources = iw_event->ord;
-
 	/*
 	 * Protect against the user destroying conn_id from another thread
 	 * until we're done accessing it.
@@ -2860,9 +2856,8 @@ static void addr_handler(int status, struct sockaddr *src_addr,
 			 struct rdma_dev_addr *dev_addr, void *context)
 {
 	struct rdma_id_private *id_priv = context;
-	struct rdma_cm_event event;
+	struct rdma_cm_event event = {};
 
-	memset(&event, 0, sizeof event);
 	mutex_lock(&id_priv->handler_mutex);
 	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
 			   RDMA_CM_ADDR_RESOLVED))
@@ -3491,7 +3486,7 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
 				const struct ib_cm_event *ib_event)
 {
 	struct rdma_id_private *id_priv = cm_id->context;
-	struct rdma_cm_event event;
+	struct rdma_cm_event event = {};
 	const struct ib_cm_sidr_rep_event_param *rep =
 				&ib_event->param.sidr_rep_rcvd;
 	int ret = 0;
@@ -3500,7 +3495,6 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
 	if (id_priv->state != RDMA_CM_CONNECT)
 		goto out;
 
-	memset(&event, 0, sizeof event);
 	switch (ib_event->event) {
 	case IB_CM_SIDR_REQ_ERROR:
 		event.event = RDMA_CM_EVENT_UNREACHABLE;
@@ -3972,7 +3966,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
 {
 	struct rdma_id_private *id_priv;
 	struct cma_multicast *mc = multicast->context;
-	struct rdma_cm_event event;
+	struct rdma_cm_event event = {};
 	int ret = 0;
 
 	id_priv = mc->id_priv;
@@ -3996,7 +3990,6 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
 	}
 	mutex_unlock(&id_priv->qp_mutex);
 
-	memset(&event, 0, sizeof event);
 	event.status = status;
 	event.param.ud.private_data = mc->context;
 	if (!status) {
@@ -4441,7 +4434,7 @@ free_cma_dev:
 
 static int cma_remove_id_dev(struct rdma_id_private *id_priv)
 {
-	struct rdma_cm_event event;
+	struct rdma_cm_event event = {};
 	enum rdma_cm_state state;
 	int ret = 0;
 
@@ -4457,7 +4450,6 @@ static int cma_remove_id_dev(struct rdma_id_private *id_priv)
 	if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL))
 		goto out;
 
-	memset(&event, 0, sizeof event);
 	event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
 	ret = id_priv->id.event_handler(&id_priv->id, &event);
 out: