@@ -65,6 +65,34 @@ MODULE_LICENSE("Dual BSD/GPL");
 #define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
 #define CMA_IBOE_PACKET_LIFETIME 18
 
+static const char * const cma_events[] = {
+	[RDMA_CM_EVENT_ADDR_RESOLVED] = "address resolved",
+	[RDMA_CM_EVENT_ADDR_ERROR] = "address error",
+	[RDMA_CM_EVENT_ROUTE_RESOLVED] = "route resolved",
+	[RDMA_CM_EVENT_ROUTE_ERROR] = "route error",
+	[RDMA_CM_EVENT_CONNECT_REQUEST] = "connect request",
+	[RDMA_CM_EVENT_CONNECT_RESPONSE] = "connect response",
+	[RDMA_CM_EVENT_CONNECT_ERROR] = "connect error",
+	[RDMA_CM_EVENT_UNREACHABLE] = "unreachable",
+	[RDMA_CM_EVENT_REJECTED] = "rejected",
+	[RDMA_CM_EVENT_ESTABLISHED] = "established",
+	[RDMA_CM_EVENT_DISCONNECTED] = "disconnected",
+	[RDMA_CM_EVENT_DEVICE_REMOVAL] = "device removal",
+	[RDMA_CM_EVENT_MULTICAST_JOIN] = "multicast join",
+	[RDMA_CM_EVENT_MULTICAST_ERROR] = "multicast error",
+	[RDMA_CM_EVENT_ADDR_CHANGE] = "address change",
+	[RDMA_CM_EVENT_TIMEWAIT_EXIT] = "timewait exit",
+};
+
+const char *rdma_event_msg(enum rdma_cm_event_type event)
+{
+	size_t index = event;
+
+	return (index < ARRAY_SIZE(cma_events) && cma_events[index]) ?
+			cma_events[index] : "unrecognized event";
+}
+EXPORT_SYMBOL(rdma_event_msg);
+
 static void cma_add_one(struct ib_device *device);
 static void cma_remove_one(struct ib_device *device);
 
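For reference, a minimal sketch of how a ULP's CM event handler might use the new rdma_event_msg() helper; the handler name and logging below are hypothetical, not part of this patch:

#include <linux/printk.h>
#include <rdma/rdma_cm.h>

/* Sketch: log each CM event by name before handling it. */
static int my_cma_event_handler(struct rdma_cm_id *id,
				struct rdma_cm_event *event)
{
	pr_info("cma event: %s, status %d\n",
		rdma_event_msg(event->event), event->status);
	return 0;	/* returning non-zero would destroy the id */
}
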
@@ -349,18 +377,35 @@ static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
 	return ret;
 }
 
+static inline int cma_validate_port(struct ib_device *device, u8 port,
+				    union ib_gid *gid, int dev_type)
+{
+	u8 found_port;
+	int ret = -ENODEV;
+
+	if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port))
+		return ret;
+
+	if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
+		return ret;
+
+	ret = ib_find_cached_gid(device, gid, &found_port, NULL);
+	if (port != found_port)
+		return -ENODEV;
+
+	return ret;
+}
+
 static int cma_acquire_dev(struct rdma_id_private *id_priv,
 			   struct rdma_id_private *listen_id_priv)
 {
 	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
 	struct cma_device *cma_dev;
-	union ib_gid gid, iboe_gid;
+	union ib_gid gid, iboe_gid, *gidp;
 	int ret = -ENODEV;
-	u8 port, found_port;
-	enum rdma_link_layer dev_ll = dev_addr->dev_type == ARPHRD_INFINIBAND ?
-		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
+	u8 port;
 
-	if (dev_ll != IB_LINK_LAYER_INFINIBAND &&
+	if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
 	    id_priv->id.ps == RDMA_PS_IPOIB)
 		return -EINVAL;
 
@@ -370,41 +415,36 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv,
 
 	memcpy(&gid, dev_addr->src_dev_addr +
 	       rdma_addr_gid_offset(dev_addr), sizeof gid);
-	if (listen_id_priv &&
-	    rdma_port_get_link_layer(listen_id_priv->id.device,
-				     listen_id_priv->id.port_num) == dev_ll) {
+
+	if (listen_id_priv) {
 		cma_dev = listen_id_priv->cma_dev;
 		port = listen_id_priv->id.port_num;
-		if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB &&
-		    rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET)
-			ret = ib_find_cached_gid(cma_dev->device, &iboe_gid,
-						 &found_port, NULL);
-		else
-			ret = ib_find_cached_gid(cma_dev->device, &gid,
-						 &found_port, NULL);
+		gidp = rdma_protocol_roce(cma_dev->device, port) ?
+		       &iboe_gid : &gid;
 
-		if (!ret && (port == found_port)) {
-			id_priv->id.port_num = found_port;
+		ret = cma_validate_port(cma_dev->device, port, gidp,
+					dev_addr->dev_type);
+		if (!ret) {
+			id_priv->id.port_num = port;
 			goto out;
 		}
 	}
+
 	list_for_each_entry(cma_dev, &dev_list, list) {
 		for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
 			if (listen_id_priv &&
 			    listen_id_priv->cma_dev == cma_dev &&
 			    listen_id_priv->id.port_num == port)
 				continue;
-			if (rdma_port_get_link_layer(cma_dev->device, port) == dev_ll) {
-				if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB &&
-				    rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET)
-					ret = ib_find_cached_gid(cma_dev->device, &iboe_gid, &found_port, NULL);
-				else
-					ret = ib_find_cached_gid(cma_dev->device, &gid, &found_port, NULL);
-
-				if (!ret && (port == found_port)) {
-					id_priv->id.port_num = found_port;
-					goto out;
-				}
+
+			gidp = rdma_protocol_roce(cma_dev->device, port) ?
+			       &iboe_gid : &gid;
+
+			ret = cma_validate_port(cma_dev->device, port, gidp,
+						dev_addr->dev_type);
+			if (!ret) {
+				id_priv->id.port_num = port;
+				goto out;
 			}
 		}
 	}
@@ -435,10 +475,10 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
 	pkey = ntohs(addr->sib_pkey);
 
 	list_for_each_entry(cur_dev, &dev_list, list) {
-		if (rdma_node_get_transport(cur_dev->device->node_type) != RDMA_TRANSPORT_IB)
-			continue;
-
 		for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
+			if (!rdma_cap_af_ib(cur_dev->device, p))
+				continue;
+
 			if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))
 				continue;
 
@@ -633,10 +673,9 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
 	if (ret)
 		goto out;
 
-	if (rdma_node_get_transport(id_priv->cma_dev->device->node_type)
-	    == RDMA_TRANSPORT_IB &&
-	    rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)
-	    == IB_LINK_LAYER_ETHERNET) {
+	BUG_ON(id_priv->cma_dev->device != id_priv->id.device);
+
+	if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) {
 		ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr.smac, NULL);
 
 		if (ret)
@@ -700,11 +739,10 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
 	int ret;
 	u16 pkey;
 
-	if (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num) ==
-	    IB_LINK_LAYER_INFINIBAND)
-		pkey = ib_addr_get_pkey(dev_addr);
-	else
+	if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num))
 		pkey = 0xffff;
+	else
+		pkey = ib_addr_get_pkey(dev_addr);
 
 	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
 				  pkey, &qp_attr->pkey_index);
@@ -735,8 +773,7 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
 	int ret = 0;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
-	case RDMA_TRANSPORT_IB:
+	if (rdma_cap_ib_cm(id->device, id->port_num)) {
 		if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
 			ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
 		else
@@ -745,19 +782,15 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
 
 		if (qp_attr->qp_state == IB_QPS_RTR)
 			qp_attr->rq_psn = id_priv->seq_num;
-		break;
-	case RDMA_TRANSPORT_IWARP:
+	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
 		if (!id_priv->cm_id.iw) {
 			qp_attr->qp_access_flags = 0;
 			*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
 		} else
 			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
 						 qp_attr_mask);
-		break;
-	default:
+	} else
 		ret = -ENOSYS;
-		break;
-	}
 
 	return ret;
 }
@@ -935,13 +968,9 @@ static inline int cma_user_data_offset(struct rdma_id_private *id_priv)
 
 static void cma_cancel_route(struct rdma_id_private *id_priv)
 {
-	switch (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)) {
-	case IB_LINK_LAYER_INFINIBAND:
+	if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) {
 		if (id_priv->query)
 			ib_sa_cancel_query(id_priv->query_id, id_priv->query);
-		break;
-	default:
-		break;
 	}
 }
 
@@ -1013,17 +1042,12 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
 		mc = container_of(id_priv->mc_list.next,
 				  struct cma_multicast, list);
 		list_del(&mc->list);
-		switch (rdma_port_get_link_layer(id_priv->cma_dev->device, id_priv->id.port_num)) {
-		case IB_LINK_LAYER_INFINIBAND:
+		if (rdma_cap_ib_mcast(id_priv->cma_dev->device,
+				      id_priv->id.port_num)) {
 			ib_sa_free_multicast(mc->multicast.ib);
 			kfree(mc);
-			break;
-		case IB_LINK_LAYER_ETHERNET:
+		} else
 			kref_put(&mc->mcref, release_mc);
-			break;
-		default:
-			break;
-		}
 	}
 }
 
@@ -1044,17 +1068,12 @@ void rdma_destroy_id(struct rdma_cm_id *id)
 	mutex_unlock(&id_priv->handler_mutex);
 
 	if (id_priv->cma_dev) {
-		switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
-		case RDMA_TRANSPORT_IB:
+		if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
 			if (id_priv->cm_id.ib)
 				ib_destroy_cm_id(id_priv->cm_id.ib);
-			break;
-		case RDMA_TRANSPORT_IWARP:
+		} else if (rdma_cap_iw_cm(id_priv->id.device, 1)) {
 			if (id_priv->cm_id.iw)
 				iw_destroy_cm_id(id_priv->cm_id.iw);
-			break;
-		default:
-			break;
 		}
 		cma_leave_mc_groups(id_priv);
 		cma_release_dev(id_priv);
@@ -1632,8 +1651,7 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
 	struct rdma_cm_id *id;
 	int ret;
 
-	if (cma_family(id_priv) == AF_IB &&
-	    rdma_node_get_transport(cma_dev->device->node_type) != RDMA_TRANSPORT_IB)
+	if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
 		return;
 
 	id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps,
@@ -1974,26 +1992,15 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
 		return -EINVAL;
 
 	atomic_inc(&id_priv->refcount);
-	switch (rdma_node_get_transport(id->device->node_type)) {
-	case RDMA_TRANSPORT_IB:
-		switch (rdma_port_get_link_layer(id->device, id->port_num)) {
-		case IB_LINK_LAYER_INFINIBAND:
-			ret = cma_resolve_ib_route(id_priv, timeout_ms);
-			break;
-		case IB_LINK_LAYER_ETHERNET:
-			ret = cma_resolve_iboe_route(id_priv);
-			break;
-		default:
-			ret = -ENOSYS;
-		}
-		break;
-	case RDMA_TRANSPORT_IWARP:
+	if (rdma_cap_ib_sa(id->device, id->port_num))
+		ret = cma_resolve_ib_route(id_priv, timeout_ms);
+	else if (rdma_protocol_roce(id->device, id->port_num))
+		ret = cma_resolve_iboe_route(id_priv);
+	else if (rdma_protocol_iwarp(id->device, id->port_num))
 		ret = cma_resolve_iw_route(id_priv, timeout_ms);
-		break;
-	default:
+	else
 		ret = -ENOSYS;
-		break;
-	}
+
 	if (ret)
 		goto err;
 
@@ -2035,7 +2042,7 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv)
 	mutex_lock(&lock);
 	list_for_each_entry(cur_dev, &dev_list, list) {
 		if (cma_family(id_priv) == AF_IB &&
-		    rdma_node_get_transport(cur_dev->device->node_type) != RDMA_TRANSPORT_IB)
+		    !rdma_cap_ib_cm(cur_dev->device, 1))
 			continue;
 
 		if (!cma_dev)
@@ -2067,7 +2074,7 @@ port_found:
 		goto out;
 
 	id_priv->id.route.addr.dev_addr.dev_type =
-		(rdma_port_get_link_layer(cma_dev->device, p) == IB_LINK_LAYER_INFINIBAND) ?
+		(rdma_protocol_ib(cma_dev->device, p)) ?
 		ARPHRD_INFINIBAND : ARPHRD_ETHER;
 
 	rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
@@ -2544,18 +2551,15 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
 
 	id_priv->backlog = backlog;
 	if (id->device) {
-		switch (rdma_node_get_transport(id->device->node_type)) {
-		case RDMA_TRANSPORT_IB:
+		if (rdma_cap_ib_cm(id->device, 1)) {
 			ret = cma_ib_listen(id_priv);
 			if (ret)
 				goto err;
-			break;
-		case RDMA_TRANSPORT_IWARP:
+		} else if (rdma_cap_iw_cm(id->device, 1)) {
 			ret = cma_iw_listen(id_priv, backlog);
 			if (ret)
 				goto err;
-			break;
-		default:
+		} else {
 			ret = -ENOSYS;
 			goto err;
 		}
@@ -2891,20 +2895,15 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 		id_priv->srq = conn_param->srq;
 	}
 
-	switch (rdma_node_get_transport(id->device->node_type)) {
-	case RDMA_TRANSPORT_IB:
+	if (rdma_cap_ib_cm(id->device, id->port_num)) {
 		if (id->qp_type == IB_QPT_UD)
 			ret = cma_resolve_ib_udp(id_priv, conn_param);
 		else
 			ret = cma_connect_ib(id_priv, conn_param);
-		break;
-	case RDMA_TRANSPORT_IWARP:
+	} else if (rdma_cap_iw_cm(id->device, id->port_num))
 		ret = cma_connect_iw(id_priv, conn_param);
-		break;
-	default:
+	else
 		ret = -ENOSYS;
-		break;
-	}
 	if (ret)
 		goto err;
 
@@ -3007,8 +3006,7 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 		id_priv->srq = conn_param->srq;
 	}
 
-	switch (rdma_node_get_transport(id->device->node_type)) {
-	case RDMA_TRANSPORT_IB:
+	if (rdma_cap_ib_cm(id->device, id->port_num)) {
 		if (id->qp_type == IB_QPT_UD) {
 			if (conn_param)
 				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
@@ -3024,14 +3022,10 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 			else
 				ret = cma_rep_recv(id_priv);
 		}
-		break;
-	case RDMA_TRANSPORT_IWARP:
+	} else if (rdma_cap_iw_cm(id->device, id->port_num))
 		ret = cma_accept_iw(id_priv, conn_param);
-		break;
-	default:
+	else
 		ret = -ENOSYS;
-		break;
-	}
 
 	if (ret)
 		goto reject;
@@ -3075,8 +3069,7 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
 	if (!id_priv->cm_id.ib)
 		return -EINVAL;
 
-	switch (rdma_node_get_transport(id->device->node_type)) {
-	case RDMA_TRANSPORT_IB:
+	if (rdma_cap_ib_cm(id->device, id->port_num)) {
 		if (id->qp_type == IB_QPT_UD)
 			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0,
 						private_data, private_data_len);
@@ -3084,15 +3077,12 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
 			ret = ib_send_cm_rej(id_priv->cm_id.ib,
 					     IB_CM_REJ_CONSUMER_DEFINED, NULL,
 					     0, private_data, private_data_len);
-		break;
-	case RDMA_TRANSPORT_IWARP:
+	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
 		ret = iw_cm_reject(id_priv->cm_id.iw,
 				   private_data, private_data_len);
-		break;
-	default:
+	} else
 		ret = -ENOSYS;
-		break;
-	}
+
 	return ret;
 }
 EXPORT_SYMBOL(rdma_reject);
@@ -3106,22 +3096,18 @@ int rdma_disconnect(struct rdma_cm_id *id)
 	if (!id_priv->cm_id.ib)
 		return -EINVAL;
 
-	switch (rdma_node_get_transport(id->device->node_type)) {
-	case RDMA_TRANSPORT_IB:
+	if (rdma_cap_ib_cm(id->device, id->port_num)) {
 		ret = cma_modify_qp_err(id_priv);
 		if (ret)
 			goto out;
 		/* Initiate or respond to a disconnect. */
 		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
 			ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
-		break;
-	case RDMA_TRANSPORT_IWARP:
+	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
 		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
-		break;
-	default:
+	} else
 		ret = -EINVAL;
-		break;
-	}
+
 out:
 	return ret;
 }
@@ -3367,24 +3353,13 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
 	list_add(&mc->list, &id_priv->mc_list);
 	spin_unlock(&id_priv->lock);
 
-	switch (rdma_node_get_transport(id->device->node_type)) {
-	case RDMA_TRANSPORT_IB:
-		switch (rdma_port_get_link_layer(id->device, id->port_num)) {
-		case IB_LINK_LAYER_INFINIBAND:
-			ret = cma_join_ib_multicast(id_priv, mc);
-			break;
-		case IB_LINK_LAYER_ETHERNET:
-			kref_init(&mc->mcref);
-			ret = cma_iboe_join_multicast(id_priv, mc);
-			break;
-		default:
-			ret = -EINVAL;
-		}
-		break;
-	default:
+	if (rdma_protocol_roce(id->device, id->port_num)) {
+		kref_init(&mc->mcref);
+		ret = cma_iboe_join_multicast(id_priv, mc);
+	} else if (rdma_cap_ib_mcast(id->device, id->port_num))
+		ret = cma_join_ib_multicast(id_priv, mc);
+	else
 		ret = -ENOSYS;
-		break;
-	}
 
 	if (ret) {
 		spin_lock_irq(&id_priv->lock);
@@ -3412,19 +3387,15 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
 				ib_detach_mcast(id->qp,
 						&mc->multicast.ib->rec.mgid,
 						be16_to_cpu(mc->multicast.ib->rec.mlid));
-			if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) == RDMA_TRANSPORT_IB) {
-				switch (rdma_port_get_link_layer(id->device, id->port_num)) {
-				case IB_LINK_LAYER_INFINIBAND:
-					ib_sa_free_multicast(mc->multicast.ib);
-					kfree(mc);
-					break;
-				case IB_LINK_LAYER_ETHERNET:
-					kref_put(&mc->mcref, release_mc);
-					break;
-				default:
-					break;
-				}
-			}
+
+			BUG_ON(id_priv->cma_dev->device != id->device);
+
+			if (rdma_cap_ib_mcast(id->device, id->port_num)) {
+				ib_sa_free_multicast(mc->multicast.ib);
+				kfree(mc);
+			} else if (rdma_protocol_roce(id->device, id->port_num))
+				kref_put(&mc->mcref, release_mc);
+
 			return;
 		}
 	}
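
The conversion pattern above is uniform: every rdma_node_get_transport()/rdma_port_get_link_layer() switch collapses into per-port capability checks. A hedged sketch of the resulting dispatch shape (function name hypothetical), assuming the rdma_cap_ib_cm()/rdma_cap_iw_cm() helpers introduced by this series:

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

/*
 * Sketch only: capabilities are answered per (device, port), so a
 * device with mixed port types dispatches correctly without ever
 * consulting node_type.
 */
static int demo_cm_dispatch(struct rdma_cm_id *id)
{
	if (rdma_cap_ib_cm(id->device, id->port_num))
		return 0;	/* IB CM path: IB and RoCE ports */
	if (rdma_cap_iw_cm(id->device, id->port_num))
		return 0;	/* iWARP CM path */
	return -ENOSYS;		/* no connection manager on this port */
}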