@@ -2847,20 +2847,26 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
+	memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
 	if (id_priv->state == RDMA_CM_IDLE) {
 		ret = cma_bind_addr(id, src_addr, dst_addr);
-		if (ret)
+		if (ret) {
+			memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
 			return ret;
+		}
 	}
 
-	if (cma_family(id_priv) != dst_addr->sa_family)
+	if (cma_family(id_priv) != dst_addr->sa_family) {
+		memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
 		return -EINVAL;
+	}
 
-	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY))
+	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) {
+		memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
 		return -EINVAL;
+	}
 
 	atomic_inc(&id_priv->refcount);
-	memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
 	if (cma_any_addr(dst_addr)) {
 		ret = cma_resolve_loopback(id_priv);
 	} else {
@@ -2976,6 +2982,43 @@ err:
 	return ret == -ENOSPC ? -EADDRNOTAVAIL : ret;
 }
 
+static int cma_port_is_unique(struct rdma_bind_list *bind_list,
+			      struct rdma_id_private *id_priv)
+{
+	struct rdma_id_private *cur_id;
+	struct sockaddr *daddr = cma_dst_addr(id_priv);
+	struct sockaddr *saddr = cma_src_addr(id_priv);
+	__be16 dport = cma_port(daddr);
+
+	hlist_for_each_entry(cur_id, &bind_list->owners, node) {
+		struct sockaddr *cur_daddr = cma_dst_addr(cur_id);
+		struct sockaddr *cur_saddr = cma_src_addr(cur_id);
+		__be16 cur_dport = cma_port(cur_daddr);
+
+		if (id_priv == cur_id)
+			continue;
+
+		/* different dest port -> unique */
+		if (!cma_any_port(cur_daddr) &&
+		    (dport != cur_dport))
+			continue;
+
+		/* different src address -> unique */
+		if (!cma_any_addr(saddr) &&
+		    !cma_any_addr(cur_saddr) &&
+		    cma_addr_cmp(saddr, cur_saddr))
+			continue;
+
+		/* different dst address -> unique */
+		if (!cma_any_addr(cur_daddr) &&
+		    cma_addr_cmp(daddr, cur_daddr))
+			continue;
+
+		return -EADDRNOTAVAIL;
+	}
+	return 0;
+}
+
 static int cma_alloc_any_port(enum rdma_port_space ps,
 			      struct rdma_id_private *id_priv)
 {
@@ -2988,9 +3031,19 @@ static int cma_alloc_any_port(enum rdma_port_space ps,
 	remaining = (high - low) + 1;
 	rover = prandom_u32() % remaining + low;
 retry:
-	if (last_used_port != rover &&
-	    !cma_ps_find(net, ps, (unsigned short)rover)) {
-		int ret = cma_alloc_port(ps, id_priv, rover);
+	if (last_used_port != rover) {
+		struct rdma_bind_list *bind_list;
+		int ret;
+
+		bind_list = cma_ps_find(net, ps, (unsigned short)rover);
+
+		if (!bind_list) {
+			ret = cma_alloc_port(ps, id_priv, rover);
+		} else {
+			ret = cma_port_is_unique(bind_list, id_priv);
+			if (!ret)
+				cma_bind_port(bind_list, id_priv);
+		}
 		/*
 		 * Remember previously used port number in order to avoid
 		 * re-using same port immediately after it is closed.