@@ -182,39 +182,19 @@ static struct dst_entry *rxe_find_route6(struct net_device *ndev,
 
 #endif
 
-/*
- * Derive the net_device from the av.
- * For physical devices, this will just return rxe->ndev.
- * But for VLAN devices, it will return the vlan dev.
- * Caller should dev_put() the returned net_device.
- */
-static struct net_device *rxe_netdev_from_av(struct rxe_dev *rxe,
-					     int port_num,
-					     struct rxe_av *av)
-{
-	union ib_gid gid;
-	struct ib_gid_attr attr;
-	struct net_device *ndev = rxe->ndev;
-
-	if (ib_get_cached_gid(&rxe->ib_dev, port_num, av->grh.sgid_index,
-			      &gid, &attr) == 0 &&
-	    attr.ndev && attr.ndev != ndev)
-		ndev = attr.ndev;
-	else
-		/* Only to ensure that caller may call dev_put() */
-		dev_hold(ndev);
-
-	return ndev;
-}
-
 static struct dst_entry *rxe_find_route(struct rxe_dev *rxe,
 					struct rxe_qp *qp,
 					struct rxe_av *av)
 {
+	const struct ib_gid_attr *attr;
 	struct dst_entry *dst = NULL;
 	struct net_device *ndev;
 
-	ndev = rxe_netdev_from_av(rxe, qp->attr.port_num, av);
+	attr = rdma_get_gid_attr(&rxe->ib_dev, qp->attr.port_num,
+				 av->grh.sgid_index);
+	if (IS_ERR(attr))
+		return NULL;
+	ndev = attr->ndev;
 
 	if (qp_type(qp) == IB_QPT_RC)
 		dst = sk_dst_get(qp->sk->sk);
@@ -244,8 +224,7 @@ static struct dst_entry *rxe_find_route(struct rxe_dev *rxe,
 #endif
 		}
 	}
-
-	dev_put(ndev);
+	rdma_put_gid_attr(attr);
 	return dst;
 }
 
@@ -536,9 +515,13 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
 	unsigned int hdr_len;
 	struct sk_buff *skb;
 	struct net_device *ndev;
+	const struct ib_gid_attr *attr;
 	const int port_num = 1;
 
-	ndev = rxe_netdev_from_av(rxe, port_num, av);
+	attr = rdma_get_gid_attr(&rxe->ib_dev, port_num, av->grh.sgid_index);
+	if (IS_ERR(attr))
+		return NULL;
+	ndev = attr->ndev;
 
 	if (av->network_type == RDMA_NETWORK_IPV4)
 		hdr_len = ETH_HLEN + sizeof(struct udphdr) +
@@ -550,10 +533,8 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
 	skb = alloc_skb(paylen + hdr_len + LL_RESERVED_SPACE(ndev),
 			GFP_ATOMIC);
 
-	if (unlikely(!skb)) {
-		dev_put(ndev);
-		return NULL;
-	}
+	if (unlikely(!skb))
+		goto out;
 
 	skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(rxe->ndev));
 
@@ -568,7 +549,8 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
 	pkt->hdr = skb_put_zero(skb, paylen);
 	pkt->mask |= RXE_GRH_MASK;
 
-	dev_put(ndev);
+out:
+	rdma_put_gid_attr(attr);
 	return skb;
 }
 
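A minimal standalone sketch (not part of the patch; the helper name and log message are hypothetical, and the u8 port_num parameter assumes the rdma_get_gid_attr() signature of this kernel generation) of the acquire/use/release pattern the hunks above convert to: the GID table entry is looked up once, attr->ndev stays valid while the reference is held, and rdma_put_gid_attr() drops it, replacing the old ib_get_cached_gid() copy plus manual dev_hold()/dev_put():

#include <linux/err.h>
#include <linux/printk.h>
#include <linux/netdevice.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

/* Hypothetical example, for illustration only: resolve a GID table index
 * and report which netdev (if any) backs it. */
static int example_log_gid_ndev(struct ib_device *ibdev, u8 port_num,
				int sgid_index)
{
	const struct ib_gid_attr *attr;

	/* Returns an ERR_PTR() on failure; on success the entry, including
	 * attr->ndev, stays valid until rdma_put_gid_attr(). */
	attr = rdma_get_gid_attr(ibdev, port_num, sgid_index);
	if (IS_ERR(attr))
		return PTR_ERR(attr);

	pr_info("sgid_index %d -> %s\n", sgid_index,
		attr->ndev ? attr->ndev->name : "(no netdev)");

	rdma_put_gid_attr(attr);	/* drop the reference taken above */
	return 0;
}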