|
@@ -856,6 +856,23 @@ static void path_rec_completion(int status,
|
|
|
}
|
|
|
}
|
|
|
|
|
|
+static void init_path_rec(struct ipoib_dev_priv *priv, struct ipoib_path *path,
|
|
|
+ void *gid)
|
|
|
+{
|
|
|
+ path->dev = priv->dev;
|
|
|
+
|
|
|
+ if (rdma_cap_opa_ah(priv->ca, priv->port))
|
|
|
+ path->pathrec.rec_type = SA_PATH_REC_TYPE_OPA;
|
|
|
+ else
|
|
|
+ path->pathrec.rec_type = SA_PATH_REC_TYPE_IB;
|
|
|
+
|
|
|
+ memcpy(path->pathrec.dgid.raw, gid, sizeof(union ib_gid));
|
|
|
+ path->pathrec.sgid = priv->local_gid;
|
|
|
+ path->pathrec.pkey = cpu_to_be16(priv->pkey);
|
|
|
+ path->pathrec.numb_path = 1;
|
|
|
+ path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;
|
|
|
+}
|
|
|
+
|
|
|
static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
|
|
|
{
|
|
|
struct ipoib_dev_priv *priv = ipoib_priv(dev);
|
|
@@ -868,21 +885,11 @@ static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
|
|
|
if (!path)
|
|
|
return NULL;
|
|
|
|
|
|
- path->dev = dev;
|
|
|
-
|
|
|
skb_queue_head_init(&path->queue);
|
|
|
|
|
|
INIT_LIST_HEAD(&path->neigh_list);
|
|
|
|
|
|
- if (rdma_cap_opa_ah(priv->ca, priv->port))
|
|
|
- path->pathrec.rec_type = SA_PATH_REC_TYPE_OPA;
|
|
|
- else
|
|
|
- path->pathrec.rec_type = SA_PATH_REC_TYPE_IB;
|
|
|
- memcpy(path->pathrec.dgid.raw, gid, sizeof (union ib_gid));
|
|
|
- path->pathrec.sgid = priv->local_gid;
|
|
|
- path->pathrec.pkey = cpu_to_be16(priv->pkey);
|
|
|
- path->pathrec.numb_path = 1;
|
|
|
- path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;
|
|
|
+ init_path_rec(priv, path, gid);
|
|
|
|
|
|
return path;
|
|
|
}
|
|
@@ -1011,6 +1018,10 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
|
|
|
|
|
|
spin_lock_irqsave(&priv->lock, flags);
|
|
|
|
|
|
+ /* no broadcast means that all paths are (or will become) invalid */
|
|
|
+ if (!priv->broadcast)
|
|
|
+ goto drop_and_unlock;
|
|
|
+
|
|
|
path = __path_find(dev, phdr->hwaddr + 4);
|
|
|
if (!path || !path->valid) {
|
|
|
int new_path = 0;
|
|
@@ -1020,6 +1031,10 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
|
|
|
new_path = 1;
|
|
|
}
|
|
|
if (path) {
|
|
|
+ if (!new_path)
|
|
|
+ /* make sure there are no changes in the existing path record */
|
|
|
+ init_path_rec(priv, path, phdr->hwaddr + 4);
|
|
|
+
|
|
|
if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
|
|
|
push_pseudo_header(skb, phdr->hwaddr);
|
|
|
__skb_queue_tail(&path->queue, skb);
|
|
@@ -1036,8 +1051,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
|
|
|
} else
|
|
|
__path_add(dev, path);
|
|
|
} else {
|
|
|
- ++dev->stats.tx_dropped;
|
|
|
- dev_kfree_skb_any(skb);
|
|
|
+ goto drop_and_unlock;
|
|
|
}
|
|
|
|
|
|
spin_unlock_irqrestore(&priv->lock, flags);
|
|
@@ -1057,11 +1071,16 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
|
|
|
push_pseudo_header(skb, phdr->hwaddr);
|
|
|
__skb_queue_tail(&path->queue, skb);
|
|
|
} else {
|
|
|
- ++dev->stats.tx_dropped;
|
|
|
- dev_kfree_skb_any(skb);
|
|
|
+ goto drop_and_unlock;
|
|
|
}
|
|
|
|
|
|
spin_unlock_irqrestore(&priv->lock, flags);
|
|
|
+ return;
|
|
|
+
|
|
|
+drop_and_unlock:
|
|
|
+ ++dev->stats.tx_dropped;
|
|
|
+ dev_kfree_skb_any(skb);
|
|
|
+ spin_unlock_irqrestore(&priv->lock, flags);
|
|
|
}
|
|
|
|
|
|
static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
|