@@ -198,7 +198,7 @@ void ipvlan_process_multicast(struct work_struct *work)
 	unsigned int mac_hash;
 	int ret;
 	u8 pkt_type;
-	bool hlocal, dlocal;
+	bool tx_pkt;
 
 	__skb_queue_head_init(&list);
 
@@ -207,8 +207,11 @@ void ipvlan_process_multicast(struct work_struct *work)
 	spin_unlock_bh(&port->backlog.lock);
 
 	while ((skb = __skb_dequeue(&list)) != NULL) {
+		struct net_device *dev = skb->dev;
+		bool consumed = false;
+
 		ethh = eth_hdr(skb);
-		hlocal = ether_addr_equal(ethh->h_source, port->dev->dev_addr);
+		tx_pkt = IPVL_SKB_CB(skb)->tx_pkt;
 		mac_hash = ipvlan_mac_hash(ethh->h_dest);
 
 		if (ether_addr_equal(ethh->h_dest, port->dev->broadcast))
@@ -216,41 +219,45 @@ void ipvlan_process_multicast(struct work_struct *work)
 		else
 			pkt_type = PACKET_MULTICAST;
 
-		dlocal = false;
 		rcu_read_lock();
 		list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
-			if (hlocal && (ipvlan->dev == skb->dev)) {
-				dlocal = true;
+			if (tx_pkt && (ipvlan->dev == skb->dev))
 				continue;
-			}
 			if (!test_bit(mac_hash, ipvlan->mac_filters))
 				continue;
-
+			if (!(ipvlan->dev->flags & IFF_UP))
+				continue;
 			ret = NET_RX_DROP;
 			len = skb->len + ETH_HLEN;
 			nskb = skb_clone(skb, GFP_ATOMIC);
-			if (!nskb)
-				goto acct;
-
-			nskb->pkt_type = pkt_type;
-			nskb->dev = ipvlan->dev;
-			if (hlocal)
-				ret = dev_forward_skb(ipvlan->dev, nskb);
-			else
-				ret = netif_rx(nskb);
-acct:
+			local_bh_disable();
+			if (nskb) {
+				consumed = true;
+				nskb->pkt_type = pkt_type;
+				nskb->dev = ipvlan->dev;
+				if (tx_pkt)
+					ret = dev_forward_skb(ipvlan->dev, nskb);
+				else
+					ret = netif_rx(nskb);
+			}
 			ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
+			local_bh_enable();
 		}
 		rcu_read_unlock();
 
-		if (dlocal) {
+		if (tx_pkt) {
 			/* If the packet originated here, send it out. */
 			skb->dev = port->dev;
 			skb->pkt_type = pkt_type;
 			dev_queue_xmit(skb);
 		} else {
-			kfree_skb(skb);
+			if (consumed)
+				consume_skb(skb);
+			else
+				kfree_skb(skb);
 		}
+		if (dev)
+			dev_put(dev);
 	}
 }
 
@@ -470,15 +477,24 @@ out:
 }
 
 static void ipvlan_multicast_enqueue(struct ipvl_port *port,
-				     struct sk_buff *skb)
+				     struct sk_buff *skb, bool tx_pkt)
{
 	if (skb->protocol == htons(ETH_P_PAUSE)) {
 		kfree_skb(skb);
 		return;
 	}
 
+	/* Record whether the deferred packet is from the TX or the RX path.
+	 * Deciding from the mac-addresses on the packet would lead to
+	 * erroneous decisions. (This would be true for a loopback-mode
+	 * master device or a hair-pin mode of the switch.)
+	 */
+	IPVL_SKB_CB(skb)->tx_pkt = tx_pkt;
+
 	spin_lock(&port->backlog.lock);
 	if (skb_queue_len(&port->backlog) < IPVLAN_QBACKLOG_LIMIT) {
+		if (skb->dev)
+			dev_hold(skb->dev);
 		__skb_queue_tail(&port->backlog, skb);
 		spin_unlock(&port->backlog.lock);
 		schedule_work(&port->wq);
@@ -537,7 +553,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
 
 	} else if (is_multicast_ether_addr(eth->h_dest)) {
 		ipvlan_skb_crossing_ns(skb, NULL);
-		ipvlan_multicast_enqueue(ipvlan->port, skb);
+		ipvlan_multicast_enqueue(ipvlan->port, skb, true);
 		return NET_XMIT_SUCCESS;
 	}
 
@@ -634,7 +650,7 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
 			 */
 			if (nskb) {
 				ipvlan_skb_crossing_ns(nskb, NULL);
-				ipvlan_multicast_enqueue(port, nskb);
+				ipvlan_multicast_enqueue(port, nskb, false);
 			}
 		}
 	} else {
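
The hunks above read and write IPVL_SKB_CB(skb)->tx_pkt, which comes from a companion change to the driver's private header that is not part of this diff. As a minimal sketch of what drivers/net/ipvlan/ipvlan.h would need (the struct and macro shapes follow the upstream driver, but treat the exact definition as an assumption here):

/* Sketch of the control-block helper assumed by the hunks above.
 * skb->cb (48 bytes in struct sk_buff) is per-skb scratch space owned
 * by the layer currently holding the skb, so the flag survives while
 * the packet sits on the backlog queue and is read back later by
 * ipvlan_process_multicast().
 */
struct ipvl_skb_cb {
	bool tx_pkt;	/* true: queued from the TX path; false: from RX */
};

#define IPVL_SKB_CB(_skb) ((struct ipvl_skb_cb *)&((_skb)->cb[0]))

Stashing the TX/RX origin at enqueue time is what lets the worker drop the old hlocal/dlocal mac-address heuristic; the dev_hold()/dev_put() pair likewise brackets the skb's time on the backlog so skb->dev cannot be unregistered while the deferred work still references it.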
|