@@ -1647,7 +1647,8 @@ static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
 static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 				    struct udphdr *uh,
 				    __be32 saddr, __be32 daddr,
-				    struct udp_table *udptable)
+				    struct udp_table *udptable,
+				    int proto)
 {
 	struct sock *sk, *stack[256 / sizeof(struct sock *)];
 	struct hlist_nulls_node *node;
@@ -1656,6 +1657,7 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 	int dif = skb->dev->ifindex;
 	unsigned int count = 0, offset = offsetof(typeof(*sk), sk_nulls_node);
 	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
+	bool inner_flushed = false;

 	if (use_hash2) {
 		hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
@@ -1674,6 +1676,7 @@ start_lookup:
 			     dif, hnum)) {
 			if (unlikely(count == ARRAY_SIZE(stack))) {
 				flush_stack(stack, count, skb, ~0);
+				inner_flushed = true;
 				count = 0;
 			}
 			stack[count++] = sk;
@@ -1695,7 +1698,10 @@ start_lookup:
 	if (count) {
 		flush_stack(stack, count, skb, count - 1);
 	} else {
-		kfree_skb(skb);
+		if (!inner_flushed)
+			UDP_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
+					 proto == IPPROTO_UDPLITE);
+		consume_skb(skb);
 	}
 	return 0;
 }
@@ -1781,7 +1787,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,

 	if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
 		return __udp4_lib_mcast_deliver(net, skb, uh,
-						saddr, daddr, udptable);
+						saddr, daddr, udptable, proto);

 	sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
 	if (sk != NULL) {
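
Two things are going on in the delivery-function hunk. First, a multicast/broadcast datagram that matched no socket at any point (inner_flushed stays false) is now counted under UDP_MIB_IGNOREDMULTI instead of silently vanishing. Second, the final free switches from kfree_skb() to consume_skb(): consume_skb() marks the skb as consumed normally rather than dropped, so an unmatched multicast no longer registers as a packet drop to tools hooked on the kfree_skb tracepoint. The sketch below is a minimal userspace C mock-up of that control flow, not kernel code; mcast_deliver(), the port array, and the ignored_multi counter are illustrative stand-ins for the kernel's socket lookup, flush_stack(), and the MIB counter.

#include <stdbool.h>
#include <stdio.h>

#define STACK_SLOTS 4

static unsigned long ignored_multi;	/* stand-in for UDP_MIB_IGNOREDMULTI */

/* Stand-in for the kernel's flush_stack(): deliver a batch of matches. */
static void flush_stack(const int *stack, unsigned int count)
{
	for (unsigned int i = 0; i < count; i++)
		printf("deliver copy to socket bound to port %d\n", stack[i]);
}

/*
 * Mirror of the patched control flow: batch matching sockets on a small
 * stack, flush when it fills, and count the datagram as ignored only if
 * no socket matched at any point (inner_flushed stays false).
 */
static void mcast_deliver(const int *ports, int nports, int dst_port)
{
	int stack[STACK_SLOTS];
	unsigned int count = 0;
	bool inner_flushed = false;

	for (int i = 0; i < nports; i++) {
		if (ports[i] != dst_port)
			continue;
		if (count == STACK_SLOTS) {	/* stack full: flush early */
			flush_stack(stack, count);
			inner_flushed = true;
			count = 0;
		}
		stack[count++] = ports[i];
	}

	if (count)
		flush_stack(stack, count);
	else if (!inner_flushed)
		ignored_multi++;	/* nobody was listening */
}

int main(void)
{
	int ports[] = { 5000, 5000, 6000 };

	mcast_deliver(ports, 3, 5000);	/* two listeners: delivered twice */
	mcast_deliver(ports, 3, 7000);	/* no listener: counted as ignored */
	printf("IgnoredMulti: %lu\n", ignored_multi);
	return 0;
}

Assuming the companion SNMP plumbing for the new MIB entry is in place, the counter should surface as IgnoredMulti on the Udp: line of /proc/net/snmp, separated per protocol (the proto == IPPROTO_UDPLITE argument selects the UDP-Lite statistics block instead of the UDP one).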