@@ -1307,13 +1307,7 @@ static int first_packet_length(struct sock *sk)
 	res = skb ? skb->len : -1;
 	spin_unlock_bh(&rcvq->lock);
 
-	if (!skb_queue_empty(&list_kill)) {
-		bool slow = lock_sock_fast(sk);
-
-		__skb_queue_purge(&list_kill);
-		sk_mem_reclaim_partial(sk);
-		unlock_sock_fast(sk, slow);
-	}
+	__skb_queue_purge(&list_kill);
 	return res;
 }
 
@@ -1362,7 +1356,6 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
 	int err;
 	int is_udplite = IS_UDPLITE(sk);
 	bool checksum_valid = false;
-	bool slow;
 
 	if (flags & MSG_ERRQUEUE)
 		return ip_recv_error(sk, msg, len, addr_len);
@@ -1403,13 +1396,12 @@ try_again:
 	}
 
 	if (unlikely(err)) {
-		trace_kfree_skb(skb, udp_recvmsg);
 		if (!peeked) {
 			atomic_inc(&sk->sk_drops);
 			UDP_INC_STATS(sock_net(sk),
 				      UDP_MIB_INERRORS, is_udplite);
 		}
-		skb_free_datagram_locked(sk, skb);
+		kfree_skb(skb);
 		return err;
 	}
 
@@ -1434,16 +1426,15 @@ try_again:
 	if (flags & MSG_TRUNC)
 		err = ulen;
 
-	__skb_free_datagram_locked(sk, skb, peeking ? -err : err);
+	skb_consume_udp(sk, skb, peeking ? -err : err);
 	return err;
 
 csum_copy_err:
-	slow = lock_sock_fast(sk);
-	if (!skb_kill_datagram(sk, skb, flags)) {
+	if (!__sk_queue_drop_skb(sk, skb, flags)) {
 		UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
 		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
 	}
-	unlock_sock_fast(sk, slow);
+	kfree_skb(skb);
 
 	/* starting over for a new packet, but check if we need to yield */
 	cond_resched();
@@ -1562,7 +1553,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 		sk_incoming_cpu_update(sk);
 	}
 
-	rc = __sock_queue_rcv_skb(sk, skb);
+	rc = __udp_enqueue_schedule_skb(sk, skb);
 	if (rc < 0) {
 		int is_udplite = IS_UDPLITE(sk);
 
@@ -1577,7 +1568,6 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	}
 
 	return 0;
-
 }
 
 static struct static_key udp_encap_needed __read_mostly;
@@ -1599,7 +1589,6 @@ EXPORT_SYMBOL(udp_encap_enable);
 int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
 	struct udp_sock *up = udp_sk(sk);
-	int rc;
 	int is_udplite = IS_UDPLITE(sk);
 
 	/*
@@ -1686,25 +1675,9 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 		goto drop;
 
 	udp_csum_pull_header(skb);
-	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
-		__UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS,
-				is_udplite);
-		goto drop;
-	}
-
-	rc = 0;
 
 	ipv4_pktinfo_prepare(sk, skb);
-	bh_lock_sock(sk);
-	if (!sock_owned_by_user(sk))
-		rc = __udp_queue_rcv_skb(sk, skb);
-	else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
-		bh_unlock_sock(sk);
-		goto drop;
-	}
-	bh_unlock_sock(sk);
-
-	return rc;
+	return __udp_queue_rcv_skb(sk, skb);
 
 csum_error:
 	__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
@@ -2314,13 +2287,13 @@ struct proto udp_prot = {
 	.connect	   = ip4_datagram_connect,
 	.disconnect	   = udp_disconnect,
 	.ioctl		   = udp_ioctl,
+	.init		   = udp_init_sock,
 	.destroy	   = udp_destroy_sock,
 	.setsockopt	   = udp_setsockopt,
 	.getsockopt	   = udp_getsockopt,
 	.sendmsg	   = udp_sendmsg,
 	.recvmsg	   = udp_recvmsg,
 	.sendpage	   = udp_sendpage,
-	.backlog_rcv	   = __udp_queue_rcv_skb,
 	.release_cb	   = ip4_datagram_release_cb,
 	.hash		   = udp_lib_hash,
 	.unhash		   = udp_lib_unhash,