@@ -1172,6 +1172,112 @@ out:
 	return ret;
 }
 
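+/* Uncharge "size" bytes from the socket receive memory and hand the complete
+ * SK_MEM_QUANTUM chunks of the forward allocation back to the memory
+ * accounting layer; a non-zero "partial" keeps a partial quantum cached for
+ * the next packet. E.g. with a 4KB quantum, a 9000 byte forward allocation
+ * releases 8192 bytes and keeps 808 cached.
+ */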
+static void udp_rmem_release(struct sock *sk, int size, int partial)
+{
+	int amt;
+
+	atomic_sub(size, &sk->sk_rmem_alloc);
+
+	spin_lock_bh(&sk->sk_receive_queue.lock);
+	sk->sk_forward_alloc += size;
+	amt = (sk->sk_forward_alloc - partial) & ~(SK_MEM_QUANTUM - 1);
+	sk->sk_forward_alloc -= amt;
+	spin_unlock_bh(&sk->sk_receive_queue.lock);
+
+	if (amt)
+		__sk_mem_reduce_allocated(sk, amt >> SK_MEM_QUANTUM_SHIFT);
+}
+
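+/* skb destructor for packets sitting in the UDP receive queue: uncharge the
+ * skb truesize from the owning socket, keeping a partial quantum cached.
+ */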
+static void udp_rmem_free(struct sk_buff *skb)
+{
+	udp_rmem_release(skb->sk, skb->truesize, 1);
+}
+
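+/* Charge skb->truesize to the socket receive memory, growing the forward
+ * allocation if needed, and tail-queue the skb on sk_receive_queue.
+ * Returns 0 on success, -ENOMEM when the receive buffer is full, or
+ * -ENOBUFS when raising the memory allocation fails.
+ */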
+int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
+{
+	struct sk_buff_head *list = &sk->sk_receive_queue;
+	int rmem, delta, amt, err = -ENOMEM;
+	int size = skb->truesize;
+
+	/* try to avoid the costly atomic add/sub pair when the receive
+	 * queue is full; always allow at least a packet
+	 */
+	rmem = atomic_read(&sk->sk_rmem_alloc);
+	if (rmem && (rmem + size > sk->sk_rcvbuf))
+		goto drop;
+
+	/* we drop only if the receive buf is full and the receive
+	 * queue contains some other skb
+	 */
+	rmem = atomic_add_return(size, &sk->sk_rmem_alloc);
+	if ((rmem > sk->sk_rcvbuf) && (rmem > size))
+		goto uncharge_drop;
+
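+	/* sk_forward_alloc is updated under the receive queue lock; when this
+	 * skb does not fit, grab whole SK_MEM_QUANTUM units covering its
+	 * truesize from the memory accounting layer
+	 */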
+	spin_lock(&list->lock);
+	if (size >= sk->sk_forward_alloc) {
+		amt = sk_mem_pages(size);
+		delta = amt << SK_MEM_QUANTUM_SHIFT;
+		if (!__sk_mem_raise_allocated(sk, delta, amt, SK_MEM_RECV)) {
+			err = -ENOBUFS;
+			spin_unlock(&list->lock);
+			goto uncharge_drop;
+		}
+
+		sk->sk_forward_alloc += delta;
+	}
+
+	sk->sk_forward_alloc -= size;
+
+	/* the skb owner is now the udp socket */
+	skb->sk = sk;
+	skb->destructor = udp_rmem_free;
+	skb->dev = NULL;
+	sock_skb_set_dropcount(sk, skb);
+
+	__skb_queue_tail(list, skb);
+	spin_unlock(&list->lock);
+
+	if (!sock_flag(sk, SOCK_DEAD))
+		sk->sk_data_ready(sk);
+
+	return 0;
+
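+	/* uncharge_drop backs out the rmem charge taken above; the plain drop
+	 * path only needs to bump the drop counter
+	 */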
+uncharge_drop:
+	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
+
+drop:
+	atomic_inc(&sk->sk_drops);
+	return err;
+}
+EXPORT_SYMBOL_GPL(__udp_enqueue_schedule_skb);
+
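+/* sk_destruct for UDP sockets: purging the receive queue runs udp_rmem_free()
+ * on every queued skb, then udp_rmem_release(sk, 0, 0) returns the forward
+ * allocation left behind before handing off to inet_sock_destruct().
+ */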
+static void udp_destruct_sock(struct sock *sk)
+{
+	/* reclaim completely the forward allocated memory */
+	__skb_queue_purge(&sk->sk_receive_queue);
+	udp_rmem_release(sk, 0, 0);
+	inet_sock_destruct(sk);
+}
+
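+/* socket init helper: install the UDP specific destructor */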
+int udp_init_sock(struct sock *sk)
+{
+	sk->sk_destruct = udp_destruct_sock;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(udp_init_sock);
+
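+/* Free a received skb: when a peek offset is in use, move it backwards by
+ * "len" under the fast socket lock before releasing the skb.
+ */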
+void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
+{
+	if (unlikely(READ_ONCE(sk->sk_peek_off) >= 0)) {
+		bool slow = lock_sock_fast(sk);
+
+		sk_peek_offset_bwd(sk, len);
+		unlock_sock_fast(sk, slow);
+	}
+	consume_skb(skb);
+}
+EXPORT_SYMBOL_GPL(skb_consume_udp);
+
 /**
  * first_packet_length - return length of first packet in receive queue
  * @sk: socket