@@ -121,12 +121,13 @@ static void caif_flow_ctrl(struct sock *sk, int mode)
  * Copied from sock.c:sock_queue_rcv_skb(), but changed so packets are
  * not dropped, but CAIF is sending flow off instead.
  */
-static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+static void caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
 	int err;
 	unsigned long flags;
 	struct sk_buff_head *list = &sk->sk_receive_queue;
 	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
+	bool queued = false;
 
 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
 	    (unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
@@ -139,7 +140,8 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 
 	err = sk_filter(sk, skb);
 	if (err)
-		return err;
+		goto out;
+
 	if (!sk_rmem_schedule(sk, skb, skb->truesize) && rx_flow_is_on(cf_sk)) {
 		set_rx_flow_off(cf_sk);
 		net_dbg_ratelimited("sending flow OFF due to rmem_schedule\n");
@@ -147,21 +149,16 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	}
 	skb->dev = NULL;
 	skb_set_owner_r(skb, sk);
-	/* Cache the SKB length before we tack it onto the receive
-	 * queue. Once it is added it no longer belongs to us and
-	 * may be freed by other threads of control pulling packets
-	 * from the queue.
-	 */
 	spin_lock_irqsave(&list->lock, flags);
-	if (!sock_flag(sk, SOCK_DEAD))
+	queued = !sock_flag(sk, SOCK_DEAD);
+	if (queued)
 		__skb_queue_tail(list, skb);
 	spin_unlock_irqrestore(&list->lock, flags);
-
-	if (!sock_flag(sk, SOCK_DEAD))
+out:
+	if (queued)
 		sk->sk_data_ready(sk);
 	else
 		kfree_skb(skb);
-	return 0;
 }
 
 /* Packet Receive Callback function called from CAIF Stack */
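
The patch does three things visible in the hunks above: the return type becomes void (callers ignored the result anyway), the sk_filter() error path now falls through to the out: label so the skb is freed rather than leaked, and SOCK_DEAD is sampled exactly once while the receive-queue lock is held, with the cached result (queued) reused after the lock is dropped instead of re-reading the flag racily. As a rough illustration of that last point, here is a minimal userspace sketch of the same "sample under the lock, act on the cached value afterwards" pattern; all names (struct queue, enqueue, the dead flag) are hypothetical and pthreads stands in for the kernel spinlock, so this is not code from the patch itself:

	/* Sketch only: cache a flag read under a lock and act on the cached
	 * value after unlocking, mirroring the `queued` change above.
	 */
	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct queue {
		pthread_mutex_t lock;
		bool dead;              /* analogue of SOCK_DEAD */
		int *items;
		size_t len, cap;
	};

	static void enqueue(struct queue *q, int item)
	{
		bool queued = false;

		pthread_mutex_lock(&q->lock);
		/* Sample the condition exactly once, while the lock is held. */
		queued = !q->dead;
		if (queued) {
			if (q->len == q->cap) {
				q->cap = q->cap ? q->cap * 2 : 8;
				q->items = realloc(q->items, q->cap * sizeof(*q->items));
			}
			q->items[q->len++] = item;
		}
		pthread_mutex_unlock(&q->lock);

		/* Act on the cached result; re-reading q->dead here could race
		 * with another thread flipping it after the unlock.
		 */
		if (queued)
			printf("queued %d\n", item);
		else
			printf("dropped %d (queue dead)\n", item);
	}

	int main(void)
	{
		struct queue q = { .lock = PTHREAD_MUTEX_INITIALIZER };

		enqueue(&q, 1);
		q.dead = true;
		enqueue(&q, 2);
		free(q.items);
		return 0;
	}

In the kernel function the same idea means sk_data_ready() is only called when the skb was actually queued, and every path that does not queue the skb ends up at kfree_skb().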