@@ -738,15 +738,17 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct tun_struct *tun = netdev_priv(dev);
 	int txq = skb->queue_mapping;
 	struct tun_file *tfile;
+	u32 numqueues = 0;
 
 	rcu_read_lock();
 	tfile = rcu_dereference(tun->tfiles[txq]);
+	numqueues = ACCESS_ONCE(tun->numqueues);
 
 	/* Drop packet if interface is not attached */
-	if (txq >= tun->numqueues)
+	if (txq >= numqueues)
 		goto drop;
 
-	if (tun->numqueues == 1) {
+	if (numqueues == 1) {
 		/* Select queue was not called for the skbuff, so we extract the
 		 * RPS hash and save it into the flow_table here.
 		 */
@@ -779,8 +781,8 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* Limit the number of packets queued by dividing txq length with the
 	 * number of queues.
 	 */
-	if (skb_queue_len(&tfile->socket.sk->sk_receive_queue)
-	    >= dev->tx_queue_len / tun->numqueues)
+	if (skb_queue_len(&tfile->socket.sk->sk_receive_queue) * numqueues
+	    >= dev->tx_queue_len)
 		goto drop;
 
 	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))