@@ -2739,7 +2739,8 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 
 		qdisc_bstats_update(q, skb);
 
-		if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
+		skb = validate_xmit_skb(skb, dev);
+		if (skb && sch_direct_xmit(skb, q, dev, txq, root_lock)) {
 			if (unlikely(contended)) {
 				spin_unlock(&q->busylock);
 				contended = false;
@@ -2879,6 +2880,10 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
 			if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
 				goto recursion_alert;
 
+			skb = validate_xmit_skb(skb, dev);
+			if (!skb)
+				goto drop;
+
 			HARD_TX_LOCK(dev, txq, cpu);
 
 			if (!netif_xmit_stopped(txq)) {
@@ -2904,10 +2909,11 @@ recursion_alert:
 	}
 
 	rc = -ENETDOWN;
+drop:
 	rcu_read_unlock_bh();
 
 	atomic_long_inc(&dev->tx_dropped);
-	kfree_skb(skb);
+	kfree_skb_list(skb);
 	return rc;
 out:
 	rcu_read_unlock_bh();
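
Note on the drop path above: validate_xmit_skb() may segment a GSO skb, so by the time control reaches the new drop: label the buffer can already be a list chained through skb->next, which is presumably why the error path switches from kfree_skb() to kfree_skb_list(). Below is a minimal user-space sketch of that pattern, not kernel code: struct buf, fake_validate() and free_buf_list() are hypothetical stand-ins for sk_buff, validate_xmit_skb() and kfree_skb_list().

/*
 * Sketch of the drop-path pattern, under the assumptions stated above:
 * validation may turn one buffer into a ->next chain, or fail and return
 * NULL, so the error path must free the whole chain, not a single buffer.
 */
#include <stdio.h>
#include <stdlib.h>

struct buf {
	struct buf *next;
	int id;
};

/* Counterpart of kfree_skb_list(): walk ->next and free every buffer. */
static void free_buf_list(struct buf *head)
{
	while (head) {
		struct buf *next = head->next;

		printf("freeing buf %d\n", head->id);
		free(head);
		head = next;
	}
}

/* Counterpart of validate_xmit_skb(): either segment the buffer into a
 * chain of 'nsegs' buffers, or fail, free it, and return NULL. */
static struct buf *fake_validate(struct buf *head, int nsegs)
{
	struct buf *tail = head;
	int i;

	if (nsegs <= 0) {
		free(head);
		return NULL;
	}
	for (i = 1; i < nsegs; i++) {
		struct buf *seg = calloc(1, sizeof(*seg));

		seg->id = head->id * 10 + i;
		tail->next = seg;
		tail = seg;
	}
	return head;
}

int main(void)
{
	struct buf *skb = calloc(1, sizeof(*skb));

	skb->id = 1;
	skb = fake_validate(skb, 3);	/* validate before "transmitting" */
	if (!skb)
		return 0;		/* validation failed: nothing left to free */

	/* Simulated drop path: the buffer is now a list, so free all of it,
	 * mirroring the kfree_skb() -> kfree_skb_list() change above. */
	free_buf_list(skb);
	return 0;
}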