@@ -2665,12 +2665,6 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
 	if (skb->next)
 		return skb;
 
-	/* If device doesn't need skb->dst, release it right now while
-	 * its hot in this cpu cache
-	 */
-	if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
-		skb_dst_drop(skb);
-
 	features = netif_skb_features(skb);
 	skb = validate_xmit_vlan(skb, features);
 	if (unlikely(!skb))
@@ -2811,8 +2805,6 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 	 * waiting to be sent out; and the qdisc is not running -
 	 * xmit the skb directly.
 	 */
-		if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
-			skb_dst_force(skb);
 
 		qdisc_bstats_update(q, skb);
 
@@ -2827,7 +2819,6 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 
 		rc = NET_XMIT_SUCCESS;
 	} else {
-		skb_dst_force(skb);
 		rc = q->enqueue(skb, q) & NET_XMIT_MASK;
 		if (qdisc_run_begin(q)) {
 			if (unlikely(contended)) {
@@ -2924,6 +2915,14 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
 
 	skb_update_prio(skb);
 
+	/* If device/qdisc don't need skb->dst, release it right now while
+	 * its hot in this cpu cache.
+	 */
+	if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
+		skb_dst_drop(skb);
+	else
+		skb_dst_force(skb);
+
 	txq = netdev_pick_tx(dev, skb, accel_priv);
 	q = rcu_dereference_bh(txq->qdisc);
 
@@ -6674,7 +6673,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 	INIT_LIST_HEAD(&dev->adj_list.lower);
 	INIT_LIST_HEAD(&dev->all_adj_list.upper);
 	INIT_LIST_HEAD(&dev->all_adj_list.lower);
-	dev->priv_flags = IFF_XMIT_DST_RELEASE;
+	dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
 	setup(dev);
 
 	dev->num_tx_queues = txqs;
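
A minimal sketch (not part of the hunks above) of how the two flags are meant to interact: a device whose xmit path needs skb->dst clears both bits, a dst-requiring qdisc clears only IFF_XMIT_DST_RELEASE while attached, and the _PERM bit records whether the device itself allows the early release so the fast path can be restored later. The example_* helpers are invented for illustration, and the companion netdevice.h change defining IFF_XMIT_DST_RELEASE_PERM is assumed.

/*
 * Illustrative sketch only -- assumes IFF_XMIT_DST_RELEASE_PERM is defined
 * next to IFF_XMIT_DST_RELEASE in netdevice.h; helper names are made up.
 */
#include <linux/netdevice.h>

/* A device whose ndo_start_xmit() dereferences skb_dst() opts out of the
 * early release permanently by clearing both bits in its setup routine.
 */
static void example_dst_using_setup(struct net_device *dev)
{
	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
}

/* A qdisc that needs skb->dst clears only the working bit while attached. */
static void example_qdisc_needs_dst(struct net_device *dev)
{
	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
}

/* Once that qdisc is gone, the permanent bit tells us whether the device
 * itself still allows the release, so the fast path can be re-enabled.
 */
static void example_qdisc_gone(struct net_device *dev)
{
	if (dev->priv_flags & IFF_XMIT_DST_RELEASE_PERM)
		dev->priv_flags |= IFF_XMIT_DST_RELEASE;
}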