@@ -2539,7 +2539,7 @@ static inline int skb_needs_linearize(struct sk_buff *skb,
 }
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
-			struct netdev_queue *txq, void *accel_priv)
+			struct netdev_queue *txq)
 {
 	const struct net_device_ops *ops = dev->netdev_ops;
 	int rc = NETDEV_TX_OK;
@@ -2605,13 +2605,10 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 			dev_queue_xmit_nit(skb, dev);
 
 		skb_len = skb->len;
-		if (accel_priv)
-			rc = ops->ndo_dfwd_start_xmit(skb, dev, accel_priv);
-		else
-			rc = ops->ndo_start_xmit(skb, dev);
+		rc = ops->ndo_start_xmit(skb, dev);
 
 		trace_net_dev_xmit(skb, rc, dev, skb_len);
-		if (rc == NETDEV_TX_OK && txq)
+		if (rc == NETDEV_TX_OK)
 			txq_trans_update(txq);
 		return rc;
 	}
@@ -2627,10 +2624,7 @@ gso:
 			dev_queue_xmit_nit(nskb, dev);
 
 		skb_len = nskb->len;
-		if (accel_priv)
-			rc = ops->ndo_dfwd_start_xmit(nskb, dev, accel_priv);
-		else
-			rc = ops->ndo_start_xmit(nskb, dev);
+		rc = ops->ndo_start_xmit(nskb, dev);
 		trace_net_dev_xmit(nskb, rc, dev, skb_len);
 		if (unlikely(rc != NETDEV_TX_OK)) {
 			if (rc & ~NETDEV_TX_MASK)
@@ -2811,7 +2805,7 @@ EXPORT_SYMBOL(dev_loopback_xmit);
  *	the BH enable code must have IRQs enabled so that it will not deadlock.
  *          --BLG
  */
-int dev_queue_xmit(struct sk_buff *skb)
+int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
 {
 	struct net_device *dev = skb->dev;
 	struct netdev_queue *txq;
@@ -2827,7 +2821,7 @@ int dev_queue_xmit(struct sk_buff *skb)
 
 	skb_update_prio(skb);
 
-	txq = netdev_pick_tx(dev, skb);
+	txq = netdev_pick_tx(dev, skb, accel_priv);
 	q = rcu_dereference_bh(txq->qdisc);
 
 #ifdef CONFIG_NET_CLS_ACT
@@ -2863,7 +2857,7 @@ int dev_queue_xmit(struct sk_buff *skb)
 
 		if (!netif_xmit_stopped(txq)) {
 			__this_cpu_inc(xmit_recursion);
-			rc = dev_hard_start_xmit(skb, dev, txq, NULL);
+			rc = dev_hard_start_xmit(skb, dev, txq);
 			__this_cpu_dec(xmit_recursion);
 			if (dev_xmit_complete(rc)) {
 				HARD_TX_UNLOCK(dev, txq);
@@ -2892,8 +2886,19 @@ out:
 	rcu_read_unlock_bh();
 	return rc;
 }
+
+int dev_queue_xmit(struct sk_buff *skb)
+{
+	return __dev_queue_xmit(skb, NULL);
+}
 EXPORT_SYMBOL(dev_queue_xmit);
 
+int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
+{
+	return __dev_queue_xmit(skb, accel_priv);
+}
+EXPORT_SYMBOL(dev_queue_xmit_accel);
+
 
 /*=======================================================================
 			Receiver routines
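
For reference, this is roughly how an upper device using the L2 forwarding
offload is expected to drive the new entry point after this change: the
accel handle is passed once to dev_queue_xmit_accel(), consumed by
netdev_pick_tx() to select the offloaded txq, and the frame then follows the
ordinary ndo_start_xmit() path of the lower device. A minimal sketch, not
taken from this patch; example_priv, lowerdev and example_fwd_xmit are
made-up names (macvlan keeps the real equivalents), and fwd_priv stands for
the handle the lower driver returned from ndo_dfwd_add_station():

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical private data of the upper device; illustration only. */
struct example_priv {
	struct net_device *lowerdev;	/* lower device carrying the traffic */
	void *fwd_priv;			/* handle from ndo_dfwd_add_station() */
};

static netdev_tx_t example_fwd_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	if (priv->fwd_priv) {
		/* Transmit on the lower device; accel_priv now only steers
		 * txq selection, not the hard_start_xmit call itself. */
		skb->dev = priv->lowerdev;
		dev_queue_xmit_accel(skb, priv->fwd_priv);
	} else {
		dev_queue_xmit(skb);
	}
	/* dev_queue_xmit() always consumes the skb, so never report busy. */
	return NETDEV_TX_OK;
}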