@@ -2485,52 +2485,6 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
 	return 0;
 }
 
-struct dev_gso_cb {
-	void (*destructor)(struct sk_buff *skb);
-};
-
-#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
-
-static void dev_gso_skb_destructor(struct sk_buff *skb)
-{
-	struct dev_gso_cb *cb;
-
-	kfree_skb_list(skb->next);
-	skb->next = NULL;
-
-	cb = DEV_GSO_CB(skb);
-	if (cb->destructor)
-		cb->destructor(skb);
-}
-
-/**
- *	dev_gso_segment - Perform emulated hardware segmentation on skb.
- *	@skb: buffer to segment
- *	@features: device features as applicable to this skb
- *
- *	This function segments the given skb and stores the list of segments
- *	in skb->next.
- */
-static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
-{
-	struct sk_buff *segs;
-
-	segs = skb_gso_segment(skb, features);
-
-	/* Verifying header integrity only. */
-	if (!segs)
-		return 0;
-
-	if (IS_ERR(segs))
-		return PTR_ERR(segs);
-
-	skb->next = segs;
-	DEV_GSO_CB(skb)->destructor = skb->destructor;
-	skb->destructor = dev_gso_skb_destructor;
-
-	return 0;
-}
-
 /* If MPLS offload request, verify we are testing hardware MPLS features
  * instead of standard features for the netdev.
  */
@@ -2599,118 +2553,125 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(netif_skb_features);
 
-int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
-			struct netdev_queue *txq)
+static int xmit_one(struct sk_buff *skb, struct net_device *dev,
+		    struct netdev_queue *txq, bool more)
+{
+	unsigned int len;
+	int rc;
+
+	if (!list_empty(&ptype_all))
+		dev_queue_xmit_nit(skb, dev);
+
+	len = skb->len;
+	trace_net_dev_start_xmit(skb, dev);
+	rc = netdev_start_xmit(skb, dev, txq, more);
+	trace_net_dev_xmit(skb, rc, dev, len);
+
+	return rc;
+}
+
+struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
+				    struct netdev_queue *txq, int *ret)
 {
+	struct sk_buff *skb = first;
 	int rc = NETDEV_TX_OK;
-	unsigned int skb_len;
 
-	if (likely(!skb->next)) {
-		netdev_features_t features;
+	while (skb) {
+		struct sk_buff *next = skb->next;
 
-		/*
-		 * If device doesn't need skb->dst, release it right now while
-		 * its hot in this cpu cache
-		 */
-		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
-			skb_dst_drop(skb);
+		skb->next = NULL;
+		rc = xmit_one(skb, dev, txq, next != NULL);
+		if (unlikely(!dev_xmit_complete(rc))) {
+			skb->next = next;
+			goto out;
+		}
 
-		features = netif_skb_features(skb);
+		skb = next;
+		if (netif_xmit_stopped(txq) && skb) {
+			rc = NETDEV_TX_BUSY;
+			break;
+		}
+	}
 
-		if (vlan_tx_tag_present(skb) &&
-		    !vlan_hw_offload_capable(features, skb->vlan_proto)) {
-			skb = __vlan_put_tag(skb, skb->vlan_proto,
-					     vlan_tx_tag_get(skb));
-			if (unlikely(!skb))
-				goto out;
+out:
+	*ret = rc;
+	return skb;
+}
 
+struct sk_buff *validate_xmit_vlan(struct sk_buff *skb, netdev_features_t features)
+{
+	if (vlan_tx_tag_present(skb) &&
+	    !vlan_hw_offload_capable(features, skb->vlan_proto)) {
+		skb = __vlan_put_tag(skb, skb->vlan_proto,
+				     vlan_tx_tag_get(skb));
+		if (skb)
 			skb->vlan_tci = 0;
-		}
+	}
+	return skb;
+}
 
-		/* If encapsulation offload request, verify we are testing
-		 * hardware encapsulation features instead of standard
-		 * features for the netdev
-		 */
-		if (skb->encapsulation)
-			features &= dev->hw_enc_features;
+struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
+{
+	netdev_features_t features;
 
-		if (netif_needs_gso(skb, features)) {
-			if (unlikely(dev_gso_segment(skb, features)))
-				goto out_kfree_skb;
-			if (skb->next)
-				goto gso;
-		} else {
-			if (skb_needs_linearize(skb, features) &&
-			    __skb_linearize(skb))
-				goto out_kfree_skb;
+	if (skb->next)
+		return skb;
 
-			/* If packet is not checksummed and device does not
-			 * support checksumming for this protocol, complete
-			 * checksumming here.
-			 */
-			if (skb->ip_summed == CHECKSUM_PARTIAL) {
-				if (skb->encapsulation)
-					skb_set_inner_transport_header(skb,
-						skb_checksum_start_offset(skb));
-				else
-					skb_set_transport_header(skb,
-						skb_checksum_start_offset(skb));
-				if (!(features & NETIF_F_ALL_CSUM) &&
-				    skb_checksum_help(skb))
-					goto out_kfree_skb;
-			}
-		}
+	/* If device doesn't need skb->dst, release it right now while
+	 * its hot in this cpu cache
+	 */
+	if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
+		skb_dst_drop(skb);
 
-		if (!list_empty(&ptype_all))
-			dev_queue_xmit_nit(skb, dev);
+	features = netif_skb_features(skb);
+	skb = validate_xmit_vlan(skb, features);
+	if (unlikely(!skb))
+		goto out_null;
 
-		skb_len = skb->len;
-		trace_net_dev_start_xmit(skb, dev);
-		rc = netdev_start_xmit(skb, dev);
-		trace_net_dev_xmit(skb, rc, dev, skb_len);
-		if (rc == NETDEV_TX_OK)
-			txq_trans_update(txq);
-		return rc;
-	}
+	/* If encapsulation offload request, verify we are testing
+	 * hardware encapsulation features instead of standard
+	 * features for the netdev
+	 */
+	if (skb->encapsulation)
+		features &= dev->hw_enc_features;
 
-gso:
-	do {
-		struct sk_buff *nskb = skb->next;
+	if (netif_needs_gso(skb, features)) {
+		struct sk_buff *segs;
 
-		skb->next = nskb->next;
-		nskb->next = NULL;
+		segs = skb_gso_segment(skb, features);
+		kfree_skb(skb);
+		if (IS_ERR(segs))
+			segs = NULL;
+		skb = segs;
+	} else {
+		if (skb_needs_linearize(skb, features) &&
+		    __skb_linearize(skb))
+			goto out_kfree_skb;
 
-		if (!list_empty(&ptype_all))
-			dev_queue_xmit_nit(nskb, dev);
-
-		skb_len = nskb->len;
-		trace_net_dev_start_xmit(nskb, dev);
-		rc = netdev_start_xmit(nskb, dev);
-		trace_net_dev_xmit(nskb, rc, dev, skb_len);
-		if (unlikely(rc != NETDEV_TX_OK)) {
-			if (rc & ~NETDEV_TX_MASK)
-				goto out_kfree_gso_skb;
-			nskb->next = skb->next;
-			skb->next = nskb;
-			return rc;
+		/* If packet is not checksummed and device does not
+		 * support checksumming for this protocol, complete
+		 * checksumming here.
+		 */
+		if (skb->ip_summed == CHECKSUM_PARTIAL) {
+			if (skb->encapsulation)
+				skb_set_inner_transport_header(skb,
+					skb_checksum_start_offset(skb));
+			else
+				skb_set_transport_header(skb,
+					skb_checksum_start_offset(skb));
+			if (!(features & NETIF_F_ALL_CSUM) &&
+			    skb_checksum_help(skb))
+				goto out_kfree_skb;
 		}
-		txq_trans_update(txq);
-		if (unlikely(netif_xmit_stopped(txq) && skb->next))
-			return NETDEV_TX_BUSY;
-	} while (skb->next);
-
-out_kfree_gso_skb:
-	if (likely(skb->next == NULL)) {
-		skb->destructor = DEV_GSO_CB(skb)->destructor;
-		consume_skb(skb);
-		return rc;
 	}
+
+	return skb;
+
 out_kfree_skb:
 	kfree_skb(skb);
-out:
-	return rc;
+out_null:
+	return NULL;
 }
-EXPORT_SYMBOL_GPL(dev_hard_start_xmit);
 
 static void qdisc_pkt_len_init(struct sk_buff *skb)
 {
@@ -2922,7 +2883,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
 
 			if (!netif_xmit_stopped(txq)) {
 				__this_cpu_inc(xmit_recursion);
-				rc = dev_hard_start_xmit(skb, dev, txq);
+				skb = dev_hard_start_xmit(skb, dev, txq, &rc);
 				__this_cpu_dec(xmit_recursion);
 				if (dev_xmit_complete(rc)) {
 					HARD_TX_UNLOCK(dev, txq);