@@ -19,6 +19,11 @@
 #include <linux/mii.h>
 #include <linux/kthread.h>
 #include <linux/slab.h>
+#include <linux/if_vlan.h>
+#include <linux/netdevice.h>
+#include <linux/netdev_features.h>
+#include <linux/skbuff.h>
+
 #include <net/iucv/af_iucv.h>
 #include <net/dsfield.h>
 
@@ -6438,6 +6443,32 @@ netdev_features_t qeth_fix_features(struct net_device *dev,
 }
 EXPORT_SYMBOL_GPL(qeth_fix_features);
 
+netdev_features_t qeth_features_check(struct sk_buff *skb,
+                                      struct net_device *dev,
+                                      netdev_features_t features)
+{
+        /* GSO segmentation builds skbs with
+         * a (small) linear part for the headers, and
+         * page frags for the data.
+         * Compared to a linear skb, the header-only part consumes an
+         * additional buffer element. This reduces buffer utilization, and
+         * hurts throughput. So compress small segments into one element.
+         */
+        if (netif_needs_gso(skb, features)) {
+                /* match skb_segment(): */
+                unsigned int doffset = skb->data - skb_mac_header(skb);
+                unsigned int hsize = skb_shinfo(skb)->gso_size;
+                unsigned int hroom = skb_headroom(skb);
+
+                /* linearize only if resulting skb allocations are order-0: */
+                if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
+                        features &= ~NETIF_F_SG;
+        }
+
+        return vlan_features_check(skb, features);
+}
+EXPORT_SYMBOL_GPL(qeth_features_check);
+
 static int __init qeth_core_init(void)
 {
         int rc;
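
For context: the helper is exported with EXPORT_SYMBOL_GPL(), presumably so the qeth discipline drivers can wire it into their netdev ops as the .ndo_features_check callback, which the core invokes per skb via netif_skb_features() on the transmit path. Below is a minimal, illustrative sketch of such wiring; the ops table name and the omitted callbacks are placeholders and not part of this patch.

/* Illustrative sketch only: a discipline driver hooking the exported helper
 * into its net_device_ops (identifiers assumed, not taken from this patch).
 */
#include <linux/netdevice.h>

/* prototype of the helper added above */
netdev_features_t qeth_features_check(struct sk_buff *skb, struct net_device *dev,
                                      netdev_features_t features);

static const struct net_device_ops qeth_example_netdev_ops = {
        .ndo_features_check     = qeth_features_check,
        /* .ndo_start_xmit and the remaining callbacks omitted */
};

On the design choice: the SKB_MAX_HEAD(0) guard bounds the cost of linearizing. On 4 KiB pages it works out to roughly one page minus the skb_shared_info overhead, so an MSS-sized segment plus headers and headroom still fits a single order-0 allocation and NETIF_F_SG is cleared, whereas large gso_size values (e.g. jumbo MTUs) stay above the limit and keep using scatter-gather with page frags.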