@@ -819,6 +819,7 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 			   struct vmxnet3_adapter *adapter)
 {
 	struct Vmxnet3_TxDataDesc *tdd;
+	u8 protocol = 0;
 
 	if (ctx->mss) {	/* TSO */
 		ctx->eth_ip_hdr_size = skb_transport_offset(skb);
@@ -831,16 +832,25 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 			if (ctx->ipv4) {
 				const struct iphdr *iph = ip_hdr(skb);
 
-				if (iph->protocol == IPPROTO_TCP)
-					ctx->l4_hdr_size = tcp_hdrlen(skb);
-				else if (iph->protocol == IPPROTO_UDP)
-					ctx->l4_hdr_size = sizeof(struct udphdr);
-				else
-					ctx->l4_hdr_size = 0;
-			} else {
-				/* for simplicity, don't copy L4 headers */
+				protocol = iph->protocol;
+			} else if (ctx->ipv6) {
+				const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+
+				protocol = ipv6h->nexthdr;
+			}
+
+			switch (protocol) {
+			case IPPROTO_TCP:
+				ctx->l4_hdr_size = tcp_hdrlen(skb);
+				break;
+			case IPPROTO_UDP:
+				ctx->l4_hdr_size = sizeof(struct udphdr);
+				break;
+			default:
 				ctx->l4_hdr_size = 0;
+				break;
 			}
+
 			ctx->copy_size = min(ctx->eth_ip_hdr_size +
 					     ctx->l4_hdr_size, skb->len);
 		} else {
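The hunk above collects the L4 protocol number once, from iphdr->protocol for IPv4 or ipv6hdr->nexthdr for IPv6, and dispatches on it in a single switch instead of duplicating the TCP/UDP checks per address family. A rough standalone sketch of that dispatch (plain userspace C, not the driver code; the l4_hdr_size() helper and its arguments are invented for illustration):

#include <stddef.h>
#include <stdint.h>
#include <netinet/in.h>		/* IPPROTO_TCP, IPPROTO_UDP */
#include <netinet/tcp.h>	/* struct tcphdr */
#include <netinet/udp.h>	/* struct udphdr */

/* How many L4 header bytes to copy into the tx descriptor, given the
 * protocol number taken from either iphdr->protocol or ipv6hdr->nexthdr.
 * tcph is only dereferenced for TCP; doff * 4 is what the kernel's
 * tcp_hdrlen() computes. */
static size_t l4_hdr_size(uint8_t protocol, const struct tcphdr *tcph)
{
	switch (protocol) {
	case IPPROTO_TCP:
		return tcph->doff * 4;		/* variable length, incl. options */
	case IPPROTO_UDP:
		return sizeof(struct udphdr);	/* fixed 8 bytes */
	default:
		return 0;			/* unknown L4: copy no L4 header */
	}
}

One switch also means a future L4 protocol needs a single new case rather than one more branch per address family.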
@@ -887,7 +897,7 @@ vmxnet3_prepare_tso(struct sk_buff *skb,
 		iph->check = 0;
 		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
 						 IPPROTO_TCP, 0);
-	} else {
+	} else if (ctx->ipv6) {
 		struct ipv6hdr *iph = ipv6_hdr(skb);
 
 		tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
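For TSO the driver never computes a full TCP checksum here: it seeds tcph->check with the folded pseudo-header sum computed over a zero length field, and the device folds each segment's length and payload into that running sum as it emits segments. A minimal sketch of the seed for the IPv4 case, with addresses taken as wire-order bytes (the helper names below are invented; the kernel's csum_tcpudp_magic() and csum_ipv6_magic() return the complemented sum, which the driver's leading ~ undoes):

#include <stddef.h>
#include <stdint.h>

/* Fold a 32-bit one's-complement accumulator down to 16 bits. */
static uint16_t fold16(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* Accumulate big-endian 16-bit words from a byte buffer (even length). */
static uint32_t sum_be16(const uint8_t *p, size_t n, uint32_t sum)
{
	size_t i;

	for (i = 0; i < n; i += 2)
		sum += ((uint32_t)p[i] << 8) | p[i + 1];
	return sum;
}

/* Folded one's-complement sum of the TCP/IPv4 pseudo-header with a zero
 * length field. This non-inverted value is what the patch stores in
 * tcph->check, so the NIC can keep extending the sum per segment. */
static uint16_t tso_seed_ipv4_tcp(const uint8_t saddr[4], const uint8_t daddr[4])
{
	uint32_t sum = 0;

	sum = sum_be16(saddr, 4, sum);
	sum = sum_be16(daddr, 4, sum);
	sum += 6;	/* zero pad byte + IPPROTO_TCP; length term is 0 */

	return fold16(sum);
}

The IPv6 case differs only in summing the two 16-byte addresses, which is what csum_ipv6_magic() does when called with a zero length argument.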
@@ -938,6 +948,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 	count = txd_estimate(skb);
 
 	ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));
+	ctx.ipv6 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IPV6));
 
 	ctx.mss = skb_shinfo(skb)->gso_size;
 	if (ctx.mss) {
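The new ctx.ipv6 flag mirrors the existing ctx.ipv4 test: both come from the frame's EtherType, which vlan_get_protocol() reads from inside a VLAN tag when one is present. A standalone sketch of that classification (the tx_ctx struct here is a stand-in for the driver's vmxnet3_tx_ctx, not its real layout):

#include <stdbool.h>
#include <stdint.h>
#include <arpa/inet.h>		/* htons */
#include <net/ethernet.h>	/* ETHERTYPE_IP, ETHERTYPE_IPV6 */

/* Stand-in for the driver's per-packet tx context flags. */
struct tx_ctx {
	bool ipv4;
	bool ipv6;
};

/* Classify the L3 protocol from the EtherType in network byte order.
 * A frame that is neither IPv4 nor IPv6 leaves both flags false, so
 * the header-copy switch earlier in the patch falls through to its
 * default and no L4 header bytes are copied. */
static void classify_l3(struct tx_ctx *ctx, uint16_t ethertype_be)
{
	ctx->ipv4 = (ethertype_be == htons(ETHERTYPE_IP));
	ctx->ipv6 = (ethertype_be == htons(ETHERTYPE_IPV6));
}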