|
@@ -116,10 +116,170 @@ out:
|
|
|
return segs;
|
|
|
}
|
|
|
|
|
|
/* Compute the whole skb csum in s/w and store it, then verify GRO csum
 * starting from gro_offset.
 *
 * Side effects:
 *  - skb->csum is set to the software checksum over the entire packet.
 *  - NAPI_GRO_CB(skb)->csum is set to the checksum of the bytes from
 *    skb_gro_offset() onward (i.e. the GRE header plus payload).
 *  - skb->ip_summed is upgraded to CHECKSUM_COMPLETE if it was not
 *    already, so later GRO stages can trust NAPI_GRO_CB(skb)->csum.
 *
 * Returns the folded csum of the GRO region: 0 means it verifies.
 */
static __sum16 gro_skb_checksum(struct sk_buff *skb)
{
	__sum16 sum;

	/* Full software checksum over the whole packet. */
	skb->csum = skb_checksum(skb, 0, skb->len, 0);
	/* Subtract the bytes preceding gro_offset to obtain the csum of
	 * the GRE header + payload region only.
	 */
	NAPI_GRO_CB(skb)->csum = csum_sub(skb->csum,
		csum_partial(skb->data, skb_gro_offset(skb), 0));
	sum = csum_fold(NAPI_GRO_CB(skb)->csum);
	if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE)) {
		/* The caller only recomputes in s/w when the h/w-provided
		 * CHECKSUM_COMPLETE value failed to verify; if the s/w
		 * result DOES verify (sum == 0), the h/w csum was wrong,
		 * so report a device checksum fault.
		 */
		if (unlikely(!sum))
			netdev_rx_csum_fault(skb->dev);
	} else
		skb->ip_summed = CHECKSUM_COMPLETE;

	return sum;
}
|
|
|
+
|
|
|
/* GRO receive handler for GRE: validate the GRE header (and its optional
 * checksum), mark held packets from other tunnels as not-same-flow, then
 * hand the inner packet to the protocol offload matching greh->protocol.
 *
 * Returns the same_flow/held-packet list pointer produced by the inner
 * gro_receive callback, or NULL with NAPI_GRO_CB(skb)->flush set when the
 * packet cannot be aggregated.
 */
static struct sk_buff **gre_gro_receive(struct sk_buff **head,
					struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	const struct gre_base_hdr *greh;
	unsigned int hlen, grehlen;
	unsigned int off;
	int flush = 1;		/* flush (no aggregation) unless proven OK */
	struct packet_offload *ptype;
	__be16 type;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*greh);
	greh = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		/* Base header not linear yet: pull it in. This may
		 * relocate the header, hence greh is re-fetched.
		 */
		greh = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!greh))
			goto out;
	}

	/* Only support version 0 and K (key), C (csum) flags. Note that
	 * although the support for the S (seq#) flag can be added easily
	 * for GRO, this is problematic for GSO hence can not be enabled
	 * here because a GRO pkt may end up in the forwarding path, thus
	 * requiring GSO support to break it up correctly.
	 */
	if ((greh->flags & ~(GRE_KEY|GRE_CSUM)) != 0)
		goto out;

	type = greh->protocol;

	rcu_read_lock();
	ptype = gro_find_receive_by_type(type);
	if (ptype == NULL)
		goto out_unlock;

	/* Full GRE header length: base plus one 32-bit section for each
	 * of the optional csum and key fields that are present.
	 */
	grehlen = GRE_HEADER_SECTION;

	if (greh->flags & GRE_KEY)
		grehlen += GRE_HEADER_SECTION;

	if (greh->flags & GRE_CSUM)
		grehlen += GRE_HEADER_SECTION;

	hlen = off + grehlen;
	if (skb_gro_header_hard(skb, hlen)) {
		/* Optional fields not linear yet; re-pull (and re-fetch
		 * greh, which may move again).
		 */
		greh = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!greh))
			goto out_unlock;
	}
	if (greh->flags & GRE_CSUM) { /* Need to verify GRE csum first */
		__sum16 csum = 0;

		if (skb->ip_summed == CHECKSUM_COMPLETE)
			csum = csum_fold(NAPI_GRO_CB(skb)->csum);
		/* Don't trust csum error calculated/reported by h/w */
		if (skb->ip_summed == CHECKSUM_NONE || csum != 0)
			csum = gro_skb_checksum(skb);

		/* GRE CSUM is the 1's complement of the 1's complement sum
		 * of the GRE hdr plus payload so it should add up to 0xffff
		 * (and 0 after csum_fold()) just like the IPv4 hdr csum.
		 */
		if (csum)
			goto out_unlock;
	}
	flush = 0;

	/* Walk the held packets and clear same_flow on any that belong to
	 * a different GRE tunnel than this skb.
	 */
	for (p = *head; p; p = p->next) {
		const struct gre_base_hdr *greh2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		/* The following checks are needed to ensure only pkts
		 * from the same tunnel are considered for aggregation.
		 * The criteria for "the same tunnel" includes:
		 * 1) same version (we only support version 0 here)
		 * 2) same protocol (we only support ETH_P_IP for now)
		 * 3) same set of flags
		 * 4) same key if the key field is present.
		 */
		greh2 = (struct gre_base_hdr *)(p->data + off);

		if (greh2->flags != greh->flags ||
		    greh2->protocol != greh->protocol) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
		if (greh->flags & GRE_KEY) {
			/* compare keys */
			/* NOTE(review): when GRE_CSUM is also set, the key
			 * field sits one GRE_HEADER_SECTION past (greh+1),
			 * so this appears to compare the csum words rather
			 * than the keys — confirm against RFC 2890 layout.
			 */
			if (*(__be32 *)(greh2+1) != *(__be32 *)(greh+1)) {
				NAPI_GRO_CB(p)->same_flow = 0;
				continue;
			}
		}
	}

	/* Advance past the GRE header before handing off to the inner
	 * protocol's gro_receive.
	 */
	skb_gro_pull(skb, grehlen);

	/* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/
	skb_gro_postpull_rcsum(skb, greh, grehlen);

	pp = ptype->callbacks.gro_receive(head, skb);

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}
|
|
|
+
|
|
|
+int gre_gro_complete(struct sk_buff *skb, int nhoff)
|
|
|
+{
|
|
|
+ struct gre_base_hdr *greh = (struct gre_base_hdr *)(skb->data + nhoff);
|
|
|
+ struct packet_offload *ptype;
|
|
|
+ unsigned int grehlen = sizeof(*greh);
|
|
|
+ int err = -ENOENT;
|
|
|
+ __be16 type;
|
|
|
+
|
|
|
+ type = greh->protocol;
|
|
|
+ if (greh->flags & GRE_KEY)
|
|
|
+ grehlen += GRE_HEADER_SECTION;
|
|
|
+
|
|
|
+ if (greh->flags & GRE_CSUM)
|
|
|
+ grehlen += GRE_HEADER_SECTION;
|
|
|
+
|
|
|
+ rcu_read_lock();
|
|
|
+ ptype = gro_find_complete_by_type(type);
|
|
|
+ if (ptype != NULL)
|
|
|
+ err = ptype->callbacks.gro_complete(skb, nhoff + grehlen);
|
|
|
+
|
|
|
+ rcu_read_unlock();
|
|
|
+ return err;
|
|
|
+}
|
|
|
+
|
|
|
/* GRE protocol offload: GSO segmentation plus GRO receive/complete
 * handlers registered for IPPROTO_GRE.
 */
static const struct net_offload gre_offload = {
	.callbacks = {
		.gso_send_check = gre_gso_send_check,
		.gso_segment = gre_gso_segment,
		.gro_receive = gre_gro_receive,
		.gro_complete = gre_gro_complete,
	},
};
|
|
|
|