udp_offload.c

/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	UDPv4 GSO support
 */

#include <linux/skbuff.h>
#include <net/udp.h>
#include <net/protocol.h>
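
/* Segment a UDP-tunneled GSO skb: strip the outer (tunnel) headers, hand
 * the inner packet to @gso_inner_segment, then rebuild the outer headers
 * and the outer UDP checksum on every resulting segment.
 */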
static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
	netdev_features_t features,
	struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
					     netdev_features_t features),
	__be16 new_protocol, bool is_ipv6)
{
	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
	bool remcsum, need_csum, offload_csum, gso_partial;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct udphdr *uh = udp_hdr(skb);
	u16 mac_offset = skb->mac_header;
	__be16 protocol = skb->protocol;
	u16 mac_len = skb->mac_len;
	int udp_offset, outer_hlen;
	__wsum partial;
	bool need_ipsec;

	if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
		goto out;

	/* Adjust partial header checksum to negate old length.
	 * We cannot rely on the value contained in uh->len as it is
	 * possible that the actual value exceeds the boundaries of the
	 * 16 bit length field due to the header being added outside of an
	 * IP or IPv6 frame that was already limited to 64K - 1.
	 */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL)
		partial = (__force __wsum)uh->len;
	else
		partial = (__force __wsum)htonl(skb->len);
	partial = csum_sub(csum_unfold(uh->check), partial);
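
	/* "partial" now holds the outer pseudo-header checksum with the old
	 * length term backed out; each segment folds its own length back in
	 * when the outer checksum is rebuilt below.
	 */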
	/* setup inner skb. */
	skb->encapsulation = 0;
	SKB_GSO_CB(skb)->encap_level = 0;
	__skb_pull(skb, tnl_hlen);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb_inner_network_offset(skb));
	skb->mac_len = skb_inner_network_offset(skb);
	skb->protocol = new_protocol;

	need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
	skb->encap_hdr_csum = need_csum;

	remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM);
	skb->remcsum_offload = remcsum;

	need_ipsec = skb_dst(skb) && dst_xfrm(skb_dst(skb));

	/* Try to offload checksum if possible */
	offload_csum = !!(need_csum &&
			  !need_ipsec &&
			  (skb->dev->features &
			   (is_ipv6 ? (NETIF_F_HW_CSUM | NETIF_F_IPV6_CSUM) :
				      (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM))));

	features &= skb->dev->hw_enc_features;

	/* The only checksum offload we care about from here on out is the
	 * outer one so strip the existing checksum feature flags and
	 * instead set the flag based on our outer checksum offload value.
	 */
	if (remcsum) {
		features &= ~NETIF_F_CSUM_MASK;
		if (!need_csum || offload_csum)
			features |= NETIF_F_HW_CSUM;
	}

	/* segment inner packet. */
	segs = gso_inner_segment(skb, features);
	if (IS_ERR_OR_NULL(segs)) {
		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
				     mac_len);
		goto out;
	}

	gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);

	outer_hlen = skb_tnl_header_len(skb);
	udp_offset = outer_hlen - tnl_hlen;
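
	/* Walk the segment list and restore the outer headers on each one. */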
	skb = segs;
	do {
		unsigned int len;

		if (remcsum)
			skb->ip_summed = CHECKSUM_NONE;

		/* Set up inner headers if we are offloading inner checksum */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_inner_headers(skb);
			skb->encapsulation = 1;
		}

		skb->mac_len = mac_len;
		skb->protocol = protocol;

		__skb_push(skb, outer_hlen);
		skb_reset_mac_header(skb);
		skb_set_network_header(skb, mac_len);
		skb_set_transport_header(skb, udp_offset);
		len = skb->len - udp_offset;
		uh = udp_hdr(skb);

		/* If we are only performing partial GSO the inner header
		 * will be using a length value equal to only one MSS sized
		 * segment instead of the entire frame.
		 */
		if (gso_partial) {
			uh->len = htons(skb_shinfo(skb)->gso_size +
					SKB_GSO_CB(skb)->data_offset +
					skb->head - (unsigned char *)uh);
		} else {
			uh->len = htons(len);
		}

		if (!need_csum)
			continue;
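
		/* Rebuild the outer checksum seed for this segment by
		 * folding the new length into "partial"; the result is
		 * then either completed in software or left for the
		 * device via CHECKSUM_PARTIAL.
		 */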
		uh->check = ~csum_fold(csum_add(partial,
				(__force __wsum)htonl(len)));

		if (skb->encapsulation || !offload_csum) {
			uh->check = gso_make_checksum(skb, ~uh->check);
			if (uh->check == 0)
				uh->check = CSUM_MANGLED_0;
		} else {
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_start = skb_transport_header(skb) - skb->head;
			skb->csum_offset = offsetof(struct udphdr, check);
		}
	} while ((skb = skb->next));
out:
	return segs;
}
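
/* Pick the inner segmentation callback based on how the tunnel payload is
 * encapsulated (Ethernet frame vs. raw IP protocol), then hand off to
 * __skb_udp_tunnel_segment().
 */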
struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
				       netdev_features_t features,
				       bool is_ipv6)
{
	__be16 protocol = skb->protocol;
	const struct net_offload **offloads;
	const struct net_offload *ops;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
					     netdev_features_t features);

	rcu_read_lock();

	switch (skb->inner_protocol_type) {
	case ENCAP_TYPE_ETHER:
		protocol = skb->inner_protocol;
		gso_inner_segment = skb_mac_gso_segment;
		break;
	case ENCAP_TYPE_IPPROTO:
		offloads = is_ipv6 ? inet6_offloads : inet_offloads;
		ops = rcu_dereference(offloads[skb->inner_ipproto]);
		if (!ops || !ops->callbacks.gso_segment)
			goto out_unlock;
		gso_inner_segment = ops->callbacks.gso_segment;
		break;
	default:
		goto out_unlock;
	}

	segs = __skb_udp_tunnel_segment(skb, features, gso_inner_segment,
					protocol, is_ipv6);

out_unlock:
	rcu_read_unlock();

	return segs;
}
EXPORT_SYMBOL(skb_udp_tunnel_segment);
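
/* GSO entry point for IPv4 UDP: only tunnel-encapsulated packets are
 * segmented here; anything else is rejected with ERR_PTR(-EINVAL).
 */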
static struct sk_buff *udp4_tunnel_segment(struct sk_buff *skb,
					   netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);

	if (skb->encapsulation &&
	    (skb_shinfo(skb)->gso_type &
	     (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM)))
		segs = skb_udp_tunnel_segment(skb, features, false);

	return segs;
}
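
/* GRO receive for UDP tunnels: aggregation is only attempted when the
 * socket lookup finds a local encap socket with a gro_receive callback.
 */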
struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
				 struct udphdr *uh, udp_lookup_t lookup)
{
	struct sk_buff *p, **pp = NULL;
	struct udphdr *uh2;
	unsigned int off = skb_gro_offset(skb);
	int flush = 1;
	struct sock *sk;

	if (NAPI_GRO_CB(skb)->encap_mark ||
	    (skb->ip_summed != CHECKSUM_PARTIAL &&
	     NAPI_GRO_CB(skb)->csum_cnt == 0 &&
	     !NAPI_GRO_CB(skb)->csum_valid))
		goto out;

	/* mark that this skb passed once through the tunnel gro layer */
	NAPI_GRO_CB(skb)->encap_mark = 1;

	rcu_read_lock();
	sk = (*lookup)(skb, uh->source, uh->dest);

	if (sk && udp_sk(sk)->gro_receive)
		goto unflush;
	goto out_unlock;

unflush:
	flush = 0;

	for (p = *head; p; p = p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		uh2 = (struct udphdr *)(p->data + off);
		/* Match ports, and require that the checksums are either
		 * both zero or both nonzero.
		 */
		if ((*(u32 *)&uh->source != *(u32 *)&uh2->source) ||
		    (!uh->check ^ !uh2->check)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
	skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
	pp = call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb);

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;
	return pp;
}
EXPORT_SYMBOL(udp_gro_receive);
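
/* IPv4 GRO receive: validate (or try to convert) the UDP checksum before
 * handing the packet to the protocol-independent udp_gro_receive().
 */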
static struct sk_buff **udp4_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	struct udphdr *uh = udp_gro_udphdr(skb);

	if (unlikely(!uh))
		goto flush;

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (NAPI_GRO_CB(skb)->flush)
		goto skip;

	if (skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check,
						 inet_gro_compute_pseudo))
		goto flush;
	else if (uh->check)
		skb_gro_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
					     inet_gro_compute_pseudo);
skip:
	NAPI_GRO_CB(skb)->is_ipv6 = 0;
	return udp_gro_receive(head, skb, uh, udp4_lib_lookup_skb);

flush:
	NAPI_GRO_CB(skb)->flush = 1;
	return NULL;
}
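
/* Finish GRO for a merged UDP packet: fix up the UDP length field and let
 * the owning encap socket complete the inner headers.
 */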
int udp_gro_complete(struct sk_buff *skb, int nhoff,
		     udp_lookup_t lookup)
{
	__be16 newlen = htons(skb->len - nhoff);
	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
	int err = -ENOSYS;
	struct sock *sk;

	uh->len = newlen;

	/* Set encapsulation before calling into inner gro_complete() functions
	 * to make them set up the inner offsets.
	 */
	skb->encapsulation = 1;

	rcu_read_lock();
	sk = (*lookup)(skb, uh->source, uh->dest);
	if (sk && udp_sk(sk)->gro_complete)
		err = udp_sk(sk)->gro_complete(sk, skb,
					       nhoff + sizeof(struct udphdr));
	rcu_read_unlock();

	if (skb->remcsum_offload)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TUNNEL_REMCSUM;

	return err;
}
EXPORT_SYMBOL(udp_gro_complete);
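
/* IPv4 GRO complete: reseed the outer pseudo-header checksum for the
 * merged length and mark the skb with the matching tunnel GSO type so
 * it can be resegmented later if needed.
 */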
static int udp4_gro_complete(struct sk_buff *skb, int nhoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);

	if (uh->check) {
		skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
		uh->check = ~udp_v4_check(skb->len - nhoff, iph->saddr,
					  iph->daddr, 0);
	} else {
		skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
	}

	return udp_gro_complete(skb, nhoff, udp4_lib_lookup_skb);
}
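
/* Offload callbacks registered below for IPPROTO_UDP on the IPv4 path. */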
static const struct net_offload udpv4_offload = {
	.callbacks = {
		.gso_segment = udp4_tunnel_segment,
		.gro_receive = udp4_gro_receive,
		.gro_complete = udp4_gro_complete,
	},
};

int __init udpv4_offload_init(void)
{
	return inet_add_offload(&udpv4_offload, IPPROTO_UDP);
}