udp_offload.c

/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	UDPv4 GSO support
 */

#include <linux/skbuff.h>
#include <net/udp.h>
#include <net/protocol.h>

static DEFINE_SPINLOCK(udp_offload_lock);
static struct udp_offload_priv __rcu *udp_offload_base __read_mostly;

#define udp_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&udp_offload_lock))

struct udp_offload_priv {
	struct udp_offload	*offload;
	possible_net_t		net;
	struct rcu_head		rcu;
	struct udp_offload_priv __rcu *next;
};

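/* Registered per-port tunnel offloads live on a single forward-linked list
 * headed by udp_offload_base: readers walk it under RCU, while writers
 * serialize on udp_offload_lock, publish entries with rcu_assign_pointer()
 * and free unlinked nodes only after an RCU grace period.
 */
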
static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
	netdev_features_t features,
	struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
					     netdev_features_t features),
	__be16 new_protocol, bool is_ipv6)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	u16 mac_offset = skb->mac_header;
	int mac_len = skb->mac_len;
	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
	__be16 protocol = skb->protocol;
	netdev_features_t enc_features;
	int udp_offset, outer_hlen;
	unsigned int oldlen;
	bool need_csum = !!(skb_shinfo(skb)->gso_type &
			    SKB_GSO_UDP_TUNNEL_CSUM);
	bool remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM);
	bool offload_csum = false, dont_encap = (need_csum || remcsum);

	oldlen = (u16)~skb->len;
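	/* oldlen is the ones-complement of the superpacket length; adding it
	 * to a segment's length in the loop below yields the delta for an
	 * incremental (RFC 1624 style) update of the outer UDP checksum, so
	 * the checksum never has to be recomputed from scratch per segment.
	 */
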
	if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
		goto out;

	skb->encapsulation = 0;
	__skb_pull(skb, tnl_hlen);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb_inner_network_offset(skb));
	skb->mac_len = skb_inner_network_offset(skb);
	skb->protocol = new_protocol;
	skb->encap_hdr_csum = need_csum;
	skb->remcsum_offload = remcsum;

	/* Try to offload checksum if possible */
	offload_csum = !!(need_csum &&
			  ((skb->dev->features & NETIF_F_HW_CSUM) ||
			   (skb->dev->features & (is_ipv6 ?
			    NETIF_F_IPV6_CSUM : NETIF_F_IP_CSUM))));

	/* segment inner packet. */
	enc_features = skb->dev->hw_enc_features & features;
	segs = gso_inner_segment(skb, enc_features);
	if (IS_ERR_OR_NULL(segs)) {
		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
				     mac_len);
		goto out;
	}

	outer_hlen = skb_tnl_header_len(skb);
	udp_offset = outer_hlen - tnl_hlen;
	skb = segs;
	do {
		struct udphdr *uh;
		int len;
		__be32 delta;

		if (dont_encap) {
			skb->encapsulation = 0;
			skb->ip_summed = CHECKSUM_NONE;
		} else {
			/* Only set up inner headers if we might be offloading
			 * inner checksum.
			 */
			skb_reset_inner_headers(skb);
			skb->encapsulation = 1;
		}

		skb->mac_len = mac_len;
		skb->protocol = protocol;

		skb_push(skb, outer_hlen);
		skb_reset_mac_header(skb);
		skb_set_network_header(skb, mac_len);
		skb_set_transport_header(skb, udp_offset);
		len = skb->len - udp_offset;
		uh = udp_hdr(skb);
		uh->len = htons(len);

		if (!need_csum)
			continue;

		delta = htonl(oldlen + len);

		uh->check = ~csum_fold((__force __wsum)
				       ((__force u32)uh->check +
					(__force u32)delta));
		if (offload_csum) {
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_start = skb_transport_header(skb) - skb->head;
			skb->csum_offset = offsetof(struct udphdr, check);
		} else if (remcsum) {
			/* Need to calculate the checksum from scratch;
			 * inner checksums are never offloaded when doing
			 * remote checksum offload.
			 */
			skb->csum = skb_checksum(skb, udp_offset,
						 skb->len - udp_offset,
						 0);
			uh->check = csum_fold(skb->csum);
			if (uh->check == 0)
				uh->check = CSUM_MANGLED_0;
		} else {
			uh->check = gso_make_checksum(skb, ~uh->check);

			if (uh->check == 0)
				uh->check = CSUM_MANGLED_0;
		}
	} while ((skb = skb->next));
out:
	return segs;
}

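/* skb_udp_tunnel_segment - segment a UDP-encapsulated GSO skb.  Picks the
 * inner segmentation routine from the skb's inner protocol (an Ethernet
 * frame for ENCAP_TYPE_ETHER, or a registered inet/inet6 offload for
 * ENCAP_TYPE_IPPROTO) and hands off to __skb_udp_tunnel_segment().
 */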
struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
				       netdev_features_t features,
				       bool is_ipv6)
{
	__be16 protocol = skb->protocol;
	const struct net_offload **offloads;
	const struct net_offload *ops;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
					     netdev_features_t features);

	rcu_read_lock();

	switch (skb->inner_protocol_type) {
	case ENCAP_TYPE_ETHER:
		protocol = skb->inner_protocol;
		gso_inner_segment = skb_mac_gso_segment;
		break;
	case ENCAP_TYPE_IPPROTO:
		offloads = is_ipv6 ? inet6_offloads : inet_offloads;
		ops = rcu_dereference(offloads[skb->inner_ipproto]);
		if (!ops || !ops->callbacks.gso_segment)
			goto out_unlock;
		gso_inner_segment = ops->callbacks.gso_segment;
		break;
	default:
		goto out_unlock;
	}

	segs = __skb_udp_tunnel_segment(skb, features, gso_inner_segment,
					protocol, is_ipv6);

out_unlock:
	rcu_read_unlock();

	return segs;
}

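/* udp4_ufo_fragment - GSO callback for IPv4 UDP.  Tunnel superpackets are
 * dispatched to skb_udp_tunnel_segment(); plain UDP superpackets get a
 * software checksum over the whole payload and are then split into IP
 * fragments via skb_segment().
 */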
static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
					 netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int mss;
	__wsum csum;
	struct udphdr *uh;
	struct iphdr *iph;

	if (skb->encapsulation &&
	    (skb_shinfo(skb)->gso_type &
	     (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))) {
		segs = skb_udp_tunnel_segment(skb, features, false);
		goto out;
	}

	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto out;

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		int type = skb_shinfo(skb)->gso_type;

		if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY |
				      SKB_GSO_UDP_TUNNEL |
				      SKB_GSO_UDP_TUNNEL_CSUM |
				      SKB_GSO_TUNNEL_REMCSUM |
				      SKB_GSO_IPIP |
				      SKB_GSO_GRE | SKB_GSO_GRE_CSUM) ||
			     !(type & (SKB_GSO_UDP))))
			goto out;

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	/* Do software UFO. Complete and fill in the UDP checksum as
	 * HW cannot do checksum of UDP packets sent as multiple
	 * IP fragments.
	 */
	uh = udp_hdr(skb);
	iph = ip_hdr(skb);

	uh->check = 0;
	csum = skb_checksum(skb, 0, skb->len, 0);
	uh->check = udp_v4_check(skb->len, iph->saddr, iph->daddr, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

	skb->ip_summed = CHECKSUM_NONE;

	/* Fragment the skb. IP headers of the fragments are updated in
	 * inet_gso_segment()
	 */
	segs = skb_segment(skb, features);
out:
	return segs;
}

int udp_add_offload(struct net *net, struct udp_offload *uo)
{
	struct udp_offload_priv *new_offload = kzalloc(sizeof(*new_offload), GFP_ATOMIC);

	if (!new_offload)
		return -ENOMEM;

	write_pnet(&new_offload->net, net);
	new_offload->offload = uo;

	spin_lock(&udp_offload_lock);
	new_offload->next = udp_offload_base;
	rcu_assign_pointer(udp_offload_base, new_offload);
	spin_unlock(&udp_offload_lock);

	return 0;
}
EXPORT_SYMBOL(udp_add_offload);

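/* Example (sketch, not part of this file): a UDP tunnel driver such as
 * vxlan registers GRO handlers for its port roughly like this; the
 * callback names below are hypothetical placeholders:
 *
 *	static struct udp_offload my_tun_offload = {
 *		.port	 = htons(4789),
 *		.ipproto = IPPROTO_UDP,
 *		.callbacks = {
 *			.gro_receive  = my_tun_gro_receive,
 *			.gro_complete = my_tun_gro_complete,
 *		},
 *	};
 *
 *	err = udp_add_offload(net, &my_tun_offload);
 *	...
 *	udp_del_offload(&my_tun_offload);
 */
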
static void udp_offload_free_routine(struct rcu_head *head)
{
	struct udp_offload_priv *ou_priv = container_of(head, struct udp_offload_priv, rcu);

	kfree(ou_priv);
}

void udp_del_offload(struct udp_offload *uo)
{
	struct udp_offload_priv __rcu **head = &udp_offload_base;
	struct udp_offload_priv *uo_priv;

	spin_lock(&udp_offload_lock);

	uo_priv = udp_deref_protected(*head);
	for (; uo_priv != NULL;
	     uo_priv = udp_deref_protected(*head)) {
		if (uo_priv->offload == uo) {
			rcu_assign_pointer(*head,
					   udp_deref_protected(uo_priv->next));
			goto unlock;
		}
		head = &uo_priv->next;
	}
	pr_warn("udp_del_offload: didn't find offload for port %d\n", ntohs(uo->port));
unlock:
	spin_unlock(&udp_offload_lock);
	if (uo_priv)
		call_rcu(&uo_priv->rcu, udp_offload_free_routine);
}
EXPORT_SYMBOL(udp_del_offload);

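/* udp_gro_receive - shared GRO receive path for UDP tunnels.  A packet is
 * only held for merging if a registered offload matches its destination
 * port in the skb's netns; held packets whose ports or zero/nonzero
 * checksum state differ are marked as a different flow, and the tunnel's
 * own gro_receive callback then takes over past the UDP header.
 */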
struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
				 struct udphdr *uh)
{
	struct udp_offload_priv *uo_priv;
	struct sk_buff *p, **pp = NULL;
	struct udphdr *uh2;
	unsigned int off = skb_gro_offset(skb);
	int flush = 1;

	if (NAPI_GRO_CB(skb)->udp_mark ||
	    (skb->ip_summed != CHECKSUM_PARTIAL &&
	     NAPI_GRO_CB(skb)->csum_cnt == 0 &&
	     !NAPI_GRO_CB(skb)->csum_valid))
		goto out;

	/* mark that this skb passed once through the udp gro layer */
	NAPI_GRO_CB(skb)->udp_mark = 1;

	rcu_read_lock();
	uo_priv = rcu_dereference(udp_offload_base);
	for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
		if (net_eq(read_pnet(&uo_priv->net), dev_net(skb->dev)) &&
		    uo_priv->offload->port == uh->dest &&
		    uo_priv->offload->callbacks.gro_receive)
			goto unflush;
	}
	goto out_unlock;

unflush:
	flush = 0;

	for (p = *head; p; p = p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		uh2 = (struct udphdr *)(p->data + off);

		/* Match ports, and the checksums must be either both zero
		 * or both nonzero.
		 */
		if ((*(u32 *)&uh->source != *(u32 *)&uh2->source) ||
		    (!uh->check ^ !uh2->check)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
	skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
	NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
	pp = uo_priv->offload->callbacks.gro_receive(head, skb,
						     uo_priv->offload);

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;
	return pp;
}

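/* udp4_gro_receive - IPv4 entry point.  A zero UDP checksum is legal over
 * IPv4, so validation treats it as a pass; a nonzero, successfully
 * validated checksum is handed to skb_gro_checksum_try_convert() so the
 * verified value can be reused by inner protocols.
 */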
static struct sk_buff **udp4_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	struct udphdr *uh = udp_gro_udphdr(skb);

	if (unlikely(!uh))
		goto flush;

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (NAPI_GRO_CB(skb)->flush)
		goto skip;

	if (skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check,
						 inet_gro_compute_pseudo))
		goto flush;
	else if (uh->check)
		skb_gro_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
					     inet_gro_compute_pseudo);
skip:
	NAPI_GRO_CB(skb)->is_ipv6 = 0;
	return udp_gro_receive(head, skb, uh);

flush:
	NAPI_GRO_CB(skb)->flush = 1;
	return NULL;
}

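/* udp_gro_complete - finish a coalesced tunnel packet: rewrite uh->len for
 * the merged length, call the matching offload's gro_complete() on the
 * payload past the UDP header, and mark the result as an encapsulated skb.
 */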
int udp_gro_complete(struct sk_buff *skb, int nhoff)
{
	struct udp_offload_priv *uo_priv;
	__be16 newlen = htons(skb->len - nhoff);
	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
	int err = -ENOSYS;

	uh->len = newlen;

	rcu_read_lock();

	uo_priv = rcu_dereference(udp_offload_base);
	for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
		if (net_eq(read_pnet(&uo_priv->net), dev_net(skb->dev)) &&
		    uo_priv->offload->port == uh->dest &&
		    uo_priv->offload->callbacks.gro_complete)
			break;
	}

	if (uo_priv) {
		NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
		err = uo_priv->offload->callbacks.gro_complete(skb,
				nhoff + sizeof(struct udphdr),
				uo_priv->offload);
	}

	rcu_read_unlock();

	if (skb->remcsum_offload)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TUNNEL_REMCSUM;

	skb->encapsulation = 1;
	skb_set_inner_mac_header(skb, nhoff + sizeof(struct udphdr));

	return err;
}

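/* udp4_gro_complete - IPv4 completion: record which tunnel GSO type to use
 * on resegmentation (with or without outer UDP checksum) and, when a
 * checksum is in use, reseed uh->check with the pseudo-header checksum for
 * the merged length before the generic completion runs.
 */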
static int udp4_gro_complete(struct sk_buff *skb, int nhoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);

	if (uh->check) {
		skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
		uh->check = ~udp_v4_check(skb->len - nhoff, iph->saddr,
					  iph->daddr, 0);
	} else {
		skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
	}

	return udp_gro_complete(skb, nhoff);
}

static const struct net_offload udpv4_offload = {
	.callbacks = {
		.gso_segment = udp4_ufo_fragment,
		.gro_receive = udp4_gro_receive,
		.gro_complete = udp4_gro_complete,
	},
};

int __init udpv4_offload_init(void)
{
	return inet_add_offload(&udpv4_offload, IPPROTO_UDP);
}
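
/* udpv4_offload_init() plugs these callbacks into the inet offload table
 * for IPPROTO_UDP.  For context (not part of this file): in mainline it is
 * called from ipv4_offload_init() in net/ipv4/af_inet.c during boot.
 */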