/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * UDPv4 GSO support
 */
#include <linux/skbuff.h>
#include <net/udp.h>
#include <net/protocol.h>

static DEFINE_SPINLOCK(udp_offload_lock);
static struct udp_offload_priv __rcu *udp_offload_base __read_mostly;

#define udp_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&udp_offload_lock))

struct udp_offload_priv {
        struct udp_offload      *offload;
        struct rcu_head         rcu;
        struct udp_offload_priv __rcu *next;
};
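
/*
 * For reference, this file only relies on the following shape of
 * struct udp_offload (declared elsewhere, in <net/protocol.h> in kernels
 * of this era): a destination port plus the usual offload callbacks.
 * A minimal sketch, inferred from the usage below:
 *
 *	struct udp_offload {
 *		__be16			 port;
 *		struct offload_callbacks callbacks;
 *	};
 */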

/*
 * Prepare a UFO skb for segmentation: seed uh->check with the pseudo-header
 * checksum and hand the rest off via CHECKSUM_PARTIAL, so the payload sum
 * is completed later at csum_start/csum_offset.
 */
static int udp4_ufo_send_check(struct sk_buff *skb)
{
        if (!pskb_may_pull(skb, sizeof(struct udphdr)))
                return -EINVAL;

        if (likely(!skb->encapsulation)) {
                const struct iphdr *iph;
                struct udphdr *uh;

                iph = ip_hdr(skb);
                uh = udp_hdr(skb);

                uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
                                               IPPROTO_UDP, 0);
                skb->csum_start = skb_transport_header(skb) - skb->head;
                skb->csum_offset = offsetof(struct udphdr, check);
                skb->ip_summed = CHECKSUM_PARTIAL;
        }

        return 0;
}

static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
                                         netdev_features_t features)
{
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        unsigned int mss;
        int offset;
        __wsum csum;

        if (skb->encapsulation &&
            (skb_shinfo(skb)->gso_type &
             (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))) {
                segs = skb_udp_tunnel_segment(skb, features);
                goto out;
        }

        mss = skb_shinfo(skb)->gso_size;
        if (unlikely(skb->len <= mss))
                goto out;

        if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
                /* Packet is from an untrusted source, reset gso_segs. */
                int type = skb_shinfo(skb)->gso_type;

                if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY |
                                      SKB_GSO_UDP_TUNNEL |
                                      SKB_GSO_UDP_TUNNEL_CSUM |
                                      SKB_GSO_IPIP |
                                      SKB_GSO_GRE | SKB_GSO_GRE_CSUM |
                                      SKB_GSO_MPLS) ||
                             !(type & (SKB_GSO_UDP))))
                        goto out;

                skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

                segs = NULL;
                goto out;
        }

        /* Do software UFO. Complete and fill in the UDP checksum as
         * HW cannot do checksum of UDP packets sent as multiple
         * IP fragments.
         */
        offset = skb_checksum_start_offset(skb);
        csum = skb_checksum(skb, offset, skb->len - offset, 0);
        offset += skb->csum_offset;
        *(__sum16 *)(skb->data + offset) = csum_fold(csum);
        skb->ip_summed = CHECKSUM_NONE;

        /* Fragment the skb. IP headers of the fragments are updated in
         * inet_gso_segment()
         */
        segs = skb_segment(skb, features);
out:
        return segs;
}
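
/*
 * Note: this segmentation path is reached from inet_gso_segment() when a
 * UFO skb must leave through a device that cannot segment it in hardware.
 * The UDP checksum is completed once over the whole datagram above, then
 * skb_segment() produces the mss-sized pieces whose IP headers are
 * rewritten by the caller, per the comment preceding skb_segment().
 */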

int udp_add_offload(struct udp_offload *uo)
{
        struct udp_offload_priv *new_offload = kzalloc(sizeof(*new_offload), GFP_ATOMIC);

        if (!new_offload)
                return -ENOMEM;

        new_offload->offload = uo;

        spin_lock(&udp_offload_lock);
        new_offload->next = udp_offload_base;
        rcu_assign_pointer(udp_offload_base, new_offload);
        spin_unlock(&udp_offload_lock);

        return 0;
}
EXPORT_SYMBOL(udp_add_offload);
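
/*
 * Usage sketch (illustrative, not part of this file): a UDP tunnel driver
 * registers per-port GRO callbacks roughly as below. The vxlan_* names are
 * hypothetical stand-ins; only udp_add_offload()/udp_del_offload() and the
 * struct layout are taken from this file's API.
 *
 *	static struct udp_offload vxlan_offload = {
 *		.port = htons(4789),
 *		.callbacks = {
 *			.gro_receive  = vxlan_gro_receive,
 *			.gro_complete = vxlan_gro_complete,
 *		},
 *	};
 *
 *	if (udp_add_offload(&vxlan_offload))
 *		pr_warn("could not register UDP GRO offload\n");
 *
 * and udp_del_offload(&vxlan_offload) on teardown.
 */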

static void udp_offload_free_routine(struct rcu_head *head)
{
        struct udp_offload_priv *ou_priv = container_of(head, struct udp_offload_priv, rcu);

        kfree(ou_priv);
}

void udp_del_offload(struct udp_offload *uo)
{
        struct udp_offload_priv __rcu **head = &udp_offload_base;
        struct udp_offload_priv *uo_priv;

        spin_lock(&udp_offload_lock);

        uo_priv = udp_deref_protected(*head);
        for (; uo_priv != NULL;
             uo_priv = udp_deref_protected(*head)) {
                if (uo_priv->offload == uo) {
                        rcu_assign_pointer(*head,
                                           udp_deref_protected(uo_priv->next));
                        goto unlock;
                }
                head = &uo_priv->next;
        }
        pr_warn("udp_del_offload: didn't find offload for port %d\n", ntohs(uo->port));
unlock:
        spin_unlock(&udp_offload_lock);
        if (uo_priv != NULL)
                call_rcu(&uo_priv->rcu, udp_offload_free_routine);
}
EXPORT_SYMBOL(udp_del_offload);

/*
 * GRO receive for UDP: look up a registered per-port offload and, if one
 * matches the destination port, hand aggregation over to it.
 */
static struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
        struct udp_offload_priv *uo_priv;
        struct sk_buff *p, **pp = NULL;
        struct udphdr *uh, *uh2;
        unsigned int hlen, off;
        int flush = 1;

        if (NAPI_GRO_CB(skb)->udp_mark ||
            (!skb->encapsulation && skb->ip_summed != CHECKSUM_COMPLETE))
                goto out;

        /* mark that this skb passed once through the udp gro layer */
        NAPI_GRO_CB(skb)->udp_mark = 1;

        off  = skb_gro_offset(skb);
        hlen = off + sizeof(*uh);
        uh   = skb_gro_header_fast(skb, off);
        if (skb_gro_header_hard(skb, hlen)) {
                uh = skb_gro_header_slow(skb, hlen, off);
                if (unlikely(!uh))
                        goto out;
        }

        rcu_read_lock();
        uo_priv = rcu_dereference(udp_offload_base);
        for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
                if (uo_priv->offload->port == uh->dest &&
                    uo_priv->offload->callbacks.gro_receive)
                        goto unflush;
        }
        goto out_unlock;

unflush:
        flush = 0;

        for (p = *head; p; p = p->next) {
                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;

                uh2 = (struct udphdr *)(p->data + off);
                /* source and dest are adjacent __be16 fields in struct
                 * udphdr, so one 32-bit load compares both ports at once
                 */
                if ((*(u32 *)&uh->source != *(u32 *)&uh2->source)) {
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }
        }

        skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
        skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
        pp = uo_priv->offload->callbacks.gro_receive(head, skb);

out_unlock:
        rcu_read_unlock();
out:
        NAPI_GRO_CB(skb)->flush |= flush;
        return pp;
}
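
/*
 * Call-flow note: udp_gro_receive() runs under inet_gro_receive() via the
 * udpv4_offload registration at the bottom of this file. flush starts at 1,
 * so a packet with no matching per-port receiver leaves GRO untouched; only
 * when a registered port matches is the UDP header pulled and aggregation
 * delegated to that port's own gro_receive() callback.
 */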

/*
 * Finish a merged GRO packet: rewrite uh->len to cover all aggregated
 * segments, then let the matching per-port offload complete the headers
 * of the encapsulated protocol.
 */
static int udp_gro_complete(struct sk_buff *skb, int nhoff)
{
        struct udp_offload_priv *uo_priv;
        __be16 newlen = htons(skb->len - nhoff);
        struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
        int err = -ENOSYS;

        uh->len = newlen;

        rcu_read_lock();

        uo_priv = rcu_dereference(udp_offload_base);
        for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
                if (uo_priv->offload->port == uh->dest &&
                    uo_priv->offload->callbacks.gro_complete)
                        break;
        }

        if (uo_priv != NULL)
                err = uo_priv->offload->callbacks.gro_complete(skb, nhoff + sizeof(struct udphdr));

        rcu_read_unlock();
        return err;
}

static const struct net_offload udpv4_offload = {
        .callbacks = {
                .gso_send_check = udp4_ufo_send_check,
                .gso_segment = udp4_ufo_fragment,
                .gro_receive = udp_gro_receive,
                .gro_complete = udp_gro_complete,
        },
};

int __init udpv4_offload_init(void)
{
        return inet_add_offload(&udpv4_offload, IPPROTO_UDP);
}
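
/*
 * Hook-up note: inet_add_offload() slots udpv4_offload into the
 * inet_offloads[] table under IPPROTO_UDP, which is how the IPv4 GSO/GRO
 * core dispatches to the callbacks above. In kernels of this era the
 * initializer is expected to be called from the IPv4 offload setup in
 * net/ipv4/af_inet.c.
 */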