/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	TCPv4 GSO/GRO support
 */

#include <linux/skbuff.h>
#include <net/tcp.h>
#include <net/protocol.h>
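
/* Walk the segment list produced by skb_segment() and transfer the
 * software TX timestamp request from the original GSO skb to the one
 * segment whose [seq, seq + mss) window contains the requested
 * sequence number ts_seq.
 */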
static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
			   unsigned int seq, unsigned int mss)
{
	while (skb) {
		if (before(ts_seq, seq + mss)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;
			skb_shinfo(skb)->tskey = ts_seq;
			return;
		}

		skb = skb->next;
		seq += mss;
	}
}
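
/* IPv4 entry point for TCP segmentation: make sure th->check carries
 * the pseudo-header checksum (CHECKSUM_PARTIAL) before handing the skb
 * to the protocol-independent tcp_gso_segment().
 */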
static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		return ERR_PTR(-EINVAL);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		const struct iphdr *iph = ip_hdr(skb);
		struct tcphdr *th = tcp_hdr(skb);

		/* Set up checksum pseudo header, usually expect stack to
		 * have done this already.
		 */

		th->check = 0;
		skb->ip_summed = CHECKSUM_PARTIAL;
		__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	}

	return tcp_gso_segment(skb, features);
}
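
/* Split a large TCP skb into mss-sized segments, fixing up sequence
 * numbers, FIN/PSH/CWR flags and checksums per segment.  Checksums are
 * adjusted incrementally (RFC 1624): with oldlen holding the one's
 * complement of the original length, adding delta = oldlen + thlen + mss
 * swaps the old pseudo-header length for the per-segment one without
 * re-summing any payload (e.g. with thlen = 20 and mss = 1448, each
 * non-final segment accounts for 1468 bytes).
 */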
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int sum_truesize = 0;
	struct tcphdr *th;
	unsigned int thlen;
	unsigned int seq;
	__be32 delta;
	unsigned int oldlen;
	unsigned int mss;
	struct sk_buff *gso_skb = skb;
	__sum16 newcheck;
	bool ooo_okay, copy_destructor;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

	oldlen = (u16)~skb->len;
	__skb_pull(skb, thlen);

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		int type = skb_shinfo(skb)->gso_type;

		if (unlikely(type &
			     ~(SKB_GSO_TCPV4 |
			       SKB_GSO_DODGY |
			       SKB_GSO_TCP_ECN |
			       SKB_GSO_TCP_FIXEDID |
			       SKB_GSO_TCPV6 |
			       SKB_GSO_GRE |
			       SKB_GSO_GRE_CSUM |
			       SKB_GSO_IPIP |
			       SKB_GSO_SIT |
			       SKB_GSO_UDP_TUNNEL |
			       SKB_GSO_UDP_TUNNEL_CSUM |
			       SKB_GSO_TUNNEL_REMCSUM |
			       0) ||
			     !(type & (SKB_GSO_TCPV4 |
				       SKB_GSO_TCPV6))))
			goto out;

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	/* GSO partial only requires splitting the frame into an MSS
	 * multiple and possibly a remainder.  So update the mss now.
	 */
	if (features & NETIF_F_GSO_PARTIAL)
		mss = skb->len - (skb->len % mss);

	copy_destructor = gso_skb->destructor == tcp_wfree;
	ooo_okay = gso_skb->ooo_okay;
	/* All segments but the first should have ooo_okay cleared */
	skb->ooo_okay = 0;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Only first segment might have ooo_okay set */
	segs->ooo_okay = ooo_okay;

	delta = htonl(oldlen + (thlen + mss));

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
		tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);

	newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
					       (__force u32)delta));

	while (skb->next) {
		th->fin = th->psh = 0;
		th->check = newcheck;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			gso_reset_checksum(skb, ~th->check);
		else
			th->check = gso_make_checksum(skb, ~th->check);

		seq += mss;
		if (copy_destructor) {
			skb->destructor = gso_skb->destructor;
			skb->sk = gso_skb->sk;
			sum_truesize += skb->truesize;
		}
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	}

	/* The following permits TCP Small Queues to work well with GSO:
	 * the callback to the TCP stack is invoked when the last fragment
	 * is freed at TX completion, not right now when gso_skb is freed
	 * by the GSO engine.
	 */
	if (copy_destructor) {
		swap(gso_skb->sk, skb->sk);
		swap(gso_skb->destructor, skb->destructor);
		sum_truesize += skb->truesize;
		atomic_add(sum_truesize - gso_skb->truesize,
			   &skb->sk->sk_wmem_alloc);
	}

	delta = htonl(oldlen + (skb_tail_pointer(skb) -
				skb_transport_header(skb)) +
		      skb->data_len);
	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
				(__force u32)delta));
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		gso_reset_checksum(skb, ~th->check);
	else
		th->check = gso_make_checksum(skb, ~th->check);
out:
	return segs;
}
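
/* GRO receive for TCP: find a held packet of the same flow (both ports
 * are compared with a single u32 load covering th->source and th->dest),
 * decide whether the new segment may be coalesced onto it, and request
 * a flush whenever flags, options, ACK number or the sequence space
 * disagree.
 */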
struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th;
	struct tcphdr *th2;
	unsigned int len;
	unsigned int thlen;
	__be32 flags;
	unsigned int mss = 1;
	unsigned int hlen;
	unsigned int off;
	int flush = 1;
	int i;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	hlen = off + thlen;
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	skb_gro_pull(skb, thlen);

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);

	for (; (p = *head); head = &p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);

		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		goto found;
	}

	goto out_check_final;

found:
	/* Include the IP ID check below from the inner most IP hdr */
	flush = NAPI_GRO_CB(p)->flush;
	flush |= (__force int)(flags & TCP_FLAG_CWR);
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

	/* When we receive our second frame we can make a decision on
	 * whether we continue this flow as an atomic flow with a fixed
	 * ID or switch to an incrementing ID.
	 */
	if (NAPI_GRO_CB(p)->flush_id != 1 ||
	    NAPI_GRO_CB(p)->count != 1 ||
	    !NAPI_GRO_CB(p)->is_atomic)
		flush |= NAPI_GRO_CB(p)->flush_id;
	else
		NAPI_GRO_CB(p)->is_atomic = false;

	mss = skb_shinfo(p)->gso_size;
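
	/* Only coalesce if the new segment's payload is at most one mss
	 * and its sequence number directly continues the data already
	 * held; anything else forces a flush.
	 */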
	flush |= (len - 1) >= mss;
	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);

	if (flush || skb_gro_receive(head, skb)) {
		mss = 1;
		goto out_check_final;
	}

	p = *head;
	th2 = tcp_hdr(p);
	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
	flush = len < mss;
	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = head;

out:
	NAPI_GRO_CB(skb)->flush |= (flush != 0);

	return pp;
}
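
/* Finish a coalesced super-frame: point csum_start/csum_offset at the
 * TCP checksum and record the segment count, so the merged skb looks
 * like locally generated GSO output to the rest of the stack.
 */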
int tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);

	skb->csum_start = (unsigned char *)th - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

	return 0;
}
EXPORT_SYMBOL(tcp_gro_complete);
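
/* IPv4 GRO entry point: validate the TCP checksum against the IPv4
 * pseudo-header before attempting to coalesce.
 */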
static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (!NAPI_GRO_CB(skb)->flush &&
	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      inet_gro_compute_pseudo)) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}
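
/* IPv4 GRO completion: seed th->check with the pseudo-header checksum
 * for the merged length and mark the skb as TCPv4 GSO so it can be
 * resegmented if forwarded.
 */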
static int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
				  iph->daddr, 0);
	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;

	if (NAPI_GRO_CB(skb)->is_atomic)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;

	return tcp_gro_complete(skb);
}
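
/* Offload callbacks for TCP over IPv4, registered with the inet layer
 * below so the core GSO/GRO paths can dispatch on IPPROTO_TCP.
 */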
static const struct net_offload tcpv4_offload = {
	.callbacks = {
		.gso_segment	= tcp4_gso_segment,
		.gro_receive	= tcp4_gro_receive,
		.gro_complete	= tcp4_gro_complete,
	},
};
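
/* Runs at boot; in mainline this is called from ipv4_offload_init()
 * in net/ipv4/af_inet.c.
 */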
int __init tcpv4_offload_init(void)
{
	return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
}