fou.c

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <net/genetlink.h>
#include <net/gue.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/xfrm.h>
#include <uapi/linux/fou.h>
#include <uapi/linux/genetlink.h>

static DEFINE_SPINLOCK(fou_lock);
static LIST_HEAD(fou_list);
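
/* One struct fou is allocated per listening UDP encapsulation socket;
 * struct fou_cfg carries the parameters parsed from the netlink request
 * that creates it (encapsulation type, inner protocol, UDP socket config).
 */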

struct fou {
        struct socket *sock;
        u8 protocol;
        u16 port;
        struct udp_offload udp_offloads;
        struct list_head list;
};

struct fou_cfg {
        u16 type;
        u8 protocol;
        struct udp_port_cfg udp_config;
};

static inline struct fou *fou_from_sock(struct sock *sk)
{
        return sk->sk_user_data;
}

static void fou_recv_pull(struct sk_buff *skb, size_t len)
{
        struct iphdr *iph = ip_hdr(skb);

        /* Remove 'len' bytes from the packet (UDP header and
         * FOU header if present).
         */
        iph->tot_len = htons(ntohs(iph->tot_len) - len);
        __skb_pull(skb, len);
        skb_postpull_rcsum(skb, udp_hdr(skb), len);
        skb_reset_transport_header(skb);
}
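
/* encap_rcv callback for plain FOU sockets. Strips the UDP header and
 * returns the negative of the configured inner protocol number; the
 * UDP/IP receive path treats a negative return from encap_rcv as a
 * request to resubmit the packet to that IP protocol handler (see the
 * resubmit loop in ip_local_deliver_finish()).
 */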

static int fou_udp_recv(struct sock *sk, struct sk_buff *skb)
{
        struct fou *fou = fou_from_sock(sk);

        if (!fou)
                return 1;

        fou_recv_pull(skb, sizeof(struct udphdr));

        return -fou->protocol;
}
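
/* Remote checksum offload: the GUE REMCSUM option carries two 16-bit
 * values, the offset where the inner checksum calculation starts and the
 * offset where the folded checksum is written, both relative to the end
 * of the GUE header. remcsum_adjust() patches the packet accordingly and
 * returns the delta that must be folded back into skb->csum.
 */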

static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr,
                                  void *data, size_t hdrlen, u8 ipproto)
{
        __be16 *pd = data;
        size_t start = ntohs(pd[0]);
        size_t offset = ntohs(pd[1]);
        size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
        __wsum delta;

        if (skb->remcsum_offload) {
                /* Already processed in GRO path */
                skb->remcsum_offload = 0;
                return guehdr;
        }

        if (!pskb_may_pull(skb, plen))
                return NULL;
        guehdr = (struct guehdr *)&udp_hdr(skb)[1];

        if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE))
                __skb_checksum_complete(skb);

        delta = remcsum_adjust((void *)guehdr + hdrlen,
                               skb->csum, start, offset);

        /* Adjust skb->csum since we changed the packet */
        skb->csum = csum_add(skb->csum, delta);

        return guehdr;
}

static int gue_control_message(struct sk_buff *skb, struct guehdr *guehdr)
{
        /* No support yet */
        kfree_skb(skb);
        return 0;
}
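
/* encap_rcv callback for GUE sockets: validate the GUE header, process a
 * remote checksum offload option if present, then strip the UDP and GUE
 * headers and hand the inner packet back to the stack by returning the
 * negative of proto_ctype. Control messages are currently just dropped.
 */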

static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
{
        struct fou *fou = fou_from_sock(sk);
        size_t len, optlen, hdrlen;
        struct guehdr *guehdr;
        void *data;
        u16 doffset = 0;

        if (!fou)
                return 1;

        len = sizeof(struct udphdr) + sizeof(struct guehdr);
        if (!pskb_may_pull(skb, len))
                goto drop;

        guehdr = (struct guehdr *)&udp_hdr(skb)[1];

        optlen = guehdr->hlen << 2;
        len += optlen;

        if (!pskb_may_pull(skb, len))
                goto drop;

        /* guehdr may change after pull */
        guehdr = (struct guehdr *)&udp_hdr(skb)[1];

        if (guehdr->version != 0 || validate_gue_flags(guehdr, optlen))
                goto drop;

        hdrlen = sizeof(struct guehdr) + optlen;

        ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(skb)->tot_len) - len);

        /* Pull csum through the guehdr now. This can be used if
         * there is a remote checksum offload.
         */
        skb_postpull_rcsum(skb, udp_hdr(skb), len);

        data = &guehdr[1];

        if (guehdr->flags & GUE_FLAG_PRIV) {
                __be32 flags = *(__be32 *)(data + doffset);

                doffset += GUE_LEN_PRIV;

                if (flags & GUE_PFLAG_REMCSUM) {
                        guehdr = gue_remcsum(skb, guehdr, data + doffset,
                                             hdrlen, guehdr->proto_ctype);
                        if (!guehdr)
                                goto drop;

                        data = &guehdr[1];

                        doffset += GUE_PLEN_REMCSUM;
                }
        }

        if (unlikely(guehdr->control))
                return gue_control_message(skb, guehdr);

        __skb_pull(skb, sizeof(struct udphdr) + hdrlen);
        skb_reset_transport_header(skb);

        return -guehdr->proto_ctype;

drop:
        kfree_skb(skb);
        return 0;
}
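
/* GRO fast path: fou_gro_receive/fou_gro_complete simply look up the GRO
 * callbacks of the inner IP protocol and delegate to them; the GUE
 * variants further below additionally parse and match the GUE header.
 */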

static struct sk_buff **fou_gro_receive(struct sk_buff **head,
                                        struct sk_buff *skb)
{
        const struct net_offload *ops;
        struct sk_buff **pp = NULL;
        u8 proto = NAPI_GRO_CB(skb)->proto;
        const struct net_offload **offloads;

        rcu_read_lock();
        offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
        ops = rcu_dereference(offloads[proto]);
        if (!ops || !ops->callbacks.gro_receive)
                goto out_unlock;

        pp = ops->callbacks.gro_receive(head, skb);

out_unlock:
        rcu_read_unlock();

        return pp;
}

static int fou_gro_complete(struct sk_buff *skb, int nhoff)
{
        const struct net_offload *ops;
        u8 proto = NAPI_GRO_CB(skb)->proto;
        int err = -ENOSYS;
        const struct net_offload **offloads;

        udp_tunnel_gro_complete(skb, nhoff);

        rcu_read_lock();
        offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
        ops = rcu_dereference(offloads[proto]);
        if (WARN_ON(!ops || !ops->callbacks.gro_complete))
                goto out_unlock;

        err = ops->callbacks.gro_complete(skb, nhoff);

out_unlock:
        rcu_read_unlock();

        return err;
}

static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
                                      struct guehdr *guehdr, void *data,
                                      size_t hdrlen, u8 ipproto)
{
        __be16 *pd = data;
        size_t start = ntohs(pd[0]);
        size_t offset = ntohs(pd[1]);
        size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
        __wsum delta;

        if (skb->remcsum_offload)
                return guehdr;

        if (!NAPI_GRO_CB(skb)->csum_valid)
                return NULL;

        /* Pull checksum that will be written */
        if (skb_gro_header_hard(skb, off + plen)) {
                guehdr = skb_gro_header_slow(skb, off + plen, off);
                if (!guehdr)
                        return NULL;
        }

        delta = remcsum_adjust((void *)guehdr + hdrlen,
                               NAPI_GRO_CB(skb)->csum, start, offset);

        /* Adjust skb->csum since we changed the packet */
        skb->csum = csum_add(skb->csum, delta);
        NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);

        skb->remcsum_offload = 1;

        return guehdr;
}
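
/* GRO receive for GUE: packets can only be aggregated if the base GUE
 * header word and any optional fields match exactly, so compare against
 * every skb already held on the GRO list before delegating to the inner
 * protocol's gro_receive callback.
 */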

static struct sk_buff **gue_gro_receive(struct sk_buff **head,
                                        struct sk_buff *skb)
{
        const struct net_offload **offloads;
        const struct net_offload *ops;
        struct sk_buff **pp = NULL;
        struct sk_buff *p;
        struct guehdr *guehdr;
        size_t len, optlen, hdrlen, off;
        void *data;
        u16 doffset = 0;
        int flush = 1;

        off = skb_gro_offset(skb);
        len = off + sizeof(*guehdr);

        guehdr = skb_gro_header_fast(skb, off);
        if (skb_gro_header_hard(skb, len)) {
                guehdr = skb_gro_header_slow(skb, len, off);
                if (unlikely(!guehdr))
                        goto out;
        }

        optlen = guehdr->hlen << 2;
        len += optlen;

        if (skb_gro_header_hard(skb, len)) {
                guehdr = skb_gro_header_slow(skb, len, off);
                if (unlikely(!guehdr))
                        goto out;
        }

        if (unlikely(guehdr->control) || guehdr->version != 0 ||
            validate_gue_flags(guehdr, optlen))
                goto out;

        hdrlen = sizeof(*guehdr) + optlen;

        /* Adjust NAPI_GRO_CB(skb)->csum to account for guehdr;
         * this is needed if there is a remote checksum offload.
         */
        skb_gro_postpull_rcsum(skb, guehdr, hdrlen);

        data = &guehdr[1];

        if (guehdr->flags & GUE_FLAG_PRIV) {
                __be32 flags = *(__be32 *)(data + doffset);

                doffset += GUE_LEN_PRIV;

                if (flags & GUE_PFLAG_REMCSUM) {
                        guehdr = gue_gro_remcsum(skb, off, guehdr,
                                                 data + doffset, hdrlen,
                                                 guehdr->proto_ctype);
                        if (!guehdr)
                                goto out;

                        data = &guehdr[1];

                        doffset += GUE_PLEN_REMCSUM;
                }
        }

        skb_gro_pull(skb, hdrlen);

        flush = 0;

        for (p = *head; p; p = p->next) {
                const struct guehdr *guehdr2;

                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;

                guehdr2 = (struct guehdr *)(p->data + off);

                /* Compare base GUE header to be equal (covers
                 * hlen, version, proto_ctype, and flags).
                 */
                if (guehdr->word != guehdr2->word) {
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }

                /* Compare optional fields to be the same. */
                if (guehdr->hlen && memcmp(&guehdr[1], &guehdr2[1],
                                           guehdr->hlen << 2)) {
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }
        }

        rcu_read_lock();
        offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
        ops = rcu_dereference(offloads[guehdr->proto_ctype]);
        if (WARN_ON(!ops || !ops->callbacks.gro_receive))
                goto out_unlock;

        pp = ops->callbacks.gro_receive(head, skb);

out_unlock:
        rcu_read_unlock();
out:
        NAPI_GRO_CB(skb)->flush |= flush;

        return pp;
}

static int gue_gro_complete(struct sk_buff *skb, int nhoff)
{
        const struct net_offload **offloads;
        struct guehdr *guehdr = (struct guehdr *)(skb->data + nhoff);
        const struct net_offload *ops;
        unsigned int guehlen;
        u8 proto;
        int err = -ENOENT;

        proto = guehdr->proto_ctype;

        guehlen = sizeof(*guehdr) + (guehdr->hlen << 2);

        rcu_read_lock();
        offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
        ops = rcu_dereference(offloads[proto]);
        if (WARN_ON(!ops || !ops->callbacks.gro_complete))
                goto out_unlock;

        err = ops->callbacks.gro_complete(skb, nhoff + guehlen);

out_unlock:
        rcu_read_unlock();
        return err;
}
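
/* All active FOU sockets are kept on fou_list, protected by fou_lock;
 * entries are keyed by local UDP port, so adding a second instance on a
 * port that is already in use fails with -EALREADY.
 */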

static int fou_add_to_port_list(struct fou *fou)
{
        struct fou *fout;

        spin_lock(&fou_lock);
        list_for_each_entry(fout, &fou_list, list) {
                if (fou->port == fout->port) {
                        spin_unlock(&fou_lock);
                        return -EALREADY;
                }
        }

        list_add(&fou->list, &fou_list);
        spin_unlock(&fou_lock);

        return 0;
}

static void fou_release(struct fou *fou)
{
        struct socket *sock = fou->sock;
        struct sock *sk = sock->sk;

        udp_del_offload(&fou->udp_offloads);

        list_del(&fou->list);

        /* Remove hooks into tunnel socket */
        sk->sk_user_data = NULL;

        sock_release(sock);

        kfree(fou);
}
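
/* Per-type setup: both variants install an encap_rcv handler on the UDP
 * socket and fill in the udp_offload callbacks used for GRO. Plain FOU
 * additionally records the fixed inner protocol number, while GUE learns
 * it per packet from the proto_ctype field in the GUE header.
 */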

static int fou_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
{
        udp_sk(sk)->encap_rcv = fou_udp_recv;
        fou->protocol = cfg->protocol;
        fou->udp_offloads.callbacks.gro_receive = fou_gro_receive;
        fou->udp_offloads.callbacks.gro_complete = fou_gro_complete;
        fou->udp_offloads.port = cfg->udp_config.local_udp_port;
        fou->udp_offloads.ipproto = cfg->protocol;

        return 0;
}

static int gue_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
{
        udp_sk(sk)->encap_rcv = gue_udp_recv;
        fou->udp_offloads.callbacks.gro_receive = gue_gro_receive;
        fou->udp_offloads.callbacks.gro_complete = gue_gro_complete;
        fou->udp_offloads.port = cfg->udp_config.local_udp_port;

        return 0;
}
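
/* fou_create(): open the kernel UDP socket, initialize it for the
 * requested encapsulation type, mark it as an encapsulation socket,
 * register the GRO offload (IPv4 only here) and add it to fou_list.
 */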

static int fou_create(struct net *net, struct fou_cfg *cfg,
                      struct socket **sockp)
{
        struct fou *fou = NULL;
        int err;
        struct socket *sock = NULL;
        struct sock *sk;

        /* Open UDP socket */
        err = udp_sock_create(net, &cfg->udp_config, &sock);
        if (err < 0)
                goto error;

        /* Allocate FOU port structure */
        fou = kzalloc(sizeof(*fou), GFP_KERNEL);
        if (!fou) {
                err = -ENOMEM;
                goto error;
        }

        sk = sock->sk;

        fou->port = cfg->udp_config.local_udp_port;

        /* Initialize for the requested fou type */
        switch (cfg->type) {
        case FOU_ENCAP_DIRECT:
                err = fou_encap_init(sk, fou, cfg);
                if (err)
                        goto error;
                break;
        case FOU_ENCAP_GUE:
                err = gue_encap_init(sk, fou, cfg);
                if (err)
                        goto error;
                break;
        default:
                err = -EINVAL;
                goto error;
        }

        udp_sk(sk)->encap_type = 1;
        udp_encap_enable();

        sk->sk_user_data = fou;
        fou->sock = sock;

        udp_set_convert_csum(sk, true);

        sk->sk_allocation = GFP_ATOMIC;

        if (cfg->udp_config.family == AF_INET) {
                err = udp_add_offload(&fou->udp_offloads);
                if (err)
                        goto error;
        }

        err = fou_add_to_port_list(fou);
        if (err)
                goto error;

        if (sockp)
                *sockp = sock;

        return 0;

error:
        kfree(fou);
        if (sock)
                sock_release(sock);

        return err;
}

static int fou_destroy(struct net *net, struct fou_cfg *cfg)
{
        struct fou *fou;
        u16 port = cfg->udp_config.local_udp_port;
        int err = -EINVAL;

        spin_lock(&fou_lock);
        list_for_each_entry(fou, &fou_list, list) {
                if (fou->port == port) {
                        udp_del_offload(&fou->udp_offloads);
                        fou_release(fou);
                        err = 0;
                        break;
                }
        }
        spin_unlock(&fou_lock);

        return err;
}

static struct genl_family fou_nl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = 0,
        .name = FOU_GENL_NAME,
        .version = FOU_GENL_VERSION,
        .maxattr = FOU_ATTR_MAX,
        .netnsok = true,
};

static struct nla_policy fou_nl_policy[FOU_ATTR_MAX + 1] = {
        [FOU_ATTR_PORT] = { .type = NLA_U16, },
        [FOU_ATTR_AF] = { .type = NLA_U8, },
        [FOU_ATTR_IPPROTO] = { .type = NLA_U8, },
        [FOU_ATTR_TYPE] = { .type = NLA_U8, },
};

static int parse_nl_config(struct genl_info *info,
                           struct fou_cfg *cfg)
{
        memset(cfg, 0, sizeof(*cfg));

        cfg->udp_config.family = AF_INET;

        if (info->attrs[FOU_ATTR_AF]) {
                u8 family = nla_get_u8(info->attrs[FOU_ATTR_AF]);

                if (family != AF_INET && family != AF_INET6)
                        return -EINVAL;

                cfg->udp_config.family = family;
        }

        if (info->attrs[FOU_ATTR_PORT]) {
                u16 port = nla_get_u16(info->attrs[FOU_ATTR_PORT]);

                cfg->udp_config.local_udp_port = port;
        }

        if (info->attrs[FOU_ATTR_IPPROTO])
                cfg->protocol = nla_get_u8(info->attrs[FOU_ATTR_IPPROTO]);

        if (info->attrs[FOU_ATTR_TYPE])
                cfg->type = nla_get_u8(info->attrs[FOU_ATTR_TYPE]);

        return 0;
}

static int fou_nl_cmd_add_port(struct sk_buff *skb, struct genl_info *info)
{
        struct fou_cfg cfg;
        int err;

        err = parse_nl_config(info, &cfg);
        if (err)
                return err;

        return fou_create(&init_net, &cfg, NULL);
}

static int fou_nl_cmd_rm_port(struct sk_buff *skb, struct genl_info *info)
{
        struct fou_cfg cfg;
        int err;

        err = parse_nl_config(info, &cfg);
        if (err)
                return err;

        return fou_destroy(&init_net, &cfg);
}

static const struct genl_ops fou_nl_ops[] = {
        {
                .cmd = FOU_CMD_ADD,
                .doit = fou_nl_cmd_add_port,
                .policy = fou_nl_policy,
                .flags = GENL_ADMIN_PERM,
        },
        {
                .cmd = FOU_CMD_DEL,
                .doit = fou_nl_cmd_rm_port,
                .policy = fou_nl_policy,
                .flags = GENL_ADMIN_PERM,
        },
};
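
/* Userspace configures receive ports over this generic netlink family.
 * With iproute2 this is roughly (illustrative only; exact syntax may vary
 * by iproute2 version):
 *
 *     ip fou add port 5555 ipproto 47      # plain FOU, inner protocol GRE
 *     ip fou add port 7777 gue             # GUE decapsulation
 *
 * which issues FOU_CMD_ADD with FOU_ATTR_PORT, FOU_ATTR_IPPROTO and
 * FOU_ATTR_TYPE attributes handled by the ops above.
 */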

size_t fou_encap_hlen(struct ip_tunnel_encap *e)
{
        return sizeof(struct udphdr);
}
EXPORT_SYMBOL(fou_encap_hlen);

size_t gue_encap_hlen(struct ip_tunnel_encap *e)
{
        size_t len;
        bool need_priv = false;

        len = sizeof(struct udphdr) + sizeof(struct guehdr);

        if (e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) {
                len += GUE_PLEN_REMCSUM;
                need_priv = true;
        }

        len += need_priv ? GUE_LEN_PRIV : 0;

        return len;
}
EXPORT_SYMBOL(gue_encap_hlen);
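
/* Transmit side: fou_build_udp() prepends the outer UDP header in front
 * of whatever is currently at skb->data and rewrites *protocol to
 * IPPROTO_UDP so the IP tunnel code emits a UDP packet. The outer UDP
 * checksum is only filled in when TUNNEL_ENCAP_FLAG_CSUM was requested.
 */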

static void fou_build_udp(struct sk_buff *skb, struct ip_tunnel_encap *e,
                          struct flowi4 *fl4, u8 *protocol, __be16 sport)
{
        struct udphdr *uh;

        skb_push(skb, sizeof(struct udphdr));
        skb_reset_transport_header(skb);

        uh = udp_hdr(skb);

        uh->dest = e->dport;
        uh->source = sport;
        uh->len = htons(skb->len);
        uh->check = 0;
        udp_set_csum(!(e->flags & TUNNEL_ENCAP_FLAG_CSUM), skb,
                     fl4->saddr, fl4->daddr, skb->len);

        *protocol = IPPROTO_UDP;
}

int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
                     u8 *protocol, struct flowi4 *fl4)
{
        bool csum = !!(e->flags & TUNNEL_ENCAP_FLAG_CSUM);
        int type = csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
        __be16 sport;

        skb = iptunnel_handle_offloads(skb, csum, type);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
                                               skb, 0, 0, false);
        fou_build_udp(skb, e, fl4, protocol, sport);

        return 0;
}
EXPORT_SYMBOL(fou_build_header);

int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
                     u8 *protocol, struct flowi4 *fl4)
{
        bool csum = !!(e->flags & TUNNEL_ENCAP_FLAG_CSUM);
        int type = csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
        struct guehdr *guehdr;
        size_t hdrlen, optlen = 0;
        __be16 sport;
        void *data;
        bool need_priv = false;

        if ((e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) &&
            skb->ip_summed == CHECKSUM_PARTIAL) {
                csum = false;
                optlen += GUE_PLEN_REMCSUM;
                type |= SKB_GSO_TUNNEL_REMCSUM;
                need_priv = true;
        }

        optlen += need_priv ? GUE_LEN_PRIV : 0;

        skb = iptunnel_handle_offloads(skb, csum, type);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        /* Get source port (based on flow hash) before skb_push */
        sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
                                               skb, 0, 0, false);

        hdrlen = sizeof(struct guehdr) + optlen;

        skb_push(skb, hdrlen);

        guehdr = (struct guehdr *)skb->data;

        guehdr->control = 0;
        guehdr->version = 0;
        guehdr->hlen = optlen >> 2;
        guehdr->flags = 0;
        guehdr->proto_ctype = *protocol;

        data = &guehdr[1];

        if (need_priv) {
                __be32 *flags = data;

                guehdr->flags |= GUE_FLAG_PRIV;
                *flags = 0;
                data += GUE_LEN_PRIV;

                if (type & SKB_GSO_TUNNEL_REMCSUM) {
                        u16 csum_start = skb_checksum_start_offset(skb);
                        __be16 *pd = data;

                        if (csum_start < hdrlen)
                                return -EINVAL;

                        csum_start -= hdrlen;
                        pd[0] = htons(csum_start);
                        pd[1] = htons(csum_start + skb->csum_offset);

                        if (!skb_is_gso(skb)) {
                                skb->ip_summed = CHECKSUM_NONE;
                                skb->encapsulation = 0;
                        }

                        *flags |= GUE_PFLAG_REMCSUM;
                        data += GUE_PLEN_REMCSUM;
                }
        }

        fou_build_udp(skb, e, fl4, protocol, sport);

        return 0;
}
EXPORT_SYMBOL(gue_build_header);
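
/* When CONFIG_NET_FOU_IP_TUNNELS is enabled, the encap_hlen/build_header
 * callbacks above are registered with the IP tunnel core so that tunnel
 * drivers built on it (e.g. ipip, sit, GRE) can request FOU or GUE
 * encapsulation on their transmit path.
 */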

#ifdef CONFIG_NET_FOU_IP_TUNNELS

static const struct ip_tunnel_encap_ops __read_mostly fou_iptun_ops = {
        .encap_hlen = fou_encap_hlen,
        .build_header = fou_build_header,
};

static const struct ip_tunnel_encap_ops __read_mostly gue_iptun_ops = {
        .encap_hlen = gue_encap_hlen,
        .build_header = gue_build_header,
};

static int ip_tunnel_encap_add_fou_ops(void)
{
        int ret;

        ret = ip_tunnel_encap_add_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
        if (ret < 0) {
                pr_err("can't add fou ops\n");
                return ret;
        }

        ret = ip_tunnel_encap_add_ops(&gue_iptun_ops, TUNNEL_ENCAP_GUE);
        if (ret < 0) {
                pr_err("can't add gue ops\n");
                ip_tunnel_encap_del_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
                return ret;
        }

        return 0;
}

static void ip_tunnel_encap_del_fou_ops(void)
{
        ip_tunnel_encap_del_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
        ip_tunnel_encap_del_ops(&gue_iptun_ops, TUNNEL_ENCAP_GUE);
}

#else

static int ip_tunnel_encap_add_fou_ops(void)
{
        return 0;
}

static void ip_tunnel_encap_del_fou_ops(void)
{
}

#endif

static int __init fou_init(void)
{
        int ret;

        ret = genl_register_family_with_ops(&fou_nl_family,
                                            fou_nl_ops);
        if (ret < 0)
                goto exit;

        ret = ip_tunnel_encap_add_fou_ops();
        if (ret < 0)
                genl_unregister_family(&fou_nl_family);

exit:
        return ret;
}

static void __exit fou_fini(void)
{
        struct fou *fou, *next;

        ip_tunnel_encap_del_fou_ops();

        genl_unregister_family(&fou_nl_family);

        /* Close all the FOU sockets */
        spin_lock(&fou_lock);
        list_for_each_entry_safe(fou, next, &fou_list, list)
                fou_release(fou);
        spin_unlock(&fou_lock);
}

module_init(fou_init);
module_exit(fou_fini);
MODULE_AUTHOR("Tom Herbert <therbert@google.com>");
MODULE_LICENSE("GPL");