fou.c

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <net/genetlink.h>
#include <net/gue.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/xfrm.h>
#include <uapi/linux/fou.h>
#include <uapi/linux/genetlink.h>

static DEFINE_SPINLOCK(fou_lock);
static LIST_HEAD(fou_list);

struct fou {
	struct socket *sock;
	u8 protocol;
	u8 flags;
	u16 port;
	struct udp_offload udp_offloads;
	struct list_head list;
};

#define FOU_F_REMCSUM_NOPARTIAL BIT(0)

struct fou_cfg {
	u16 type;
	u8 protocol;
	u8 flags;
	struct udp_port_cfg udp_config;
};

static inline struct fou *fou_from_sock(struct sock *sk)
{
	return sk->sk_user_data;
}

static void fou_recv_pull(struct sk_buff *skb, size_t len)
{
	struct iphdr *iph = ip_hdr(skb);

	/* Remove 'len' bytes from the packet (UDP header and
	 * FOU header if present).
	 */
	iph->tot_len = htons(ntohs(iph->tot_len) - len);
	__skb_pull(skb, len);
	skb_postpull_rcsum(skb, udp_hdr(skb), len);
	skb_reset_transport_header(skb);
}
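
/* UDP encap_rcv handler for plain FOU. Strips the outer UDP header and
 * returns the negative of the configured inner protocol, which tells the
 * UDP stack to resubmit the packet as that IP protocol; returning 1 lets
 * the packet fall through to normal UDP processing.
 */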
static int fou_udp_recv(struct sock *sk, struct sk_buff *skb)
{
	struct fou *fou = fou_from_sock(sk);

	if (!fou)
		return 1;

	fou_recv_pull(skb, sizeof(struct udphdr));

	return -fou->protocol;
}
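
/* Process the GUE remote checksum offload option on receive: linearize
 * enough of the packet to cover the checksum region, then fold the
 * offloaded checksum with skb_remcsum_process(). Returns the (possibly
 * relocated) GUE header, or NULL if the data could not be pulled.
 */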
static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr,
				  void *data, size_t hdrlen, u8 ipproto,
				  bool nopartial)
{
	__be16 *pd = data;
	size_t start = ntohs(pd[0]);
	size_t offset = ntohs(pd[1]);
	size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);

	if (!pskb_may_pull(skb, plen))
		return NULL;
	guehdr = (struct guehdr *)&udp_hdr(skb)[1];

	skb_remcsum_process(skb, (void *)guehdr + hdrlen,
			    start, offset, nopartial);

	return guehdr;
}

static int gue_control_message(struct sk_buff *skb, struct guehdr *guehdr)
{
	/* No support yet */
	kfree_skb(skb);
	return 0;
}
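
/* UDP encap_rcv handler for GUE. Validates the GUE header, handles the
 * private flags block (currently just remote checksum offload), then
 * strips the outer headers and resubmits the inner packet as the
 * protocol given by proto_ctype.
 */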
static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
{
	struct fou *fou = fou_from_sock(sk);
	size_t len, optlen, hdrlen;
	struct guehdr *guehdr;
	void *data;
	u16 doffset = 0;

	if (!fou)
		return 1;

	len = sizeof(struct udphdr) + sizeof(struct guehdr);
	if (!pskb_may_pull(skb, len))
		goto drop;

	guehdr = (struct guehdr *)&udp_hdr(skb)[1];

	optlen = guehdr->hlen << 2;
	len += optlen;

	if (!pskb_may_pull(skb, len))
		goto drop;

	/* guehdr may change after pull */
	guehdr = (struct guehdr *)&udp_hdr(skb)[1];

	hdrlen = sizeof(struct guehdr) + optlen;

	if (guehdr->version != 0 || validate_gue_flags(guehdr, optlen))
		goto drop;

	ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(skb)->tot_len) - len);

	/* Pull csum through the guehdr now. This can be used if
	 * there is a remote checksum offload.
	 */
	skb_postpull_rcsum(skb, udp_hdr(skb), len);

	data = &guehdr[1];

	if (guehdr->flags & GUE_FLAG_PRIV) {
		__be32 flags = *(__be32 *)(data + doffset);

		doffset += GUE_LEN_PRIV;

		if (flags & GUE_PFLAG_REMCSUM) {
			guehdr = gue_remcsum(skb, guehdr, data + doffset,
					     hdrlen, guehdr->proto_ctype,
					     !!(fou->flags &
						FOU_F_REMCSUM_NOPARTIAL));
			if (!guehdr)
				goto drop;

			data = &guehdr[1];

			doffset += GUE_PLEN_REMCSUM;
		}
	}

	if (unlikely(guehdr->control))
		return gue_control_message(skb, guehdr);

	__skb_pull(skb, sizeof(struct udphdr) + hdrlen);
	skb_reset_transport_header(skb);

	return -guehdr->proto_ctype;

drop:
	kfree_skb(skb);
	return 0;
}
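
/* GRO receive for plain FOU. The inner protocol is fixed per socket, so
 * simply hand the aggregation off to that protocol's gro_receive.
 */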
static struct sk_buff **fou_gro_receive(struct sk_buff **head,
					struct sk_buff *skb,
					struct udp_offload *uoff)
{
	const struct net_offload *ops;
	struct sk_buff **pp = NULL;
	u8 proto = NAPI_GRO_CB(skb)->proto;
	const struct net_offload **offloads;

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
	if (!ops || !ops->callbacks.gro_receive)
		goto out_unlock;

	pp = ops->callbacks.gro_receive(head, skb);

out_unlock:
	rcu_read_unlock();

	return pp;
}
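
/* GRO complete for plain FOU: finish the UDP tunnel header, then let the
 * inner protocol's gro_complete handler fix up its headers.
 */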
static int fou_gro_complete(struct sk_buff *skb, int nhoff,
			    struct udp_offload *uoff)
{
	const struct net_offload *ops;
	u8 proto = NAPI_GRO_CB(skb)->proto;
	int err = -ENOSYS;
	const struct net_offload **offloads;

	udp_tunnel_gro_complete(skb, nhoff);

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out_unlock;

	err = ops->callbacks.gro_complete(skb, nhoff);

out_unlock:
	rcu_read_unlock();

	return err;
}
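
/* GRO counterpart of gue_remcsum(). Marks the skb with remcsum_offload so
 * the option is only processed once, and records the checksum rewrite in
 * the gro_remcsum context so gue_gro_receive() can clean it up when it is
 * done with the packet.
 */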
static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
				      struct guehdr *guehdr, void *data,
				      size_t hdrlen, u8 ipproto,
				      struct gro_remcsum *grc, bool nopartial)
{
	__be16 *pd = data;
	size_t start = ntohs(pd[0]);
	size_t offset = ntohs(pd[1]);
	size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);

	if (skb->remcsum_offload)
		return NULL;

	if (!NAPI_GRO_CB(skb)->csum_valid)
		return NULL;

	/* Pull checksum that will be written */
	if (skb_gro_header_hard(skb, off + plen)) {
		guehdr = skb_gro_header_slow(skb, off + plen, off);
		if (!guehdr)
			return NULL;
	}

	skb_gro_remcsum_process(skb, (void *)guehdr + hdrlen,
				start, offset, grc, nopartial);

	skb->remcsum_offload = 1;

	return guehdr;
}
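
/* GRO receive for GUE. Validates the GUE header, applies remote checksum
 * offload if requested, compares the header against packets already held
 * on the GRO list to decide flow matching, and then dispatches to the
 * inner protocol's gro_receive handler.
 */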
static struct sk_buff **gue_gro_receive(struct sk_buff **head,
					struct sk_buff *skb,
					struct udp_offload *uoff)
{
	const struct net_offload **offloads;
	const struct net_offload *ops;
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	struct guehdr *guehdr;
	size_t len, optlen, hdrlen, off;
	void *data;
	u16 doffset = 0;
	int flush = 1;
	struct fou *fou = container_of(uoff, struct fou, udp_offloads);
	struct gro_remcsum grc;

	skb_gro_remcsum_init(&grc);

	off = skb_gro_offset(skb);
	len = off + sizeof(*guehdr);

	guehdr = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, len)) {
		guehdr = skb_gro_header_slow(skb, len, off);
		if (unlikely(!guehdr))
			goto out;
	}

	optlen = guehdr->hlen << 2;
	len += optlen;

	if (skb_gro_header_hard(skb, len)) {
		guehdr = skb_gro_header_slow(skb, len, off);
		if (unlikely(!guehdr))
			goto out;
	}

	if (unlikely(guehdr->control) || guehdr->version != 0 ||
	    validate_gue_flags(guehdr, optlen))
		goto out;

	hdrlen = sizeof(*guehdr) + optlen;

	/* Adjust NAPI_GRO_CB(skb)->csum to account for guehdr,
	 * this is needed if there is a remote checksum offload.
	 */
	skb_gro_postpull_rcsum(skb, guehdr, hdrlen);

	data = &guehdr[1];

	if (guehdr->flags & GUE_FLAG_PRIV) {
		__be32 flags = *(__be32 *)(data + doffset);

		doffset += GUE_LEN_PRIV;

		if (flags & GUE_PFLAG_REMCSUM) {
			guehdr = gue_gro_remcsum(skb, off, guehdr,
						 data + doffset, hdrlen,
						 guehdr->proto_ctype, &grc,
						 !!(fou->flags &
						    FOU_F_REMCSUM_NOPARTIAL));
			if (!guehdr)
				goto out;

			data = &guehdr[1];

			doffset += GUE_PLEN_REMCSUM;
		}
	}

	skb_gro_pull(skb, hdrlen);

	flush = 0;

	for (p = *head; p; p = p->next) {
		const struct guehdr *guehdr2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		guehdr2 = (struct guehdr *)(p->data + off);

		/* Compare base GUE header to be equal (covers
		 * hlen, version, proto_ctype, and flags).
		 */
		if (guehdr->word != guehdr2->word) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		/* Check that the optional fields are the same. */
		if (guehdr->hlen && memcmp(&guehdr[1], &guehdr2[1],
					   guehdr->hlen << 2)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[guehdr->proto_ctype]);
	if (WARN_ON(!ops || !ops->callbacks.gro_receive))
		goto out_unlock;

	pp = ops->callbacks.gro_receive(head, skb);

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;
	skb_gro_remcsum_cleanup(skb, &grc);

	return pp;
}
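
/* GRO complete for GUE: skip past the GUE header (base header plus
 * options) and call the inner protocol's gro_complete handler.
 */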
static int gue_gro_complete(struct sk_buff *skb, int nhoff,
			    struct udp_offload *uoff)
{
	const struct net_offload **offloads;
	struct guehdr *guehdr = (struct guehdr *)(skb->data + nhoff);
	const struct net_offload *ops;
	unsigned int guehlen;
	u8 proto;
	int err = -ENOENT;

	proto = guehdr->proto_ctype;

	guehlen = sizeof(*guehdr) + (guehdr->hlen << 2);

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out_unlock;

	err = ops->callbacks.gro_complete(skb, nhoff + guehlen);

out_unlock:
	rcu_read_unlock();
	return err;
}
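
/* Insert a new entry into the global FOU list, refusing a UDP port that
 * is already in use by another entry.
 */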
static int fou_add_to_port_list(struct fou *fou)
{
	struct fou *fout;

	spin_lock(&fou_lock);
	list_for_each_entry(fout, &fou_list, list) {
		if (fou->port == fout->port) {
			spin_unlock(&fou_lock);
			return -EALREADY;
		}
	}

	list_add(&fou->list, &fou_list);
	spin_unlock(&fou_lock);

	return 0;
}
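
/* Tear down a FOU entry: remove the GRO offload, unlink it from the list,
 * detach it from the UDP socket, and release the socket.
 */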
static void fou_release(struct fou *fou)
{
	struct socket *sock = fou->sock;
	struct sock *sk = sock->sk;

	udp_del_offload(&fou->udp_offloads);

	list_del(&fou->list);

	/* Remove hooks into tunnel socket */
	sk->sk_user_data = NULL;

	sock_release(sock);

	kfree(fou);
}

static int fou_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
{
	udp_sk(sk)->encap_rcv = fou_udp_recv;
	fou->protocol = cfg->protocol;
	fou->udp_offloads.callbacks.gro_receive = fou_gro_receive;
	fou->udp_offloads.callbacks.gro_complete = fou_gro_complete;
	fou->udp_offloads.port = cfg->udp_config.local_udp_port;
	fou->udp_offloads.ipproto = cfg->protocol;

	return 0;
}

static int gue_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
{
	udp_sk(sk)->encap_rcv = gue_udp_recv;
	fou->udp_offloads.callbacks.gro_receive = gue_gro_receive;
	fou->udp_offloads.callbacks.gro_complete = gue_gro_complete;
	fou->udp_offloads.port = cfg->udp_config.local_udp_port;

	return 0;
}
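
/* Create a FOU listener: open a kernel UDP socket per the config, attach
 * the encap receive handler and GRO offloads for the requested
 * encapsulation type, and add the entry to the port list.
 */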
static int fou_create(struct net *net, struct fou_cfg *cfg,
		      struct socket **sockp)
{
	struct fou *fou = NULL;
	int err;
	struct socket *sock = NULL;
	struct sock *sk;

	/* Open UDP socket */
	err = udp_sock_create(net, &cfg->udp_config, &sock);
	if (err < 0)
		goto error;

	/* Allocate FOU port structure */
	fou = kzalloc(sizeof(*fou), GFP_KERNEL);
	if (!fou) {
		err = -ENOMEM;
		goto error;
	}

	sk = sock->sk;

	fou->flags = cfg->flags;
	fou->port = cfg->udp_config.local_udp_port;

	/* Initialize for the configured encapsulation type */
	switch (cfg->type) {
	case FOU_ENCAP_DIRECT:
		err = fou_encap_init(sk, fou, cfg);
		if (err)
			goto error;
		break;
	case FOU_ENCAP_GUE:
		err = gue_encap_init(sk, fou, cfg);
		if (err)
			goto error;
		break;
	default:
		err = -EINVAL;
		goto error;
	}

	udp_sk(sk)->encap_type = 1;
	udp_encap_enable();

	sk->sk_user_data = fou;
	fou->sock = sock;

	inet_inc_convert_csum(sk);

	sk->sk_allocation = GFP_ATOMIC;

	if (cfg->udp_config.family == AF_INET) {
		err = udp_add_offload(&fou->udp_offloads);
		if (err)
			goto error;
	}

	err = fou_add_to_port_list(fou);
	if (err)
		goto error;

	if (sockp)
		*sockp = sock;

	return 0;

error:
	kfree(fou);
	if (sock)
		sock_release(sock);

	return err;
}
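
/* Find the FOU entry bound to the configured UDP port and release it. */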
static int fou_destroy(struct net *net, struct fou_cfg *cfg)
{
	struct fou *fou;
	u16 port = cfg->udp_config.local_udp_port;
	int err = -EINVAL;

	spin_lock(&fou_lock);
	list_for_each_entry(fou, &fou_list, list) {
		if (fou->port == port) {
			/* fou_release() also removes the GRO offload */
			fou_release(fou);
			err = 0;
			break;
		}
	}
	spin_unlock(&fou_lock);

	return err;
}

static struct genl_family fou_nl_family = {
	.id		= GENL_ID_GENERATE,
	.hdrsize	= 0,
	.name		= FOU_GENL_NAME,
	.version	= FOU_GENL_VERSION,
	.maxattr	= FOU_ATTR_MAX,
	.netnsok	= true,
};

static struct nla_policy fou_nl_policy[FOU_ATTR_MAX + 1] = {
	[FOU_ATTR_PORT] = { .type = NLA_U16, },
	[FOU_ATTR_AF] = { .type = NLA_U8, },
	[FOU_ATTR_IPPROTO] = { .type = NLA_U8, },
	[FOU_ATTR_TYPE] = { .type = NLA_U8, },
	[FOU_ATTR_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG, },
};
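
/* Translate FOU netlink attributes into a fou_cfg. The address family
 * defaults to AF_INET when FOU_ATTR_AF is not supplied.
 */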
static int parse_nl_config(struct genl_info *info,
			   struct fou_cfg *cfg)
{
	memset(cfg, 0, sizeof(*cfg));

	cfg->udp_config.family = AF_INET;

	if (info->attrs[FOU_ATTR_AF]) {
		u8 family = nla_get_u8(info->attrs[FOU_ATTR_AF]);

		if (family != AF_INET && family != AF_INET6)
			return -EINVAL;

		cfg->udp_config.family = family;
	}

	if (info->attrs[FOU_ATTR_PORT]) {
		u16 port = nla_get_u16(info->attrs[FOU_ATTR_PORT]);

		cfg->udp_config.local_udp_port = port;
	}

	if (info->attrs[FOU_ATTR_IPPROTO])
		cfg->protocol = nla_get_u8(info->attrs[FOU_ATTR_IPPROTO]);

	if (info->attrs[FOU_ATTR_TYPE])
		cfg->type = nla_get_u8(info->attrs[FOU_ATTR_TYPE]);

	if (info->attrs[FOU_ATTR_REMCSUM_NOPARTIAL])
		cfg->flags |= FOU_F_REMCSUM_NOPARTIAL;

	return 0;
}

static int fou_nl_cmd_add_port(struct sk_buff *skb, struct genl_info *info)
{
	struct fou_cfg cfg;
	int err;

	err = parse_nl_config(info, &cfg);
	if (err)
		return err;

	return fou_create(&init_net, &cfg, NULL);
}

static int fou_nl_cmd_rm_port(struct sk_buff *skb, struct genl_info *info)
{
	struct fou_cfg cfg;
	int err;

	err = parse_nl_config(info, &cfg);
	if (err)
		return err;

	return fou_destroy(&init_net, &cfg);
}

static const struct genl_ops fou_nl_ops[] = {
	{
		.cmd = FOU_CMD_ADD,
		.doit = fou_nl_cmd_add_port,
		.policy = fou_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = FOU_CMD_DEL,
		.doit = fou_nl_cmd_rm_port,
		.policy = fou_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};

size_t fou_encap_hlen(struct ip_tunnel_encap *e)
{
	return sizeof(struct udphdr);
}
EXPORT_SYMBOL(fou_encap_hlen);

size_t gue_encap_hlen(struct ip_tunnel_encap *e)
{
	size_t len;
	bool need_priv = false;

	len = sizeof(struct udphdr) + sizeof(struct guehdr);

	if (e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) {
		len += GUE_PLEN_REMCSUM;
		need_priv = true;
	}

	len += need_priv ? GUE_LEN_PRIV : 0;

	return len;
}
EXPORT_SYMBOL(gue_encap_hlen);
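
/* Push and fill in the outer UDP header, setting up the UDP checksum per
 * TUNNEL_ENCAP_FLAG_CSUM, and report UDP as the outer protocol.
 */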
static void fou_build_udp(struct sk_buff *skb, struct ip_tunnel_encap *e,
			  struct flowi4 *fl4, u8 *protocol, __be16 sport)
{
	struct udphdr *uh;

	skb_push(skb, sizeof(struct udphdr));
	skb_reset_transport_header(skb);

	uh = udp_hdr(skb);

	uh->dest = e->dport;
	uh->source = sport;
	uh->len = htons(skb->len);
	uh->check = 0;
	udp_set_csum(!(e->flags & TUNNEL_ENCAP_FLAG_CSUM), skb,
		     fl4->saddr, fl4->daddr, skb->len);

	*protocol = IPPROTO_UDP;
}

int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
		     u8 *protocol, struct flowi4 *fl4)
{
	bool csum = !!(e->flags & TUNNEL_ENCAP_FLAG_CSUM);
	int type = csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
	__be16 sport;

	skb = iptunnel_handle_offloads(skb, csum, type);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
					       skb, 0, 0, false);
	fou_build_udp(skb, e, fl4, protocol, sport);

	return 0;
}
EXPORT_SYMBOL(fou_build_header);
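
/* Build the GUE encapsulation: push the GUE header (plus a private flags
 * word and remote checksum offload option when needed), then the outer
 * UDP header in front of it.
 */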
int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
		     u8 *protocol, struct flowi4 *fl4)
{
	bool csum = !!(e->flags & TUNNEL_ENCAP_FLAG_CSUM);
	int type = csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
	struct guehdr *guehdr;
	size_t hdrlen, optlen = 0;
	__be16 sport;
	void *data;
	bool need_priv = false;

	if ((e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) &&
	    skb->ip_summed == CHECKSUM_PARTIAL) {
		csum = false;
		optlen += GUE_PLEN_REMCSUM;
		type |= SKB_GSO_TUNNEL_REMCSUM;
		need_priv = true;
	}

	optlen += need_priv ? GUE_LEN_PRIV : 0;

	skb = iptunnel_handle_offloads(skb, csum, type);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* Get source port (based on flow hash) before skb_push */
	sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
					       skb, 0, 0, false);

	hdrlen = sizeof(struct guehdr) + optlen;

	skb_push(skb, hdrlen);

	guehdr = (struct guehdr *)skb->data;

	guehdr->control = 0;
	guehdr->version = 0;
	guehdr->hlen = optlen >> 2;
	guehdr->flags = 0;
	guehdr->proto_ctype = *protocol;

	data = &guehdr[1];

	if (need_priv) {
		__be32 *flags = data;

		guehdr->flags |= GUE_FLAG_PRIV;
		*flags = 0;
		data += GUE_LEN_PRIV;

		if (type & SKB_GSO_TUNNEL_REMCSUM) {
			u16 csum_start = skb_checksum_start_offset(skb);
			__be16 *pd = data;

			if (csum_start < hdrlen)
				return -EINVAL;

			csum_start -= hdrlen;
			pd[0] = htons(csum_start);
			pd[1] = htons(csum_start + skb->csum_offset);

			if (!skb_is_gso(skb)) {
				skb->ip_summed = CHECKSUM_NONE;
				skb->encapsulation = 0;
			}

			*flags |= GUE_PFLAG_REMCSUM;
			data += GUE_PLEN_REMCSUM;
		}
	}

	fou_build_udp(skb, e, fl4, protocol, sport);

	return 0;
}
EXPORT_SYMBOL(gue_build_header);

#ifdef CONFIG_NET_FOU_IP_TUNNELS

static const struct ip_tunnel_encap_ops __read_mostly fou_iptun_ops = {
	.encap_hlen = fou_encap_hlen,
	.build_header = fou_build_header,
};

static const struct ip_tunnel_encap_ops __read_mostly gue_iptun_ops = {
	.encap_hlen = gue_encap_hlen,
	.build_header = gue_build_header,
};

static int ip_tunnel_encap_add_fou_ops(void)
{
	int ret;

	ret = ip_tunnel_encap_add_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
	if (ret < 0) {
		pr_err("can't add fou ops\n");
		return ret;
	}

	ret = ip_tunnel_encap_add_ops(&gue_iptun_ops, TUNNEL_ENCAP_GUE);
	if (ret < 0) {
		pr_err("can't add gue ops\n");
		ip_tunnel_encap_del_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
		return ret;
	}

	return 0;
}

static void ip_tunnel_encap_del_fou_ops(void)
{
	ip_tunnel_encap_del_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
	ip_tunnel_encap_del_ops(&gue_iptun_ops, TUNNEL_ENCAP_GUE);
}

#else

static int ip_tunnel_encap_add_fou_ops(void)
{
	return 0;
}

static void ip_tunnel_encap_del_fou_ops(void)
{
}

#endif

static int __init fou_init(void)
{
	int ret;

	ret = genl_register_family_with_ops(&fou_nl_family,
					    fou_nl_ops);
	if (ret < 0)
		goto exit;

	ret = ip_tunnel_encap_add_fou_ops();
	if (ret < 0)
		genl_unregister_family(&fou_nl_family);

exit:
	return ret;
}

static void __exit fou_fini(void)
{
	struct fou *fou, *next;

	ip_tunnel_encap_del_fou_ops();

	genl_unregister_family(&fou_nl_family);

	/* Close all the FOU sockets */
	spin_lock(&fou_lock);
	list_for_each_entry_safe(fou, next, &fou_list, list)
		fou_release(fou);
	spin_unlock(&fou_lock);
}

module_init(fou_init);
module_exit(fou_fini);
MODULE_AUTHOR("Tom Herbert <therbert@google.com>");
MODULE_LICENSE("GPL");