fou.c

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <net/genetlink.h>
#include <net/gue.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/xfrm.h>
#include <uapi/linux/fou.h>
#include <uapi/linux/genetlink.h>
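
/* FOU (Foo-over-UDP) decapsulates packets received on a configured UDP
 * port and hands the inner packet back to the IP stack, either directly
 * (the UDP payload is the encapsulated packet) or via a GUE (Generic UDP
 * Encapsulation) header. One struct fou instance describes one bound UDP
 * port within a network namespace.
 */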
struct fou {
        struct socket *sock;
        u8 protocol;
        u8 flags;
        __be16 port;
        u16 type;
        struct udp_offload udp_offloads;
        struct list_head list;
        struct rcu_head rcu;
};

#define FOU_F_REMCSUM_NOPARTIAL BIT(0)

struct fou_cfg {
        u16 type;
        u8 protocol;
        u8 flags;
        struct udp_port_cfg udp_config;
};

static unsigned int fou_net_id;

struct fou_net {
        struct list_head fou_list;
        struct mutex fou_lock;
};

static inline struct fou *fou_from_sock(struct sock *sk)
{
        return sk->sk_user_data;
}

static int fou_recv_pull(struct sk_buff *skb, size_t len)
{
        struct iphdr *iph = ip_hdr(skb);

        /* Remove 'len' bytes from the packet (UDP header and
         * FOU header if present).
         */
        iph->tot_len = htons(ntohs(iph->tot_len) - len);
        __skb_pull(skb, len);
        skb_postpull_rcsum(skb, udp_hdr(skb), len);
        skb_reset_transport_header(skb);
        return iptunnel_pull_offloads(skb);
}
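
/* UDP encap_rcv handler for plain FOU. Strips the UDP header and returns
 * the negative inner protocol number, which tells the UDP/IP stack to
 * resubmit the packet as that protocol. Returning 1 passes the packet on
 * as ordinary UDP; returning 0 means it was consumed.
 */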
static int fou_udp_recv(struct sock *sk, struct sk_buff *skb)
{
        struct fou *fou = fou_from_sock(sk);

        if (!fou)
                return 1;

        if (fou_recv_pull(skb, sizeof(struct udphdr)))
                goto drop;

        return -fou->protocol;

drop:
        kfree_skb(skb);
        return 0;
}
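
/* Handle the GUE remote checksum offload option on receive:
 * skb_remcsum_process() patches the inner checksum field using the
 * start/offset pair carried in the private option, unless the NIC
 * already did it (remcsum_offload). Returns NULL if the needed bytes
 * cannot be pulled into the linear area.
 */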
static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr,
                                  void *data, size_t hdrlen, u8 ipproto,
                                  bool nopartial)
{
        __be16 *pd = data;
        size_t start = ntohs(pd[0]);
        size_t offset = ntohs(pd[1]);
        size_t plen = sizeof(struct udphdr) + hdrlen +
            max_t(size_t, offset + sizeof(u16), start);

        if (skb->remcsum_offload)
                return guehdr;

        if (!pskb_may_pull(skb, plen))
                return NULL;
        guehdr = (struct guehdr *)&udp_hdr(skb)[1];

        skb_remcsum_process(skb, (void *)guehdr + hdrlen,
                            start, offset, nopartial);

        return guehdr;
}

static int gue_control_message(struct sk_buff *skb, struct guehdr *guehdr)
{
        /* No support yet */
        kfree_skb(skb);
        return 0;
}
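
/* UDP encap_rcv handler for GUE. Validates the GUE header, processes any
 * private options (currently only remote checksum offload), strips the
 * UDP and GUE headers, and returns the negative proto_ctype so the packet
 * is resubmitted to the IP stack as the inner protocol.
 */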
static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
{
        struct fou *fou = fou_from_sock(sk);
        size_t len, optlen, hdrlen;
        struct guehdr *guehdr;
        void *data;
        u16 doffset = 0;

        if (!fou)
                return 1;

        len = sizeof(struct udphdr) + sizeof(struct guehdr);
        if (!pskb_may_pull(skb, len))
                goto drop;

        guehdr = (struct guehdr *)&udp_hdr(skb)[1];

        optlen = guehdr->hlen << 2;
        len += optlen;

        if (!pskb_may_pull(skb, len))
                goto drop;

        /* guehdr may change after pull */
        guehdr = (struct guehdr *)&udp_hdr(skb)[1];

        if (guehdr->version != 0 || validate_gue_flags(guehdr, optlen))
                goto drop;

        hdrlen = sizeof(struct guehdr) + optlen;

        ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(skb)->tot_len) - len);

        /* Pull csum through the guehdr now. This can be used if
         * there is a remote checksum offload.
         */
        skb_postpull_rcsum(skb, udp_hdr(skb), len);

        data = &guehdr[1];

        if (guehdr->flags & GUE_FLAG_PRIV) {
                __be32 flags = *(__be32 *)(data + doffset);

                doffset += GUE_LEN_PRIV;

                if (flags & GUE_PFLAG_REMCSUM) {
                        guehdr = gue_remcsum(skb, guehdr, data + doffset,
                                             hdrlen, guehdr->proto_ctype,
                                             !!(fou->flags &
                                                FOU_F_REMCSUM_NOPARTIAL));
                        if (!guehdr)
                                goto drop;

                        data = &guehdr[1];
                        doffset += GUE_PLEN_REMCSUM;
                }
        }

        if (unlikely(guehdr->control))
                return gue_control_message(skb, guehdr);

        __skb_pull(skb, sizeof(struct udphdr) + hdrlen);
        skb_reset_transport_header(skb);

        if (iptunnel_pull_offloads(skb))
                goto drop;

        return -guehdr->proto_ctype;

drop:
        kfree_skb(skb);
        return 0;
}
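
/* GRO receive handler for plain FOU: look up the inner protocol's
 * net_offload and delegate, so inner TCP/GRE/etc. flows can still be
 * coalesced across the UDP encapsulation.
 */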
static struct sk_buff **fou_gro_receive(struct sk_buff **head,
                                        struct sk_buff *skb,
                                        struct udp_offload *uoff)
{
        const struct net_offload *ops;
        struct sk_buff **pp = NULL;
        u8 proto = NAPI_GRO_CB(skb)->proto;
        const struct net_offload **offloads;

        /* We can clear the encap_mark for FOU as we are essentially doing
         * one of two possible things. We are either adding an L4 tunnel
         * header to the outer L3 tunnel header, or we are simply
         * treating the GRE tunnel header as though it is a UDP protocol
         * specific header such as VXLAN or GENEVE.
         */
        NAPI_GRO_CB(skb)->encap_mark = 0;

        rcu_read_lock();
        offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
        ops = rcu_dereference(offloads[proto]);
        if (!ops || !ops->callbacks.gro_receive)
                goto out_unlock;

        pp = ops->callbacks.gro_receive(head, skb);

out_unlock:
        rcu_read_unlock();

        return pp;
}

static int fou_gro_complete(struct sk_buff *skb, int nhoff,
                            struct udp_offload *uoff)
{
        const struct net_offload *ops;
        u8 proto = NAPI_GRO_CB(skb)->proto;
        int err = -ENOSYS;
        const struct net_offload **offloads;

        udp_tunnel_gro_complete(skb, nhoff);

        rcu_read_lock();
        offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
        ops = rcu_dereference(offloads[proto]);
        if (WARN_ON(!ops || !ops->callbacks.gro_complete))
                goto out_unlock;

        err = ops->callbacks.gro_complete(skb, nhoff);

out_unlock:
        rcu_read_unlock();

        return err;
}
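
/* GRO counterpart of gue_remcsum(): patch the inner checksum from the
 * remote checksum offload option while the packet is still being
 * coalesced. Requires a validated outer checksum (csum_valid); marks
 * remcsum_offload so the non-GRO path will not process the option again.
 */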
static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
                                      struct guehdr *guehdr, void *data,
                                      size_t hdrlen, struct gro_remcsum *grc,
                                      bool nopartial)
{
        __be16 *pd = data;
        size_t start = ntohs(pd[0]);
        size_t offset = ntohs(pd[1]);

        if (skb->remcsum_offload)
                return guehdr;

        if (!NAPI_GRO_CB(skb)->csum_valid)
                return NULL;

        guehdr = skb_gro_remcsum_process(skb, (void *)guehdr, off, hdrlen,
                                         start, offset, grc, nopartial);

        skb->remcsum_offload = 1;

        return guehdr;
}
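
/* GRO receive handler for GUE. Parses and validates the GUE header,
 * applies remote checksum offload if present, checks that already-queued
 * packets of the same flow carry an identical GUE header (base word plus
 * any options), and then hands off to the inner protocol's gro_receive.
 */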
static struct sk_buff **gue_gro_receive(struct sk_buff **head,
                                        struct sk_buff *skb,
                                        struct udp_offload *uoff)
{
        const struct net_offload **offloads;
        const struct net_offload *ops;
        struct sk_buff **pp = NULL;
        struct sk_buff *p;
        struct guehdr *guehdr;
        size_t len, optlen, hdrlen, off;
        void *data;
        u16 doffset = 0;
        int flush = 1;
        struct fou *fou = container_of(uoff, struct fou, udp_offloads);
        struct gro_remcsum grc;

        skb_gro_remcsum_init(&grc);

        off = skb_gro_offset(skb);
        len = off + sizeof(*guehdr);

        guehdr = skb_gro_header_fast(skb, off);
        if (skb_gro_header_hard(skb, len)) {
                guehdr = skb_gro_header_slow(skb, len, off);
                if (unlikely(!guehdr))
                        goto out;
        }

        optlen = guehdr->hlen << 2;
        len += optlen;

        if (skb_gro_header_hard(skb, len)) {
                guehdr = skb_gro_header_slow(skb, len, off);
                if (unlikely(!guehdr))
                        goto out;
        }

        if (unlikely(guehdr->control) || guehdr->version != 0 ||
            validate_gue_flags(guehdr, optlen))
                goto out;

        hdrlen = sizeof(*guehdr) + optlen;

        /* Adjust NAPI_GRO_CB(skb)->csum to account for guehdr,
         * this is needed if there is a remote checksum offload.
         */
        skb_gro_postpull_rcsum(skb, guehdr, hdrlen);

        data = &guehdr[1];

        if (guehdr->flags & GUE_FLAG_PRIV) {
                __be32 flags = *(__be32 *)(data + doffset);

                doffset += GUE_LEN_PRIV;

                if (flags & GUE_PFLAG_REMCSUM) {
                        guehdr = gue_gro_remcsum(skb, off, guehdr,
                                                 data + doffset, hdrlen, &grc,
                                                 !!(fou->flags &
                                                    FOU_F_REMCSUM_NOPARTIAL));
                        if (!guehdr)
                                goto out;

                        data = &guehdr[1];

                        doffset += GUE_PLEN_REMCSUM;
                }
        }

        skb_gro_pull(skb, hdrlen);

        for (p = *head; p; p = p->next) {
                const struct guehdr *guehdr2;

                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;

                guehdr2 = (struct guehdr *)(p->data + off);

                /* Compare base GUE header to be equal (covers
                 * hlen, version, proto_ctype, and flags).
                 */
                if (guehdr->word != guehdr2->word) {
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }

                /* Check that the optional fields are the same. */
                if (guehdr->hlen && memcmp(&guehdr[1], &guehdr2[1],
                                           guehdr->hlen << 2)) {
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }
        }

        /* We can clear the encap_mark for GUE as we are essentially doing
         * one of two possible things. We are either adding an L4 tunnel
         * header to the outer L3 tunnel header, or we are simply
         * treating the GRE tunnel header as though it is a UDP protocol
         * specific header such as VXLAN or GENEVE.
         */
        NAPI_GRO_CB(skb)->encap_mark = 0;

        rcu_read_lock();
        offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
        ops = rcu_dereference(offloads[guehdr->proto_ctype]);
        if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
                goto out_unlock;

        pp = ops->callbacks.gro_receive(head, skb);
        flush = 0;

out_unlock:
        rcu_read_unlock();
out:
        NAPI_GRO_CB(skb)->flush |= flush;
        skb_gro_remcsum_cleanup(skb, &grc);

        return pp;
}

static int gue_gro_complete(struct sk_buff *skb, int nhoff,
                            struct udp_offload *uoff)
{
        const struct net_offload **offloads;
        struct guehdr *guehdr = (struct guehdr *)(skb->data + nhoff);
        const struct net_offload *ops;
        unsigned int guehlen;
        u8 proto;
        int err = -ENOENT;

        proto = guehdr->proto_ctype;

        guehlen = sizeof(*guehdr) + (guehdr->hlen << 2);

        rcu_read_lock();
        offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
        ops = rcu_dereference(offloads[proto]);
        if (WARN_ON(!ops || !ops->callbacks.gro_complete))
                goto out_unlock;

        err = ops->callbacks.gro_complete(skb, nhoff + guehlen);

out_unlock:
        rcu_read_unlock();
        return err;
}
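
/* Per-netns bookkeeping: each bound port appears at most once on
 * fou_list, protected by fou_lock. fou_release() unbinds the UDP socket
 * and frees the fou instance after an RCU grace period, since the
 * receive path may still be looking at it via sk_user_data.
 */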
static int fou_add_to_port_list(struct net *net, struct fou *fou)
{
        struct fou_net *fn = net_generic(net, fou_net_id);
        struct fou *fout;

        mutex_lock(&fn->fou_lock);
        list_for_each_entry(fout, &fn->fou_list, list) {
                if (fou->port == fout->port) {
                        mutex_unlock(&fn->fou_lock);
                        return -EALREADY;
                }
        }

        list_add(&fou->list, &fn->fou_list);
        mutex_unlock(&fn->fou_lock);

        return 0;
}

static void fou_release(struct fou *fou)
{
        struct socket *sock = fou->sock;
        struct sock *sk = sock->sk;

        if (sk->sk_family == AF_INET)
                udp_del_offload(&fou->udp_offloads);
        list_del(&fou->list);
        udp_tunnel_sock_release(sock);

        kfree_rcu(fou, rcu);
}

static int fou_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
{
        udp_sk(sk)->encap_rcv = fou_udp_recv;
        fou->protocol = cfg->protocol;
        fou->udp_offloads.callbacks.gro_receive = fou_gro_receive;
        fou->udp_offloads.callbacks.gro_complete = fou_gro_complete;
        fou->udp_offloads.port = cfg->udp_config.local_udp_port;
        fou->udp_offloads.ipproto = cfg->protocol;

        return 0;
}

static int gue_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
{
        udp_sk(sk)->encap_rcv = gue_udp_recv;
        fou->udp_offloads.callbacks.gro_receive = gue_gro_receive;
        fou->udp_offloads.callbacks.gro_complete = gue_gro_complete;
        fou->udp_offloads.port = cfg->udp_config.local_udp_port;

        return 0;
}
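
/* Create a FOU port: open a kernel UDP socket on the configured port,
 * wire up the encap_rcv handler and GRO offloads for the requested
 * encapsulation type, and add the instance to the per-netns list. On any
 * failure the socket is released and the fou structure freed.
 */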
static int fou_create(struct net *net, struct fou_cfg *cfg,
                      struct socket **sockp)
{
        struct socket *sock = NULL;
        struct fou *fou = NULL;
        struct sock *sk;
        int err;

        /* Open UDP socket */
        err = udp_sock_create(net, &cfg->udp_config, &sock);
        if (err < 0)
                goto error;

        /* Allocate FOU port structure */
        fou = kzalloc(sizeof(*fou), GFP_KERNEL);
        if (!fou) {
                err = -ENOMEM;
                goto error;
        }

        sk = sock->sk;

        fou->flags = cfg->flags;
        fou->port = cfg->udp_config.local_udp_port;

        /* Initialize according to the fou type */
        switch (cfg->type) {
        case FOU_ENCAP_DIRECT:
                err = fou_encap_init(sk, fou, cfg);
                if (err)
                        goto error;
                break;
        case FOU_ENCAP_GUE:
                err = gue_encap_init(sk, fou, cfg);
                if (err)
                        goto error;
                break;
        default:
                err = -EINVAL;
                goto error;
        }

        fou->type = cfg->type;

        udp_sk(sk)->encap_type = 1;
        udp_encap_enable();

        sk->sk_user_data = fou;
        fou->sock = sock;

        inet_inc_convert_csum(sk);

        sk->sk_allocation = GFP_ATOMIC;

        if (cfg->udp_config.family == AF_INET) {
                err = udp_add_offload(net, &fou->udp_offloads);
                if (err)
                        goto error;
        }

        err = fou_add_to_port_list(net, fou);
        if (err)
                goto error;

        if (sockp)
                *sockp = sock;

        return 0;

error:
        kfree(fou);
        if (sock)
                udp_tunnel_sock_release(sock);

        return err;
}

static int fou_destroy(struct net *net, struct fou_cfg *cfg)
{
        struct fou_net *fn = net_generic(net, fou_net_id);
        __be16 port = cfg->udp_config.local_udp_port;
        int err = -EINVAL;
        struct fou *fou;

        mutex_lock(&fn->fou_lock);
        list_for_each_entry(fou, &fn->fou_list, list) {
                if (fou->port == port) {
                        fou_release(fou);
                        err = 0;
                        break;
                }
        }
        mutex_unlock(&fn->fou_lock);

        return err;
}
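
/* Generic netlink interface for configuring FOU ports
 * (FOU_CMD_ADD/DEL/GET). Userspace typically reaches this through
 * iproute2, e.g. "ip fou add port 5555 ipproto 47" for direct
 * encapsulation or "ip fou add port 5555 gue" for GUE.
 */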
static struct genl_family fou_nl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = 0,
        .name = FOU_GENL_NAME,
        .version = FOU_GENL_VERSION,
        .maxattr = FOU_ATTR_MAX,
        .netnsok = true,
};

static struct nla_policy fou_nl_policy[FOU_ATTR_MAX + 1] = {
        [FOU_ATTR_PORT] = { .type = NLA_U16, },
        [FOU_ATTR_AF] = { .type = NLA_U8, },
        [FOU_ATTR_IPPROTO] = { .type = NLA_U8, },
        [FOU_ATTR_TYPE] = { .type = NLA_U8, },
        [FOU_ATTR_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG, },
};

static int parse_nl_config(struct genl_info *info,
                           struct fou_cfg *cfg)
{
        memset(cfg, 0, sizeof(*cfg));

        cfg->udp_config.family = AF_INET;

        if (info->attrs[FOU_ATTR_AF]) {
                u8 family = nla_get_u8(info->attrs[FOU_ATTR_AF]);

                if (family != AF_INET)
                        return -EINVAL;

                cfg->udp_config.family = family;
        }

        if (info->attrs[FOU_ATTR_PORT]) {
                __be16 port = nla_get_be16(info->attrs[FOU_ATTR_PORT]);

                cfg->udp_config.local_udp_port = port;
        }

        if (info->attrs[FOU_ATTR_IPPROTO])
                cfg->protocol = nla_get_u8(info->attrs[FOU_ATTR_IPPROTO]);

        if (info->attrs[FOU_ATTR_TYPE])
                cfg->type = nla_get_u8(info->attrs[FOU_ATTR_TYPE]);

        if (info->attrs[FOU_ATTR_REMCSUM_NOPARTIAL])
                cfg->flags |= FOU_F_REMCSUM_NOPARTIAL;

        return 0;
}

static int fou_nl_cmd_add_port(struct sk_buff *skb, struct genl_info *info)
{
        struct net *net = genl_info_net(info);
        struct fou_cfg cfg;
        int err;

        err = parse_nl_config(info, &cfg);
        if (err)
                return err;

        return fou_create(net, &cfg, NULL);
}

static int fou_nl_cmd_rm_port(struct sk_buff *skb, struct genl_info *info)
{
        struct net *net = genl_info_net(info);
        struct fou_cfg cfg;
        int err;

        err = parse_nl_config(info, &cfg);
        if (err)
                return err;

        return fou_destroy(net, &cfg);
}

static int fou_fill_info(struct fou *fou, struct sk_buff *msg)
{
        if (nla_put_u8(msg, FOU_ATTR_AF, fou->sock->sk->sk_family) ||
            nla_put_be16(msg, FOU_ATTR_PORT, fou->port) ||
            nla_put_u8(msg, FOU_ATTR_IPPROTO, fou->protocol) ||
            nla_put_u8(msg, FOU_ATTR_TYPE, fou->type))
                return -1;

        if (fou->flags & FOU_F_REMCSUM_NOPARTIAL)
                if (nla_put_flag(msg, FOU_ATTR_REMCSUM_NOPARTIAL))
                        return -1;
        return 0;
}

static int fou_dump_info(struct fou *fou, u32 portid, u32 seq,
                         u32 flags, struct sk_buff *skb, u8 cmd)
{
        void *hdr;

        hdr = genlmsg_put(skb, portid, seq, &fou_nl_family, flags, cmd);
        if (!hdr)
                return -ENOMEM;

        if (fou_fill_info(fou, skb) < 0)
                goto nla_put_failure;

        genlmsg_end(skb, hdr);
        return 0;

nla_put_failure:
        genlmsg_cancel(skb, hdr);
        return -EMSGSIZE;
}

static int fou_nl_cmd_get_port(struct sk_buff *skb, struct genl_info *info)
{
        struct net *net = genl_info_net(info);
        struct fou_net *fn = net_generic(net, fou_net_id);
        struct sk_buff *msg;
        struct fou_cfg cfg;
        struct fou *fout;
        __be16 port;
        int ret;

        ret = parse_nl_config(info, &cfg);
        if (ret)
                return ret;
        port = cfg.udp_config.local_udp_port;
        if (port == 0)
                return -EINVAL;

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;

        ret = -ESRCH;
        mutex_lock(&fn->fou_lock);
        list_for_each_entry(fout, &fn->fou_list, list) {
                if (port == fout->port) {
                        ret = fou_dump_info(fout, info->snd_portid,
                                            info->snd_seq, 0, msg,
                                            info->genlhdr->cmd);
                        break;
                }
        }
        mutex_unlock(&fn->fou_lock);
        if (ret < 0)
                goto out_free;

        return genlmsg_reply(msg, info);

out_free:
        nlmsg_free(msg);
        return ret;
}

static int fou_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct net *net = sock_net(skb->sk);
        struct fou_net *fn = net_generic(net, fou_net_id);
        struct fou *fout;
        int idx = 0, ret;

        mutex_lock(&fn->fou_lock);
        list_for_each_entry(fout, &fn->fou_list, list) {
                if (idx++ < cb->args[0])
                        continue;
                ret = fou_dump_info(fout, NETLINK_CB(cb->skb).portid,
                                    cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                    skb, FOU_CMD_GET);
                if (ret)
                        break;
        }
        mutex_unlock(&fn->fou_lock);

        cb->args[0] = idx;
        return skb->len;
}

static const struct genl_ops fou_nl_ops[] = {
        {
                .cmd = FOU_CMD_ADD,
                .doit = fou_nl_cmd_add_port,
                .policy = fou_nl_policy,
                .flags = GENL_ADMIN_PERM,
        },
        {
                .cmd = FOU_CMD_DEL,
                .doit = fou_nl_cmd_rm_port,
                .policy = fou_nl_policy,
                .flags = GENL_ADMIN_PERM,
        },
        {
                .cmd = FOU_CMD_GET,
                .doit = fou_nl_cmd_get_port,
                .dumpit = fou_nl_dump,
                .policy = fou_nl_policy,
        },
};
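
/* Transmit-side helpers, used by IP tunnels that select FOU or GUE
 * encapsulation. The *_encap_hlen() functions report how many bytes of
 * outer header the tunnel must reserve: a bare UDP header for FOU, plus
 * the GUE header and any private options for GUE.
 */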
size_t fou_encap_hlen(struct ip_tunnel_encap *e)
{
        return sizeof(struct udphdr);
}
EXPORT_SYMBOL(fou_encap_hlen);

size_t gue_encap_hlen(struct ip_tunnel_encap *e)
{
        size_t len;
        bool need_priv = false;

        len = sizeof(struct udphdr) + sizeof(struct guehdr);

        if (e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) {
                len += GUE_PLEN_REMCSUM;
                need_priv = true;
        }

        len += need_priv ? GUE_LEN_PRIV : 0;

        return len;
}
EXPORT_SYMBOL(gue_encap_hlen);
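
/* Push the outer UDP header for either encapsulation and set *protocol
 * to IPPROTO_UDP so the caller builds the outer IP header accordingly.
 * The UDP checksum is filled in, or left zero, depending on
 * TUNNEL_ENCAP_FLAG_CSUM.
 */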
static void fou_build_udp(struct sk_buff *skb, struct ip_tunnel_encap *e,
                          struct flowi4 *fl4, u8 *protocol, __be16 sport)
{
        struct udphdr *uh;

        skb_push(skb, sizeof(struct udphdr));
        skb_reset_transport_header(skb);

        uh = udp_hdr(skb);

        uh->dest = e->dport;
        uh->source = sport;
        uh->len = htons(skb->len);
        udp_set_csum(!(e->flags & TUNNEL_ENCAP_FLAG_CSUM), skb,
                     fl4->saddr, fl4->daddr, skb->len);

        *protocol = IPPROTO_UDP;
}

int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
                     u8 *protocol, struct flowi4 *fl4)
{
        int type = e->flags & TUNNEL_ENCAP_FLAG_CSUM ? SKB_GSO_UDP_TUNNEL_CSUM :
                                                       SKB_GSO_UDP_TUNNEL;
        __be16 sport;

        skb = iptunnel_handle_offloads(skb, type);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
                                               skb, 0, 0, false);
        fou_build_udp(skb, e, fl4, protocol, sport);

        return 0;
}
EXPORT_SYMBOL(fou_build_header);
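
/* Build the GUE variant of the outer header. The resulting layout is:
 * outer IP | UDP | GUE (4 bytes) | private options | inner packet.
 * When remote checksum offload is requested and the inner checksum is
 * still CHECKSUM_PARTIAL, a private option carrying the checksum start
 * and offset is emitted instead of resolving the checksum here.
 */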
int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
                     u8 *protocol, struct flowi4 *fl4)
{
        int type = e->flags & TUNNEL_ENCAP_FLAG_CSUM ? SKB_GSO_UDP_TUNNEL_CSUM :
                                                       SKB_GSO_UDP_TUNNEL;
        struct guehdr *guehdr;
        size_t hdrlen, optlen = 0;
        __be16 sport;
        void *data;
        bool need_priv = false;

        if ((e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) &&
            skb->ip_summed == CHECKSUM_PARTIAL) {
                optlen += GUE_PLEN_REMCSUM;
                type |= SKB_GSO_TUNNEL_REMCSUM;
                need_priv = true;
        }

        optlen += need_priv ? GUE_LEN_PRIV : 0;

        skb = iptunnel_handle_offloads(skb, type);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        /* Get source port (based on flow hash) before skb_push */
        sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
                                               skb, 0, 0, false);

        hdrlen = sizeof(struct guehdr) + optlen;

        skb_push(skb, hdrlen);

        guehdr = (struct guehdr *)skb->data;

        guehdr->control = 0;
        guehdr->version = 0;
        guehdr->hlen = optlen >> 2;
        guehdr->flags = 0;
        guehdr->proto_ctype = *protocol;

        data = &guehdr[1];

        if (need_priv) {
                __be32 *flags = data;

                guehdr->flags |= GUE_FLAG_PRIV;
                *flags = 0;
                data += GUE_LEN_PRIV;

                if (type & SKB_GSO_TUNNEL_REMCSUM) {
                        u16 csum_start = skb_checksum_start_offset(skb);
                        __be16 *pd = data;

                        if (csum_start < hdrlen)
                                return -EINVAL;

                        csum_start -= hdrlen;
                        pd[0] = htons(csum_start);
                        pd[1] = htons(csum_start + skb->csum_offset);

                        if (!skb_is_gso(skb)) {
                                skb->ip_summed = CHECKSUM_NONE;
                                skb->encapsulation = 0;
                        }

                        *flags |= GUE_PFLAG_REMCSUM;
                        data += GUE_PLEN_REMCSUM;
                }
        }

        fou_build_udp(skb, e, fl4, protocol, sport);

        return 0;
}
EXPORT_SYMBOL(gue_build_header);
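
/* Register FOU and GUE with the IP tunnel core as encapsulation
 * providers (TUNNEL_ENCAP_FOU / TUNNEL_ENCAP_GUE) so tunnels such as
 * ipip, sit and GRE can request them. Compiled out to no-op stubs when
 * CONFIG_NET_FOU_IP_TUNNELS is unset.
 */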
#ifdef CONFIG_NET_FOU_IP_TUNNELS

static const struct ip_tunnel_encap_ops fou_iptun_ops = {
        .encap_hlen = fou_encap_hlen,
        .build_header = fou_build_header,
};

static const struct ip_tunnel_encap_ops gue_iptun_ops = {
        .encap_hlen = gue_encap_hlen,
        .build_header = gue_build_header,
};

static int ip_tunnel_encap_add_fou_ops(void)
{
        int ret;

        ret = ip_tunnel_encap_add_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
        if (ret < 0) {
                pr_err("can't add fou ops\n");
                return ret;
        }

        ret = ip_tunnel_encap_add_ops(&gue_iptun_ops, TUNNEL_ENCAP_GUE);
        if (ret < 0) {
                pr_err("can't add gue ops\n");
                ip_tunnel_encap_del_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
                return ret;
        }

        return 0;
}

static void ip_tunnel_encap_del_fou_ops(void)
{
        ip_tunnel_encap_del_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
        ip_tunnel_encap_del_ops(&gue_iptun_ops, TUNNEL_ENCAP_GUE);
}

#else

static int ip_tunnel_encap_add_fou_ops(void)
{
        return 0;
}

static void ip_tunnel_encap_del_fou_ops(void)
{
}

#endif

static __net_init int fou_init_net(struct net *net)
{
        struct fou_net *fn = net_generic(net, fou_net_id);

        INIT_LIST_HEAD(&fn->fou_list);
        mutex_init(&fn->fou_lock);
        return 0;
}

static __net_exit void fou_exit_net(struct net *net)
{
        struct fou_net *fn = net_generic(net, fou_net_id);
        struct fou *fou, *next;

        /* Close all the FOU sockets */
        mutex_lock(&fn->fou_lock);
        list_for_each_entry_safe(fou, next, &fn->fou_list, list)
                fou_release(fou);
        mutex_unlock(&fn->fou_lock);
}

static struct pernet_operations fou_net_ops = {
        .init = fou_init_net,
        .exit = fou_exit_net,
        .id = &fou_net_id,
        .size = sizeof(struct fou_net),
};
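
/* Module init: register the per-netns state, the generic netlink family
 * and the tunnel encap ops, unwinding in reverse order on failure.
 */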
static int __init fou_init(void)
{
        int ret;

        ret = register_pernet_device(&fou_net_ops);
        if (ret)
                goto exit;

        ret = genl_register_family_with_ops(&fou_nl_family,
                                            fou_nl_ops);
        if (ret < 0)
                goto unregister;

        ret = ip_tunnel_encap_add_fou_ops();
        if (ret == 0)
                return 0;

        genl_unregister_family(&fou_nl_family);
unregister:
        unregister_pernet_device(&fou_net_ops);
exit:
        return ret;
}

static void __exit fou_fini(void)
{
        ip_tunnel_encap_del_fou_ops();
        genl_unregister_family(&fou_nl_family);
        unregister_pernet_device(&fou_net_ops);
}

module_init(fou_init);
module_exit(fou_fini);
MODULE_AUTHOR("Tom Herbert <therbert@google.com>");
MODULE_LICENSE("GPL");