fou.c

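/*
 * fou.c - receive path for Foo-over-UDP (FOU) and Generic UDP
 * Encapsulation (GUE) tunnels, including GRO offload hooks and a
 * generic netlink interface for adding and removing FOU ports.
 */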
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <net/genetlink.h>
#include <net/gue.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/xfrm.h>
#include <uapi/linux/fou.h>
#include <uapi/linux/genetlink.h>

static DEFINE_SPINLOCK(fou_lock);
static LIST_HEAD(fou_list);

struct fou {
        struct socket *sock;
        u8 protocol;
        u16 port;
        struct udp_offload udp_offloads;
        struct list_head list;
};

struct fou_cfg {
        u16 type;
        u8 protocol;
        struct udp_port_cfg udp_config;
};

static inline struct fou *fou_from_sock(struct sock *sk)
{
        return sk->sk_user_data;
}
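/* Strip the outer UDP header (and any FOU/GUE header) and hand the packet
 * back to the IP layer as the inner protocol: the negative return value is
 * propagated by udp_rcv() to ip_local_deliver_finish(), which resubmits
 * the skb with protocol -ret.
 */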
static int fou_udp_encap_recv_deliver(struct sk_buff *skb,
                                      u8 protocol, size_t len)
{
        struct iphdr *iph = ip_hdr(skb);

        /* Remove 'len' bytes from the packet (the UDP header and the
         * FOU/GUE header if present), then return the negative of the
         * inner protocol so the stack resubmits the skb as that protocol.
         */
        iph->tot_len = htons(ntohs(iph->tot_len) - len);
        __skb_pull(skb, len);
        skb_postpull_rcsum(skb, udp_hdr(skb), len);
        skb_reset_transport_header(skb);

        return -protocol;
}
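/* encap_rcv handler for a plain FOU socket: deliver the inner packet as
 * the configured IP protocol, or return 1 to fall through to normal UDP
 * processing when no FOU state is attached to the socket.
 */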
static int fou_udp_recv(struct sock *sk, struct sk_buff *skb)
{
        struct fou *fou = fou_from_sock(sk);

        if (!fou)
                return 1;

        return fou_udp_encap_recv_deliver(skb, fou->protocol,
                                          sizeof(struct udphdr));
}
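/* encap_rcv handler for a GUE socket: validate the GUE header (version 0,
 * no flags supported yet), then strip the UDP plus GUE headers and deliver
 * the packet as guehdr->next_hdr.
 */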
static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
{
        struct fou *fou = fou_from_sock(sk);
        size_t len;
        struct guehdr *guehdr;
        struct udphdr *uh;

        if (!fou)
                return 1;

        len = sizeof(struct udphdr) + sizeof(struct guehdr);
        if (!pskb_may_pull(skb, len))
                goto drop;

        uh = udp_hdr(skb);
        guehdr = (struct guehdr *)&uh[1];

        len += guehdr->hlen << 2;
        if (!pskb_may_pull(skb, len))
                goto drop;

        uh = udp_hdr(skb);
        guehdr = (struct guehdr *)&uh[1];

        if (guehdr->version != 0)
                goto drop;

        if (guehdr->flags) {
                /* No support yet */
                goto drop;
        }

        return fou_udp_encap_recv_deliver(skb, guehdr->next_hdr, len);
drop:
        kfree_skb(skb);
        return 0;
}
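/* GRO receive callback for plain FOU: NAPI_GRO_CB(skb)->proto carries the
 * configured inner protocol (set from udp_offloads.ipproto by the UDP GRO
 * layer), so simply look up that protocol's offload ops and delegate to
 * its gro_receive.
 */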
static struct sk_buff **fou_gro_receive(struct sk_buff **head,
                                        struct sk_buff *skb)
{
        const struct net_offload *ops;
        struct sk_buff **pp = NULL;
        u8 proto = NAPI_GRO_CB(skb)->proto;
        const struct net_offload **offloads;

        rcu_read_lock();
        offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
        ops = rcu_dereference(offloads[proto]);
        if (!ops || !ops->callbacks.gro_receive)
                goto out_unlock;

        pp = ops->callbacks.gro_receive(head, skb);

out_unlock:
        rcu_read_unlock();

        return pp;
}
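/* GRO complete callback for plain FOU: hand off to the inner protocol's
 * gro_complete at the same header offset.
 */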
static int fou_gro_complete(struct sk_buff *skb, int nhoff)
{
        const struct net_offload *ops;
        u8 proto = NAPI_GRO_CB(skb)->proto;
        int err = -ENOSYS;
        const struct net_offload **offloads;

        rcu_read_lock();
        offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
        ops = rcu_dereference(offloads[proto]);
        if (WARN_ON(!ops || !ops->callbacks.gro_complete))
                goto out_unlock;

        err = ops->callbacks.gro_complete(skb, nhoff);

out_unlock:
        rcu_read_unlock();

        return err;
}
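/* GRO receive callback for GUE: parse the GUE header in the GRO header
 * area, mark held packets whose GUE header (including optional fields)
 * differs as belonging to a different flow, then pull the GUE header and
 * delegate to the inner protocol's gro_receive.
 */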
static struct sk_buff **gue_gro_receive(struct sk_buff **head,
                                        struct sk_buff *skb)
{
        const struct net_offload **offloads;
        const struct net_offload *ops;
        struct sk_buff **pp = NULL;
        struct sk_buff *p;
        u8 proto;
        struct guehdr *guehdr;
        unsigned int hlen, guehlen;
        unsigned int off;
        int flush = 1;

        off = skb_gro_offset(skb);
        hlen = off + sizeof(*guehdr);
        guehdr = skb_gro_header_fast(skb, off);
        if (skb_gro_header_hard(skb, hlen)) {
                guehdr = skb_gro_header_slow(skb, hlen, off);
                if (unlikely(!guehdr))
                        goto out;
        }

        proto = guehdr->next_hdr;

        rcu_read_lock();
        offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
        ops = rcu_dereference(offloads[proto]);
        if (WARN_ON(!ops || !ops->callbacks.gro_receive))
                goto out_unlock;

        guehlen = sizeof(*guehdr) + (guehdr->hlen << 2);

        hlen = off + guehlen;
        if (skb_gro_header_hard(skb, hlen)) {
                guehdr = skb_gro_header_slow(skb, hlen, off);
                if (unlikely(!guehdr))
                        goto out_unlock;
        }

        flush = 0;

        for (p = *head; p; p = p->next) {
                const struct guehdr *guehdr2;

                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;

                guehdr2 = (struct guehdr *)(p->data + off);

                /* Check that the base GUE headers are equal (covers
                 * hlen, version, next_hdr, and flags).
                 */
                if (guehdr->word != guehdr2->word) {
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }

                /* Check that any optional fields are the same. */
                if (guehdr->hlen && memcmp(&guehdr[1], &guehdr2[1],
                                           guehdr->hlen << 2)) {
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }
        }

        skb_gro_pull(skb, guehlen);

        /* Adjust NAPI_GRO_CB(skb)->csum after skb_gro_pull() */
        skb_gro_postpull_rcsum(skb, guehdr, guehlen);

        pp = ops->callbacks.gro_receive(head, skb);

out_unlock:
        rcu_read_unlock();
out:
        NAPI_GRO_CB(skb)->flush |= flush;

        return pp;
}
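/* GRO complete callback for GUE: skip over the GUE header and invoke the
 * inner protocol's gro_complete.
 */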
static int gue_gro_complete(struct sk_buff *skb, int nhoff)
{
        const struct net_offload **offloads;
        struct guehdr *guehdr = (struct guehdr *)(skb->data + nhoff);
        const struct net_offload *ops;
        unsigned int guehlen;
        u8 proto;
        int err = -ENOENT;

        proto = guehdr->next_hdr;
        guehlen = sizeof(*guehdr) + (guehdr->hlen << 2);

        rcu_read_lock();
        offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
        ops = rcu_dereference(offloads[proto]);
        if (WARN_ON(!ops || !ops->callbacks.gro_complete))
                goto out_unlock;

        err = ops->callbacks.gro_complete(skb, nhoff + guehlen);

out_unlock:
        rcu_read_unlock();

        return err;
}
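/* Add a new FOU instance to the global list, refusing duplicate UDP ports. */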
static int fou_add_to_port_list(struct fou *fou)
{
        struct fou *fout;

        spin_lock(&fou_lock);
        list_for_each_entry(fout, &fou_list, list) {
                if (fou->port == fout->port) {
                        spin_unlock(&fou_lock);
                        return -EALREADY;
                }
        }

        list_add(&fou->list, &fou_list);
        spin_unlock(&fou_lock);

        return 0;
}
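/* Tear down a FOU instance: unregister its GRO offload, unlink it from the
 * list, detach it from the tunnel socket, and release the socket.
 */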
static void fou_release(struct fou *fou)
{
        struct socket *sock = fou->sock;
        struct sock *sk = sock->sk;

        udp_del_offload(&fou->udp_offloads);

        list_del(&fou->list);

        /* Remove hooks into tunnel socket */
        sk->sk_user_data = NULL;

        sock_release(sock);

        kfree(fou);
}
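/* Set up a socket for direct FOU encapsulation: install the FOU encap_rcv
 * handler and the FOU GRO callbacks for the configured port and protocol.
 */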
static int fou_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
{
        udp_sk(sk)->encap_rcv = fou_udp_recv;

        fou->protocol = cfg->protocol;
        fou->udp_offloads.callbacks.gro_receive = fou_gro_receive;
        fou->udp_offloads.callbacks.gro_complete = fou_gro_complete;
        fou->udp_offloads.port = cfg->udp_config.local_udp_port;
        fou->udp_offloads.ipproto = cfg->protocol;

        return 0;
}
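/* Set up a socket for GUE encapsulation: the inner protocol is taken from
 * the GUE header at receive time, so only the GUE handlers and the port
 * are configured here.
 */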
static int gue_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
{
        udp_sk(sk)->encap_rcv = gue_udp_recv;

        fou->udp_offloads.callbacks.gro_receive = gue_gro_receive;
        fou->udp_offloads.callbacks.gro_complete = gue_gro_complete;
        fou->udp_offloads.port = cfg->udp_config.local_udp_port;

        return 0;
}
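/* Create a FOU/GUE listener: open a kernel UDP socket on the configured
 * port, initialize encapsulation for the requested type, enable UDP
 * encapsulation on the socket, register the GRO offload (IPv4 only), and
 * add the instance to the port list.
 */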
static int fou_create(struct net *net, struct fou_cfg *cfg,
                      struct socket **sockp)
{
        struct fou *fou = NULL;
        int err;
        struct socket *sock = NULL;
        struct sock *sk;

        /* Open UDP socket */
        err = udp_sock_create(net, &cfg->udp_config, &sock);
        if (err < 0)
                goto error;

        /* Allocate FOU port structure */
        fou = kzalloc(sizeof(*fou), GFP_KERNEL);
        if (!fou) {
                err = -ENOMEM;
                goto error;
        }

        sk = sock->sk;

        fou->port = cfg->udp_config.local_udp_port;

        /* Initialize for the configured FOU type */
        switch (cfg->type) {
        case FOU_ENCAP_DIRECT:
                err = fou_encap_init(sk, fou, cfg);
                if (err)
                        goto error;
                break;
        case FOU_ENCAP_GUE:
                err = gue_encap_init(sk, fou, cfg);
                if (err)
                        goto error;
                break;
        default:
                err = -EINVAL;
                goto error;
        }

        udp_sk(sk)->encap_type = 1;
        udp_encap_enable();

        sk->sk_user_data = fou;
        fou->sock = sock;

        udp_set_convert_csum(sk, true);

        sk->sk_allocation = GFP_ATOMIC;

        if (cfg->udp_config.family == AF_INET) {
                err = udp_add_offload(&fou->udp_offloads);
                if (err)
                        goto error;
        }

        err = fou_add_to_port_list(fou);
        if (err)
                goto error;

        if (sockp)
                *sockp = sock;

        return 0;

error:
        kfree(fou);
        if (sock)
                sock_release(sock);

        return err;
}
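/* Remove the FOU instance bound to the UDP port given in the configuration,
 * if one exists.
 */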
static int fou_destroy(struct net *net, struct fou_cfg *cfg)
{
        struct fou *fou;
        u16 port = cfg->udp_config.local_udp_port;
        int err = -EINVAL;

        spin_lock(&fou_lock);
        list_for_each_entry(fou, &fou_list, list) {
                if (fou->port == port) {
                        udp_del_offload(&fou->udp_offloads);
                        fou_release(fou);
                        err = 0;
                        break;
                }
        }
        spin_unlock(&fou_lock);

        return err;
}
static struct genl_family fou_nl_family = {
        .id             = GENL_ID_GENERATE,
        .hdrsize        = 0,
        .name           = FOU_GENL_NAME,
        .version        = FOU_GENL_VERSION,
        .maxattr        = FOU_ATTR_MAX,
        .netnsok        = true,
};

static struct nla_policy fou_nl_policy[FOU_ATTR_MAX + 1] = {
        [FOU_ATTR_PORT] = { .type = NLA_U16, },
        [FOU_ATTR_AF] = { .type = NLA_U8, },
        [FOU_ATTR_IPPROTO] = { .type = NLA_U8, },
        [FOU_ATTR_TYPE] = { .type = NLA_U8, },
};
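/* Translate generic netlink attributes into a fou_cfg, defaulting the
 * address family to AF_INET when FOU_ATTR_AF is absent.
 */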
static int parse_nl_config(struct genl_info *info,
                           struct fou_cfg *cfg)
{
        memset(cfg, 0, sizeof(*cfg));

        cfg->udp_config.family = AF_INET;

        if (info->attrs[FOU_ATTR_AF]) {
                u8 family = nla_get_u8(info->attrs[FOU_ATTR_AF]);

                if (family != AF_INET && family != AF_INET6)
                        return -EINVAL;

                cfg->udp_config.family = family;
        }

        if (info->attrs[FOU_ATTR_PORT]) {
                u16 port = nla_get_u16(info->attrs[FOU_ATTR_PORT]);

                cfg->udp_config.local_udp_port = port;
        }

        if (info->attrs[FOU_ATTR_IPPROTO])
                cfg->protocol = nla_get_u8(info->attrs[FOU_ATTR_IPPROTO]);

        if (info->attrs[FOU_ATTR_TYPE])
                cfg->type = nla_get_u8(info->attrs[FOU_ATTR_TYPE]);

        return 0;
}
static int fou_nl_cmd_add_port(struct sk_buff *skb, struct genl_info *info)
{
        struct fou_cfg cfg;
        int err;

        err = parse_nl_config(info, &cfg);
        if (err)
                return err;

        return fou_create(&init_net, &cfg, NULL);
}

static int fou_nl_cmd_rm_port(struct sk_buff *skb, struct genl_info *info)
{
        struct fou_cfg cfg;

        parse_nl_config(info, &cfg);

        return fou_destroy(&init_net, &cfg);
}

static const struct genl_ops fou_nl_ops[] = {
        {
                .cmd = FOU_CMD_ADD,
                .doit = fou_nl_cmd_add_port,
                .policy = fou_nl_policy,
                .flags = GENL_ADMIN_PERM,
        },
        {
                .cmd = FOU_CMD_DEL,
                .doit = fou_nl_cmd_rm_port,
                .policy = fou_nl_policy,
                .flags = GENL_ADMIN_PERM,
        },
};
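/* Module init/exit: register the generic netlink family on load; on unload,
 * unregister it and release every remaining FOU socket.
 */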
static int __init fou_init(void)
{
        int ret;

        ret = genl_register_family_with_ops(&fou_nl_family,
                                            fou_nl_ops);

        return ret;
}

static void __exit fou_fini(void)
{
        struct fou *fou, *next;

        genl_unregister_family(&fou_nl_family);

        /* Close all the FOU sockets */
        spin_lock(&fou_lock);
        list_for_each_entry_safe(fou, next, &fou_list, list)
                fou_release(fou);
        spin_unlock(&fou_lock);
}

module_init(fou_init);
module_exit(fou_fini);
MODULE_AUTHOR("Tom Herbert <therbert@google.com>");
MODULE_LICENSE("GPL");