geneve.c 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084
  1. /*
  2. * GENEVE: Generic Network Virtualization Encapsulation
  3. *
  4. * Copyright (c) 2015 Red Hat, Inc.
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License version 2 as
  8. * published by the Free Software Foundation.
  9. */
  10. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  11. #include <linux/kernel.h>
  12. #include <linux/module.h>
  13. #include <linux/netdevice.h>
  14. #include <linux/etherdevice.h>
  15. #include <linux/hash.h>
  16. #include <net/dst_metadata.h>
  17. #include <net/gro_cells.h>
  18. #include <net/rtnetlink.h>
  19. #include <net/geneve.h>
  20. #include <net/protocol.h>
  21. #define GENEVE_NETDEV_VER "0.6"
  22. #define GENEVE_UDP_PORT 6081
  23. #define GENEVE_N_VID (1u << 24)
  24. #define GENEVE_VID_MASK (GENEVE_N_VID - 1)
  25. #define VNI_HASH_BITS 10
  26. #define VNI_HASH_SIZE (1<<VNI_HASH_BITS)
  27. static bool log_ecn_error = true;
  28. module_param(log_ecn_error, bool, 0644);
  29. MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
  30. #define GENEVE_VER 0
  31. #define GENEVE_BASE_HLEN (sizeof(struct udphdr) + sizeof(struct genevehdr))
/* per-network namespace private data for this module */
struct geneve_net {
        struct list_head geneve_list;   /* all geneve devices in this netns */
        struct list_head sock_list;     /* all geneve UDP sockets in this netns */
};

/* pernet id handed to net_generic() to locate our per-netns state */
static int geneve_net_id;
/* Pseudo network device */
struct geneve_dev {
        struct hlist_node hlist;        /* vni hash table */
        struct net *net;                /* netns for packet i/o */
        struct net_device *dev;         /* netdev for geneve tunnel */
        struct geneve_sock *sock;       /* socket used for geneve tunnel */
        u8 vni[3];                      /* virtual network ID for tunnel */
        u8 ttl;                         /* TTL override */
        u8 tos;                         /* TOS override */
        struct sockaddr_in remote;      /* IPv4 address for link partner */
        struct list_head next;          /* geneve's per namespace list */
        __be16 dst_port;                /* destination UDP port, network order */
        bool collect_md;                /* externally-controlled (metadata) mode */
        struct gro_cells gro_cells;     /* per-cpu GRO receive queues */
};
/* One UDP socket shared by all geneve devices bound to the same port. */
struct geneve_sock {
        bool collect_md;                /* socket serves a collect_md device */
        struct list_head list;          /* entry in geneve_net::sock_list */
        struct socket *sock;            /* the underlying kernel UDP socket */
        struct rcu_head rcu;            /* deferred free via kfree_rcu() */
        int refcnt;                     /* device users; rtnl-protected */
        struct udp_offload udp_offloads;        /* GRO callbacks for this port */
        struct hlist_head vni_list[VNI_HASH_SIZE];      /* devices by VNI hash */
};
  62. static inline __u32 geneve_net_vni_hash(u8 vni[3])
  63. {
  64. __u32 vnid;
  65. vnid = (vni[0] << 16) | (vni[1] << 8) | vni[2];
  66. return hash_32(vnid, VNI_HASH_BITS);
  67. }
/* Convert the 24-bit on-wire VNI into the 64-bit big-endian tunnel id
 * (tun_id) used by the flow-based tunneling metadata: the VNI occupies
 * the top three bytes of the big-endian value.
 */
static __be64 vni_to_tunnel_id(const __u8 *vni)
{
#ifdef __BIG_ENDIAN
        return (vni[0] << 16) | (vni[1] << 8) | vni[2];
#else
        /* On little-endian hosts, build the big-endian layout by placing
         * each VNI byte at its wire position within the 64-bit word.
         */
        return (__force __be64)(((__force u64)vni[0] << 40) |
                                ((__force u64)vni[1] << 48) |
                                ((__force u64)vni[2] << 56));
#endif
}
  78. static struct geneve_dev *geneve_lookup(struct geneve_sock *gs,
  79. __be32 addr, u8 vni[])
  80. {
  81. struct hlist_head *vni_list_head;
  82. struct geneve_dev *geneve;
  83. __u32 hash;
  84. /* Find the device for this VNI */
  85. hash = geneve_net_vni_hash(vni);
  86. vni_list_head = &gs->vni_list[hash];
  87. hlist_for_each_entry_rcu(geneve, vni_list_head, hlist) {
  88. if (!memcmp(vni, geneve->vni, sizeof(geneve->vni)) &&
  89. addr == geneve->remote.sin_addr.s_addr)
  90. return geneve;
  91. }
  92. return NULL;
  93. }
/* The Geneve header immediately follows the outer UDP header. */
static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
{
        return (struct genevehdr *)(udp_hdr(skb) + 1);
}
/* geneve receive/decap routine.
 *
 * Called from geneve_udp_encap_recv() after the Geneve header has been
 * validated and pulled.  Matches the packet to a geneve device, optionally
 * attaches tunnel metadata, then hands the inner Ethernet frame up through
 * the device's GRO cells.  Consumes @skb on every path.
 */
static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb)
{
        struct genevehdr *gnvh = geneve_hdr(skb);
        struct metadata_dst *tun_dst = NULL;
        struct geneve_dev *geneve = NULL;
        struct pcpu_sw_netstats *stats;
        struct iphdr *iph;
        u8 *vni;
        __be32 addr;
        int err;

        iph = ip_hdr(skb); /* outer IP header... */

        if (gs->collect_md) {
                /* Metadata mode: one device accepts every VNI/peer, so
                 * look up with the wildcard (zero) VNI and address.
                 */
                static u8 zero_vni[3];

                vni = zero_vni;
                addr = 0;
        } else {
                vni = gnvh->vni;
                addr = iph->saddr;
        }

        geneve = geneve_lookup(gs, addr, vni);
        if (!geneve)
                goto drop;

        if (ip_tunnel_collect_metadata() || gs->collect_md) {
                __be16 flags;

                flags = TUNNEL_KEY | TUNNEL_GENEVE_OPT |
                        (gnvh->oam ? TUNNEL_OAM : 0) |
                        (gnvh->critical ? TUNNEL_CRIT_OPT : 0);

                /* Build a metadata dst carrying the outer addresses, the
                 * tunnel id derived from the on-wire VNI, and room for the
                 * Geneve options (opt_len is in 4-byte units).
                 */
                tun_dst = udp_tun_rx_dst(skb, AF_INET, flags,
                                         vni_to_tunnel_id(gnvh->vni),
                                         gnvh->opt_len * 4);
                if (!tun_dst)
                        goto drop;
                /* Update tunnel dst according to Geneve options. */
                ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
                                        gnvh->options, gnvh->opt_len * 4);
        } else {
                /* Drop packets w/ critical options,
                 * since we don't support any...
                 */
                if (gnvh->critical)
                        goto drop;
        }

        skb_reset_mac_header(skb);
        /* Scrub state when crossing netns boundaries. */
        skb_scrub_packet(skb, !net_eq(geneve->net, dev_net(geneve->dev)));
        skb->protocol = eth_type_trans(skb, geneve->dev);
        skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

        if (tun_dst)
                skb_dst_set(skb, &tun_dst->dst);

        /* Ignore packet loops (and multicast echo) */
        if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr))
                goto drop;

        skb_reset_network_header(skb);

        /* Propagate outer ECN into the inner header; err > 1 means the
         * packet must be dropped (non-ECT inner with CE outer).
         */
        err = IP_ECN_decapsulate(iph, skb);
        if (unlikely(err)) {
                if (log_ecn_error)
                        net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
                                             &iph->saddr, iph->tos);
                if (err > 1) {
                        ++geneve->dev->stats.rx_frame_errors;
                        ++geneve->dev->stats.rx_errors;
                        goto drop;
                }
        }

        stats = this_cpu_ptr(geneve->dev->tstats);
        u64_stats_update_begin(&stats->syncp);
        stats->rx_packets++;
        stats->rx_bytes += skb->len;
        u64_stats_update_end(&stats->syncp);

        gro_cells_receive(&geneve->gro_cells, skb);
        return;
drop:
        /* Consume bad packet */
        kfree_skb(skb);
}
  173. /* Setup stats when device is created */
  174. static int geneve_init(struct net_device *dev)
  175. {
  176. struct geneve_dev *geneve = netdev_priv(dev);
  177. int err;
  178. dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
  179. if (!dev->tstats)
  180. return -ENOMEM;
  181. err = gro_cells_init(&geneve->gro_cells, dev);
  182. if (err) {
  183. free_percpu(dev->tstats);
  184. return err;
  185. }
  186. return 0;
  187. }
  188. static void geneve_uninit(struct net_device *dev)
  189. {
  190. struct geneve_dev *geneve = netdev_priv(dev);
  191. gro_cells_destroy(&geneve->gro_cells);
  192. free_percpu(dev->tstats);
  193. }
/* Callback from net/ipv4/udp.c to receive packets.
 *
 * Returns 0 when the skb was consumed (decapsulated or dropped) and 1 to
 * hand the skb back to the regular UDP receive path.
 */
static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
        struct genevehdr *geneveh;
        struct geneve_sock *gs;
        int opts_len;

        /* Need Geneve and inner Ethernet header to be present */
        if (unlikely(!pskb_may_pull(skb, GENEVE_BASE_HLEN)))
                goto error;

        /* Return packets with reserved bits set */
        /* NOTE(review): only the version and protocol type are actually
         * checked here; the rsvd fields themselves are not inspected.
         */
        geneveh = geneve_hdr(skb);
        if (unlikely(geneveh->ver != GENEVE_VER))
                goto error;

        if (unlikely(geneveh->proto_type != htons(ETH_P_TEB)))
                goto error;

        /* opt_len is in 4-byte units; pull the full outer headers so the
         * skb points at the inner frame.
         */
        opts_len = geneveh->opt_len * 4;
        if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len,
                                 htons(ETH_P_TEB)))
                goto drop;

        gs = rcu_dereference_sk_user_data(sk);
        if (!gs)
                goto drop;

        geneve_rx(gs, skb);
        return 0;

drop:
        /* Consume bad packet */
        kfree_skb(skb);
        return 0;

error:
        /* Let the UDP layer deal with the skb */
        return 1;
}
  226. static struct socket *geneve_create_sock(struct net *net, bool ipv6,
  227. __be16 port)
  228. {
  229. struct socket *sock;
  230. struct udp_port_cfg udp_conf;
  231. int err;
  232. memset(&udp_conf, 0, sizeof(udp_conf));
  233. if (ipv6) {
  234. udp_conf.family = AF_INET6;
  235. } else {
  236. udp_conf.family = AF_INET;
  237. udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
  238. }
  239. udp_conf.local_udp_port = port;
  240. /* Open UDP socket */
  241. err = udp_sock_create(net, &udp_conf, &sock);
  242. if (err < 0)
  243. return ERR_PTR(err);
  244. return sock;
  245. }
  246. static void geneve_notify_add_rx_port(struct geneve_sock *gs)
  247. {
  248. struct sock *sk = gs->sock->sk;
  249. sa_family_t sa_family = sk->sk_family;
  250. int err;
  251. if (sa_family == AF_INET) {
  252. err = udp_add_offload(&gs->udp_offloads);
  253. if (err)
  254. pr_warn("geneve: udp_add_offload failed with status %d\n",
  255. err);
  256. }
  257. }
/* Total Geneve header length in bytes (fixed header plus options). */
static int geneve_hlen(struct genevehdr *gh)
{
        return sizeof(*gh) + gh->opt_len * 4;
}
/* GRO receive callback for the Geneve UDP port: validate the Geneve
 * header, mark flows with differing headers as distinct, and hand the
 * inner protocol to its own GRO handler.  A non-zero flush tells GRO
 * this skb cannot be merged.
 */
static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
                                           struct sk_buff *skb,
                                           struct udp_offload *uoff)
{
        struct sk_buff *p, **pp = NULL;
        struct genevehdr *gh, *gh2;
        unsigned int hlen, gh_len, off_gnv;
        const struct packet_offload *ptype;
        __be16 type;
        int flush = 1;

        /* First make just the fixed Geneve header available... */
        off_gnv = skb_gro_offset(skb);
        hlen = off_gnv + sizeof(*gh);
        gh = skb_gro_header_fast(skb, off_gnv);
        if (skb_gro_header_hard(skb, hlen)) {
                gh = skb_gro_header_slow(skb, hlen, off_gnv);
                if (unlikely(!gh))
                        goto out;
        }

        /* Unknown versions and OAM frames are not aggregated. */
        if (gh->ver != GENEVE_VER || gh->oam)
                goto out;
        gh_len = geneve_hlen(gh);

        /* ...then re-pull including the variable-length options. */
        hlen = off_gnv + gh_len;
        if (skb_gro_header_hard(skb, hlen)) {
                gh = skb_gro_header_slow(skb, hlen, off_gnv);
                if (unlikely(!gh))
                        goto out;
        }

        flush = 0;

        /* Packets whose Geneve header differs belong to different flows. */
        for (p = *head; p; p = p->next) {
                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;

                gh2 = (struct genevehdr *)(p->data + off_gnv);
                if (gh->opt_len != gh2->opt_len ||
                    memcmp(gh, gh2, gh_len)) {
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }
        }

        type = gh->proto_type;

        rcu_read_lock();
        ptype = gro_find_receive_by_type(type);
        if (!ptype) {
                flush = 1;
                goto out_unlock;
        }

        skb_gro_pull(skb, gh_len);
        skb_gro_postpull_rcsum(skb, gh, gh_len);
        pp = ptype->callbacks.gro_receive(head, skb);

out_unlock:
        rcu_read_unlock();
out:
        NAPI_GRO_CB(skb)->flush |= flush;

        return pp;
}
/* GRO complete callback: finish aggregation of the inner protocol
 * starting just past the Geneve header at @nhoff.
 */
static int geneve_gro_complete(struct sk_buff *skb, int nhoff,
                               struct udp_offload *uoff)
{
        struct genevehdr *gh;
        struct packet_offload *ptype;
        __be16 type;
        int gh_len;
        int err = -ENOSYS;      /* no handler found for the inner type */

        udp_tunnel_gro_complete(skb, nhoff);

        gh = (struct genevehdr *)(skb->data + nhoff);
        gh_len = geneve_hlen(gh);
        type = gh->proto_type;

        rcu_read_lock();
        ptype = gro_find_complete_by_type(type);
        if (ptype)
                err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);

        rcu_read_unlock();
        return err;
}
  335. /* Create new listen socket if needed */
  336. static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
  337. bool ipv6)
  338. {
  339. struct geneve_net *gn = net_generic(net, geneve_net_id);
  340. struct geneve_sock *gs;
  341. struct socket *sock;
  342. struct udp_tunnel_sock_cfg tunnel_cfg;
  343. int h;
  344. gs = kzalloc(sizeof(*gs), GFP_KERNEL);
  345. if (!gs)
  346. return ERR_PTR(-ENOMEM);
  347. sock = geneve_create_sock(net, ipv6, port);
  348. if (IS_ERR(sock)) {
  349. kfree(gs);
  350. return ERR_CAST(sock);
  351. }
  352. gs->sock = sock;
  353. gs->refcnt = 1;
  354. for (h = 0; h < VNI_HASH_SIZE; ++h)
  355. INIT_HLIST_HEAD(&gs->vni_list[h]);
  356. /* Initialize the geneve udp offloads structure */
  357. gs->udp_offloads.port = port;
  358. gs->udp_offloads.callbacks.gro_receive = geneve_gro_receive;
  359. gs->udp_offloads.callbacks.gro_complete = geneve_gro_complete;
  360. geneve_notify_add_rx_port(gs);
  361. /* Mark socket as an encapsulation socket */
  362. tunnel_cfg.sk_user_data = gs;
  363. tunnel_cfg.encap_type = 1;
  364. tunnel_cfg.encap_rcv = geneve_udp_encap_recv;
  365. tunnel_cfg.encap_destroy = NULL;
  366. setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
  367. list_add(&gs->list, &gn->sock_list);
  368. return gs;
  369. }
  370. static void geneve_notify_del_rx_port(struct geneve_sock *gs)
  371. {
  372. struct sock *sk = gs->sock->sk;
  373. sa_family_t sa_family = sk->sk_family;
  374. if (sa_family == AF_INET)
  375. udp_del_offload(&gs->udp_offloads);
  376. }
/* Drop one reference on @gs; on the last reference, unlink it, remove the
 * GRO offload, release the UDP socket and free the structure after a
 * grace period (receive path dereferences it under RCU).
 */
static void geneve_sock_release(struct geneve_sock *gs)
{
        if (--gs->refcnt)
                return;

        list_del(&gs->list);
        geneve_notify_del_rx_port(gs);
        udp_tunnel_sock_release(gs->sock);
        kfree_rcu(gs, rcu);
}
  386. static struct geneve_sock *geneve_find_sock(struct geneve_net *gn,
  387. __be16 dst_port)
  388. {
  389. struct geneve_sock *gs;
  390. list_for_each_entry(gs, &gn->sock_list, list) {
  391. if (inet_sk(gs->sock->sk)->inet_sport == dst_port &&
  392. inet_sk(gs->sock->sk)->sk.sk_family == AF_INET) {
  393. return gs;
  394. }
  395. }
  396. return NULL;
  397. }
/* ndo_open: attach the device to the shared socket for its port,
 * creating the socket if this is the first device on that port, and hash
 * the device into the socket's VNI table.
 */
static int geneve_open(struct net_device *dev)
{
        struct geneve_dev *geneve = netdev_priv(dev);
        struct net *net = geneve->net;
        struct geneve_net *gn = net_generic(net, geneve_net_id);
        struct geneve_sock *gs;
        __u32 hash;

        /* Reuse an existing socket on the same port if there is one. */
        gs = geneve_find_sock(gn, geneve->dst_port);
        if (gs) {
                gs->refcnt++;
                goto out;
        }

        gs = geneve_socket_create(net, geneve->dst_port, false);
        if (IS_ERR(gs))
                return PTR_ERR(gs);

out:
        /* Safe to propagate: geneve_configure() forbids mixing collect_md
         * and non-collect_md devices on one port.
         */
        gs->collect_md = geneve->collect_md;
        geneve->sock = gs;

        hash = geneve_net_vni_hash(geneve->vni);
        hlist_add_head_rcu(&geneve->hlist, &gs->vni_list[hash]);
        return 0;
}
  420. static int geneve_stop(struct net_device *dev)
  421. {
  422. struct geneve_dev *geneve = netdev_priv(dev);
  423. struct geneve_sock *gs = geneve->sock;
  424. if (!hlist_unhashed(&geneve->hlist))
  425. hlist_del_rcu(&geneve->hlist);
  426. geneve_sock_release(gs);
  427. return 0;
  428. }
/* Prepend the Geneve header (with @opt_len bytes of options from @opt)
 * to @skb and prepare it for UDP tunnel transmission.
 *
 * Ownership: on failure both @skb and @rt are released here and a
 * negative errno is returned; on success the caller keeps both.
 */
static int geneve_build_skb(struct rtable *rt, struct sk_buff *skb,
                            __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
                            bool csum)
{
        struct genevehdr *gnvh;
        int min_headroom;
        int err;

        /* Room for link-layer, outer IP, UDP and Geneve headers. */
        min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
                        + GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr);

        err = skb_cow_head(skb, min_headroom);
        if (unlikely(err)) {
                kfree_skb(skb);
                goto free_rt;
        }

        /* udp_tunnel_handle_offloads() frees the skb itself on error. */
        skb = udp_tunnel_handle_offloads(skb, csum);
        if (IS_ERR(skb)) {
                err = PTR_ERR(skb);
                goto free_rt;
        }

        gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
        gnvh->ver = GENEVE_VER;
        gnvh->opt_len = opt_len / 4;    /* on-wire unit is 4 bytes */
        gnvh->oam = !!(tun_flags & TUNNEL_OAM);
        gnvh->critical = !!(tun_flags & TUNNEL_CRIT_OPT);
        gnvh->rsvd1 = 0;
        memcpy(gnvh->vni, vni, 3);
        gnvh->proto_type = htons(ETH_P_TEB);
        gnvh->rsvd2 = 0;
        memcpy(gnvh->options, opt, opt_len);

        skb_set_inner_protocol(skb, htons(ETH_P_TEB));
        return 0;

free_rt:
        ip_rt_put(rt);
        return err;
}
/* Resolve the outer IPv4 route for a geneve packet, filling @fl4 as a
 * side effect.  With @info (collect_md mode) the per-packet metadata
 * supplies addresses and TOS; otherwise the device configuration is used.
 * Returns the route or an ERR_PTR.
 */
static struct rtable *geneve_get_rt(struct sk_buff *skb,
                                    struct net_device *dev,
                                    struct flowi4 *fl4,
                                    struct ip_tunnel_info *info)
{
        struct geneve_dev *geneve = netdev_priv(dev);
        struct rtable *rt = NULL;
        __u8 tos;

        memset(fl4, 0, sizeof(*fl4));
        fl4->flowi4_mark = skb->mark;
        fl4->flowi4_proto = IPPROTO_UDP;

        if (info) {
                fl4->daddr = info->key.u.ipv4.dst;
                fl4->saddr = info->key.u.ipv4.src;
                fl4->flowi4_tos = RT_TOS(info->key.tos);
        } else {
                tos = geneve->tos;
                if (tos == 1) {
                        /* tos value 1 appears to act as an "inherit from
                         * inner packet" sentinel here — TODO(review):
                         * confirm against IFLA_GENEVE_TOS semantics.
                         */
                        const struct iphdr *iip = ip_hdr(skb);

                        tos = ip_tunnel_get_dsfield(iip, skb);
                }

                fl4->flowi4_tos = RT_TOS(tos);
                fl4->daddr = geneve->remote.sin_addr.s_addr;
        }

        rt = ip_route_output_key(geneve->net, fl4);
        if (IS_ERR(rt)) {
                netdev_dbg(dev, "no route to %pI4\n", &fl4->daddr);
                return ERR_PTR(-ENETUNREACH);
        }
        if (rt->dst.dev == dev) { /* is this necessary? */
                netdev_dbg(dev, "circular route to %pI4\n", &fl4->daddr);
                ip_rt_put(rt);
                return ERR_PTR(-ELOOP);
        }
        return rt;
}
/* Convert 64 bit tunnel ID to 24 bit VNI.
 * Inverse of vni_to_tunnel_id(): extracts the top three bytes of the
 * big-endian tunnel id into the on-wire VNI byte order.
 */
static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni)
{
#ifdef __BIG_ENDIAN
        vni[0] = (__force __u8)(tun_id >> 16);
        vni[1] = (__force __u8)(tun_id >> 8);
        vni[2] = (__force __u8)tun_id;
#else
        vni[0] = (__force __u8)((__force u64)tun_id >> 40);
        vni[1] = (__force __u8)((__force u64)tun_id >> 48);
        vni[2] = (__force __u8)((__force u64)tun_id >> 56);
#endif
}
  513. static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
  514. {
  515. struct geneve_dev *geneve = netdev_priv(dev);
  516. struct geneve_sock *gs = geneve->sock;
  517. struct ip_tunnel_info *info = NULL;
  518. struct rtable *rt = NULL;
  519. const struct iphdr *iip; /* interior IP header */
  520. int err = -EINVAL;
  521. struct flowi4 fl4;
  522. __u8 tos, ttl;
  523. __be16 sport;
  524. bool udp_csum;
  525. __be16 df;
  526. if (geneve->collect_md) {
  527. info = skb_tunnel_info(skb);
  528. if (unlikely(info && !(info->mode & IP_TUNNEL_INFO_TX))) {
  529. netdev_dbg(dev, "no tunnel metadata\n");
  530. goto tx_error;
  531. }
  532. if (info && ip_tunnel_info_af(info) != AF_INET)
  533. goto tx_error;
  534. }
  535. rt = geneve_get_rt(skb, dev, &fl4, info);
  536. if (IS_ERR(rt)) {
  537. netdev_dbg(dev, "no route to %pI4\n", &fl4.daddr);
  538. err = PTR_ERR(rt);
  539. goto tx_error;
  540. }
  541. sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
  542. skb_reset_mac_header(skb);
  543. iip = ip_hdr(skb);
  544. if (info) {
  545. const struct ip_tunnel_key *key = &info->key;
  546. u8 *opts = NULL;
  547. u8 vni[3];
  548. tunnel_id_to_vni(key->tun_id, vni);
  549. if (key->tun_flags & TUNNEL_GENEVE_OPT)
  550. opts = ip_tunnel_info_opts(info);
  551. udp_csum = !!(key->tun_flags & TUNNEL_CSUM);
  552. err = geneve_build_skb(rt, skb, key->tun_flags, vni,
  553. info->options_len, opts, udp_csum);
  554. if (unlikely(err))
  555. goto err;
  556. tos = ip_tunnel_ecn_encap(key->tos, iip, skb);
  557. ttl = key->ttl;
  558. df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
  559. } else {
  560. udp_csum = false;
  561. err = geneve_build_skb(rt, skb, 0, geneve->vni,
  562. 0, NULL, udp_csum);
  563. if (unlikely(err))
  564. goto err;
  565. tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, iip, skb);
  566. ttl = geneve->ttl;
  567. if (!ttl && IN_MULTICAST(ntohl(fl4.daddr)))
  568. ttl = 1;
  569. ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
  570. df = 0;
  571. }
  572. err = udp_tunnel_xmit_skb(rt, gs->sock->sk, skb, fl4.saddr, fl4.daddr,
  573. tos, ttl, df, sport, geneve->dst_port,
  574. !net_eq(geneve->net, dev_net(geneve->dev)),
  575. !udp_csum);
  576. iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
  577. return NETDEV_TX_OK;
  578. tx_error:
  579. dev_kfree_skb(skb);
  580. err:
  581. if (err == -ELOOP)
  582. dev->stats.collisions++;
  583. else if (err == -ENETUNREACH)
  584. dev->stats.tx_carrier_errors++;
  585. else
  586. dev->stats.tx_errors++;
  587. return NETDEV_TX_OK;
  588. }
  589. static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
  590. {
  591. struct ip_tunnel_info *info = skb_tunnel_info(skb);
  592. struct geneve_dev *geneve = netdev_priv(dev);
  593. struct rtable *rt;
  594. struct flowi4 fl4;
  595. if (ip_tunnel_info_af(info) != AF_INET)
  596. return -EINVAL;
  597. rt = geneve_get_rt(skb, dev, &fl4, info);
  598. if (IS_ERR(rt))
  599. return PTR_ERR(rt);
  600. ip_rt_put(rt);
  601. info->key.u.ipv4.src = fl4.saddr;
  602. info->key.tp_src = udp_flow_src_port(geneve->net, skb,
  603. 1, USHRT_MAX, true);
  604. info->key.tp_dst = geneve->dst_port;
  605. return 0;
  606. }
/* Netdev callbacks for geneve devices. */
static const struct net_device_ops geneve_netdev_ops = {
        .ndo_init               = geneve_init,
        .ndo_uninit             = geneve_uninit,
        .ndo_open               = geneve_open,
        .ndo_stop               = geneve_stop,
        .ndo_start_xmit         = geneve_xmit,
        .ndo_get_stats64        = ip_tunnel_get_stats64,
        .ndo_change_mtu         = eth_change_mtu,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_fill_metadata_dst  = geneve_fill_metadata_dst,
};
  619. static void geneve_get_drvinfo(struct net_device *dev,
  620. struct ethtool_drvinfo *drvinfo)
  621. {
  622. strlcpy(drvinfo->version, GENEVE_NETDEV_VER, sizeof(drvinfo->version));
  623. strlcpy(drvinfo->driver, "geneve", sizeof(drvinfo->driver));
  624. }
/* ethtool callbacks for geneve devices. */
static const struct ethtool_ops geneve_ethtool_ops = {
        .get_drvinfo    = geneve_get_drvinfo,
        .get_link       = ethtool_op_get_link,
};
/* Info for udev, that this is a virtual tunnel endpoint */
static struct device_type geneve_type = {
        .name = "geneve",
};
  633. /* Initialize the device structure. */
  634. static void geneve_setup(struct net_device *dev)
  635. {
  636. ether_setup(dev);
  637. dev->netdev_ops = &geneve_netdev_ops;
  638. dev->ethtool_ops = &geneve_ethtool_ops;
  639. dev->destructor = free_netdev;
  640. SET_NETDEV_DEVTYPE(dev, &geneve_type);
  641. dev->features |= NETIF_F_LLTX;
  642. dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
  643. dev->features |= NETIF_F_RXCSUM;
  644. dev->features |= NETIF_F_GSO_SOFTWARE;
  645. dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
  646. dev->hw_features |= NETIF_F_GSO_SOFTWARE;
  647. netif_keep_dst(dev);
  648. dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
  649. eth_hw_addr_random(dev);
  650. }
/* Netlink attribute validation policy for geneve links. */
static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
        [IFLA_GENEVE_ID]                = { .type = NLA_U32 },
        [IFLA_GENEVE_REMOTE]            = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
        [IFLA_GENEVE_TTL]               = { .type = NLA_U8 },
        [IFLA_GENEVE_TOS]               = { .type = NLA_U8 },
        [IFLA_GENEVE_PORT]              = { .type = NLA_U16 },
        [IFLA_GENEVE_COLLECT_METADATA]  = { .type = NLA_FLAG },
};
  659. static int geneve_validate(struct nlattr *tb[], struct nlattr *data[])
  660. {
  661. if (tb[IFLA_ADDRESS]) {
  662. if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
  663. return -EINVAL;
  664. if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
  665. return -EADDRNOTAVAIL;
  666. }
  667. if (!data)
  668. return -EINVAL;
  669. if (data[IFLA_GENEVE_ID]) {
  670. __u32 vni = nla_get_u32(data[IFLA_GENEVE_ID]);
  671. if (vni >= GENEVE_VID_MASK)
  672. return -ERANGE;
  673. }
  674. return 0;
  675. }
  676. static struct geneve_dev *geneve_find_dev(struct geneve_net *gn,
  677. __be16 dst_port,
  678. __be32 rem_addr,
  679. u8 vni[],
  680. bool *tun_on_same_port,
  681. bool *tun_collect_md)
  682. {
  683. struct geneve_dev *geneve, *t;
  684. *tun_on_same_port = false;
  685. *tun_collect_md = false;
  686. t = NULL;
  687. list_for_each_entry(geneve, &gn->geneve_list, next) {
  688. if (geneve->dst_port == dst_port) {
  689. *tun_collect_md = geneve->collect_md;
  690. *tun_on_same_port = true;
  691. }
  692. if (!memcmp(vni, geneve->vni, sizeof(geneve->vni)) &&
  693. rem_addr == geneve->remote.sin_addr.s_addr &&
  694. dst_port == geneve->dst_port)
  695. t = geneve;
  696. }
  697. return t;
  698. }
/* Fill in the private data of a freshly created geneve netdev and
 * register it.  Enforces that a (port, remote, vni) tuple is unique and
 * that collect_md and non-collect_md devices never share a UDP port.
 */
static int geneve_configure(struct net *net, struct net_device *dev,
                            __be32 rem_addr, __u32 vni, __u8 ttl, __u8 tos,
                            __be16 dst_port, bool metadata)
{
        struct geneve_net *gn = net_generic(net, geneve_net_id);
        struct geneve_dev *t, *geneve = netdev_priv(dev);
        bool tun_collect_md, tun_on_same_port;
        int err;

        if (metadata) {
                /* collect_md devices take everything per-packet; static
                 * tunnel parameters make no sense.
                 */
                if (rem_addr || vni || tos || ttl)
                        return -EINVAL;
        }

        geneve->net = net;
        geneve->dev = dev;

        /* Split the 24-bit VNI into on-wire byte order. */
        geneve->vni[0] = (vni & 0x00ff0000) >> 16;
        geneve->vni[1] = (vni & 0x0000ff00) >> 8;
        geneve->vni[2] =  vni & 0x000000ff;

        geneve->remote.sin_addr.s_addr = rem_addr;
        if (IN_MULTICAST(ntohl(geneve->remote.sin_addr.s_addr)))
                return -EINVAL;

        geneve->ttl = ttl;
        geneve->tos = tos;
        geneve->dst_port = dst_port;
        geneve->collect_md = metadata;

        t = geneve_find_dev(gn, dst_port, rem_addr, geneve->vni,
                            &tun_on_same_port, &tun_collect_md);
        if (t)
                return -EBUSY;

        if (metadata) {
                if (tun_on_same_port)
                        return -EPERM;
        } else {
                if (tun_collect_md)
                        return -EPERM;
        }

        err = register_netdevice(dev);
        if (err)
                return err;

        list_add(&geneve->next, &gn->geneve_list);
        return 0;
}
  740. static int geneve_newlink(struct net *net, struct net_device *dev,
  741. struct nlattr *tb[], struct nlattr *data[])
  742. {
  743. __be16 dst_port = htons(GENEVE_UDP_PORT);
  744. __u8 ttl = 0, tos = 0;
  745. bool metadata = false;
  746. __be32 rem_addr = 0;
  747. __u32 vni = 0;
  748. if (data[IFLA_GENEVE_ID])
  749. vni = nla_get_u32(data[IFLA_GENEVE_ID]);
  750. if (data[IFLA_GENEVE_REMOTE])
  751. rem_addr = nla_get_in_addr(data[IFLA_GENEVE_REMOTE]);
  752. if (data[IFLA_GENEVE_TTL])
  753. ttl = nla_get_u8(data[IFLA_GENEVE_TTL]);
  754. if (data[IFLA_GENEVE_TOS])
  755. tos = nla_get_u8(data[IFLA_GENEVE_TOS]);
  756. if (data[IFLA_GENEVE_PORT])
  757. dst_port = nla_get_be16(data[IFLA_GENEVE_PORT]);
  758. if (data[IFLA_GENEVE_COLLECT_METADATA])
  759. metadata = true;
  760. return geneve_configure(net, dev, rem_addr, vni,
  761. ttl, tos, dst_port, metadata);
  762. }
/* rtnl dellink callback: unlink from the per-netns list and queue the
 * netdev for unregistration.
 */
static void geneve_dellink(struct net_device *dev, struct list_head *head)
{
        struct geneve_dev *geneve = netdev_priv(dev);

        list_del(&geneve->next);
        unregister_netdevice_queue(dev, head);
}
/* Worst-case netlink attribute payload for geneve_fill_info(). */
static size_t geneve_get_size(const struct net_device *dev)
{
        return nla_total_size(sizeof(__u32)) +  /* IFLA_GENEVE_ID */
                nla_total_size(sizeof(struct in_addr)) + /* IFLA_GENEVE_REMOTE */
                nla_total_size(sizeof(__u8)) +  /* IFLA_GENEVE_TTL */
                nla_total_size(sizeof(__u8)) +  /* IFLA_GENEVE_TOS */
                nla_total_size(sizeof(__be16)) +  /* IFLA_GENEVE_PORT */
                nla_total_size(0) +      /* IFLA_GENEVE_COLLECT_METADATA */
                0;
}
/* rtnl fill_info callback: dump the device configuration as netlink
 * attributes.  Returns 0 or -EMSGSIZE if @skb runs out of room.
 */
static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
        struct geneve_dev *geneve = netdev_priv(dev);
        __u32 vni;

        /* Reassemble the 24-bit VNI from its on-wire bytes. */
        vni = (geneve->vni[0] << 16) | (geneve->vni[1] << 8) | geneve->vni[2];
        if (nla_put_u32(skb, IFLA_GENEVE_ID, vni))
                goto nla_put_failure;

        if (nla_put_in_addr(skb, IFLA_GENEVE_REMOTE,
                            geneve->remote.sin_addr.s_addr))
                goto nla_put_failure;

        if (nla_put_u8(skb, IFLA_GENEVE_TTL, geneve->ttl) ||
            nla_put_u8(skb, IFLA_GENEVE_TOS, geneve->tos))
                goto nla_put_failure;

        if (nla_put_be16(skb, IFLA_GENEVE_PORT, geneve->dst_port))
                goto nla_put_failure;

        if (geneve->collect_md) {
                if (nla_put_flag(skb, IFLA_GENEVE_COLLECT_METADATA))
                        goto nla_put_failure;
        }

        return 0;

nla_put_failure:
        return -EMSGSIZE;
}
/* rtnetlink ops describing the "geneve" link type. */
static struct rtnl_link_ops geneve_link_ops __read_mostly = {
        .kind           = "geneve",
        .maxtype        = IFLA_GENEVE_MAX,
        .policy         = geneve_policy,
        .priv_size      = sizeof(struct geneve_dev),
        .setup          = geneve_setup,
        .validate       = geneve_validate,
        .newlink        = geneve_newlink,
        .dellink        = geneve_dellink,
        .get_size       = geneve_get_size,
        .fill_info      = geneve_fill_info,
};
/* Create a flow-based (collect_md) geneve device for in-kernel users
 * (e.g. openvswitch).  @dst_port is in host byte order.  Returns the new
 * netdev or an ERR_PTR; the caller owns the device.
 */
struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
                                        u8 name_assign_type, u16 dst_port)
{
        struct nlattr *tb[IFLA_MAX + 1];
        struct net_device *dev;
        int err;

        memset(tb, 0, sizeof(tb));
        dev = rtnl_create_link(net, name, name_assign_type,
                               &geneve_link_ops, tb);
        if (IS_ERR(dev))
                return dev;

        /* metadata mode: all static tunnel parameters are zero */
        err = geneve_configure(net, dev, 0, 0, 0, 0, htons(dst_port), true);
        if (err) {
                free_netdev(dev);
                return ERR_PTR(err);
        }
        return dev;
}
EXPORT_SYMBOL_GPL(geneve_dev_create_fb);
  833. static __net_init int geneve_init_net(struct net *net)
  834. {
  835. struct geneve_net *gn = net_generic(net, geneve_net_id);
  836. INIT_LIST_HEAD(&gn->geneve_list);
  837. INIT_LIST_HEAD(&gn->sock_list);
  838. return 0;
  839. }
/* Per-netns teardown: unregister every geneve device visible from this
 * netns, whether it currently lives here or was created here and moved.
 */
static void __net_exit geneve_exit_net(struct net *net)
{
        struct geneve_net *gn = net_generic(net, geneve_net_id);
        struct geneve_dev *geneve, *next;
        struct net_device *dev, *aux;
        LIST_HEAD(list);

        rtnl_lock();

        /* gather any geneve devices that were moved into this ns */
        for_each_netdev_safe(net, dev, aux)
                if (dev->rtnl_link_ops == &geneve_link_ops)
                        unregister_netdevice_queue(dev, &list);

        /* now gather any other geneve devices that were created in this ns */
        list_for_each_entry_safe(geneve, next, &gn->geneve_list, next) {
                /* If geneve->dev is in the same netns, it was already added
                 * to the list by the previous loop.
                 */
                if (!net_eq(dev_net(geneve->dev), net))
                        unregister_netdevice_queue(geneve->dev, &list);
        }

        /* unregister the devices gathered above */
        unregister_netdevice_many(&list);
        rtnl_unlock();
}
/* Per-network-namespace lifecycle hooks. */
static struct pernet_operations geneve_net_ops = {
        .init = geneve_init_net,
        .exit = geneve_exit_net,
        .id   = &geneve_net_id,
        .size = sizeof(struct geneve_net),
};
  869. static int __init geneve_init_module(void)
  870. {
  871. int rc;
  872. rc = register_pernet_subsys(&geneve_net_ops);
  873. if (rc)
  874. goto out1;
  875. rc = rtnl_link_register(&geneve_link_ops);
  876. if (rc)
  877. goto out2;
  878. return 0;
  879. out2:
  880. unregister_pernet_subsys(&geneve_net_ops);
  881. out1:
  882. return rc;
  883. }
  884. late_initcall(geneve_init_module);
/* Module exit: tear down in reverse order of geneve_init_module(). */
static void __exit geneve_cleanup_module(void)
{
        rtnl_link_unregister(&geneve_link_ops);
        unregister_pernet_subsys(&geneve_net_ops);
}
module_exit(geneve_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_VERSION(GENEVE_NETDEV_VER);
MODULE_AUTHOR("John W. Linville <linville@tuxdriver.com>");
MODULE_DESCRIPTION("Interface driver for GENEVE encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("geneve");