/*
 * GENEVE: Generic Network Virtualization Encapsulation
 *
 * Copyright (c) 2015 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/hash.h>
#include <net/dst_metadata.h>
#include <net/gro_cells.h>
#include <net/rtnetlink.h>
#include <net/geneve.h>
#include <net/protocol.h>

#define GENEVE_NETDEV_VER "0.6"

#define GENEVE_UDP_PORT 6081

#define GENEVE_N_VID (1u << 24)
#define GENEVE_VID_MASK (GENEVE_N_VID - 1)

#define VNI_HASH_BITS 10
#define VNI_HASH_SIZE (1<<VNI_HASH_BITS)

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

#define GENEVE_VER 0
#define GENEVE_BASE_HLEN (sizeof(struct udphdr) + sizeof(struct genevehdr))
/* per-network namespace private data for this module */
struct geneve_net {
	struct list_head geneve_list;
	struct list_head sock_list;
};

static int geneve_net_id;

/* Pseudo network device */
struct geneve_dev {
	struct hlist_node hlist;	/* vni hash table */
	struct net *net;		/* netns for packet i/o */
	struct net_device *dev;		/* netdev for geneve tunnel */
	struct geneve_sock *sock;	/* socket used for geneve tunnel */
	u8 vni[3];			/* virtual network ID for tunnel */
	u8 ttl;				/* TTL override */
	u8 tos;				/* TOS override */
	struct sockaddr_in remote;	/* IPv4 address for link partner */
	struct list_head next;		/* geneve's per namespace list */
	__be16 dst_port;
	bool collect_md;
	struct gro_cells gro_cells;
};

struct geneve_sock {
	bool collect_md;
	struct list_head list;
	struct socket *sock;
	struct rcu_head rcu;
	int refcnt;
	struct udp_offload udp_offloads;
	struct hlist_head vni_list[VNI_HASH_SIZE];
};
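
/* Hash the 24-bit VNI for indexing into a socket's vni_list table */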
static inline __u32 geneve_net_vni_hash(u8 vni[3])
{
	__u32 vnid;

	vnid = (vni[0] << 16) | (vni[1] << 8) | vni[2];
	return hash_32(vnid, VNI_HASH_BITS);
}
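
/* Convert 24 bit VNI to 64 bit tunnel ID. */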
static __be64 vni_to_tunnel_id(const __u8 *vni)
{
#ifdef __BIG_ENDIAN
	return (vni[0] << 16) | (vni[1] << 8) | vni[2];
#else
	return (__force __be64)(((__force u64)vni[0] << 40) |
				((__force u64)vni[1] << 48) |
				((__force u64)vni[2] << 56));
#endif
}
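
/* Find the geneve device on this socket matching the VNI and remote address */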
static struct geneve_dev *geneve_lookup(struct geneve_sock *gs,
					__be32 addr, u8 vni[])
{
	struct hlist_head *vni_list_head;
	struct geneve_dev *geneve;
	__u32 hash;

	/* Find the device for this VNI */
	hash = geneve_net_vni_hash(vni);
	vni_list_head = &gs->vni_list[hash];
	hlist_for_each_entry_rcu(geneve, vni_list_head, hlist) {
		if (!memcmp(vni, geneve->vni, sizeof(geneve->vni)) &&
		    addr == geneve->remote.sin_addr.s_addr)
			return geneve;
	}
	return NULL;
}

static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
{
	return (struct genevehdr *)(udp_hdr(skb) + 1);
}
/* geneve receive/decap routine */
static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb)
{
	struct genevehdr *gnvh = geneve_hdr(skb);
	struct metadata_dst *tun_dst = NULL;
	struct geneve_dev *geneve = NULL;
	struct pcpu_sw_netstats *stats;
	struct iphdr *iph;
	u8 *vni;
	__be32 addr;
	int err;

	iph = ip_hdr(skb); /* outer IP header... */

	if (gs->collect_md) {
		static u8 zero_vni[3];

		vni = zero_vni;
		addr = 0;
	} else {
		vni = gnvh->vni;
		addr = iph->saddr;
	}

	geneve = geneve_lookup(gs, addr, vni);
	if (!geneve)
		goto drop;

	if (ip_tunnel_collect_metadata() || gs->collect_md) {
		__be16 flags;

		flags = TUNNEL_KEY | TUNNEL_GENEVE_OPT |
			(gnvh->oam ? TUNNEL_OAM : 0) |
			(gnvh->critical ? TUNNEL_CRIT_OPT : 0);

		tun_dst = udp_tun_rx_dst(skb, AF_INET, flags,
					 vni_to_tunnel_id(gnvh->vni),
					 gnvh->opt_len * 4);
		if (!tun_dst)
			goto drop;

		/* Update tunnel dst according to Geneve options. */
		ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
					gnvh->options, gnvh->opt_len * 4);
	} else {
		/* Drop packets w/ critical options,
		 * since we don't support any...
		 */
		if (gnvh->critical)
			goto drop;
	}

	skb_reset_mac_header(skb);
	skb_scrub_packet(skb, !net_eq(geneve->net, dev_net(geneve->dev)));
	skb->protocol = eth_type_trans(skb, geneve->dev);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	if (tun_dst)
		skb_dst_set(skb, &tun_dst->dst);

	/* Ignore packet loops (and multicast echo) */
	if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr))
		goto drop;

	skb_reset_network_header(skb);

	err = IP_ECN_decapsulate(iph, skb);
	if (unlikely(err)) {
		if (log_ecn_error)
			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
					     &iph->saddr, iph->tos);
		if (err > 1) {
			++geneve->dev->stats.rx_frame_errors;
			++geneve->dev->stats.rx_errors;
			goto drop;
		}
	}

	stats = this_cpu_ptr(geneve->dev->tstats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	gro_cells_receive(&geneve->gro_cells, skb);
	return;

drop:
	/* Consume bad packet */
	kfree_skb(skb);
}
/* Setup stats when device is created */
static int geneve_init(struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	int err;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = gro_cells_init(&geneve->gro_cells, dev);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	return 0;
}

static void geneve_uninit(struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);

	gro_cells_destroy(&geneve->gro_cells);
	free_percpu(dev->tstats);
}
/* Callback from net/ipv4/udp.c to receive packets */
static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct genevehdr *geneveh;
	struct geneve_sock *gs;
	int opts_len;

	/* Need Geneve and inner Ethernet header to be present */
	if (unlikely(!pskb_may_pull(skb, GENEVE_BASE_HLEN)))
		goto error;

	/* Return packets with reserved bits set */
	geneveh = geneve_hdr(skb);
	if (unlikely(geneveh->ver != GENEVE_VER))
		goto error;

	if (unlikely(geneveh->proto_type != htons(ETH_P_TEB)))
		goto error;

	opts_len = geneveh->opt_len * 4;
	if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len,
				 htons(ETH_P_TEB)))
		goto drop;

	gs = rcu_dereference_sk_user_data(sk);
	if (!gs)
		goto drop;

	geneve_rx(gs, skb);
	return 0;

drop:
	/* Consume bad packet */
	kfree_skb(skb);
	return 0;

error:
	/* Let the UDP layer deal with the skb */
	return 1;
}
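
/* Open a kernel UDP socket bound to the given port for tunnel traffic */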
static struct socket *geneve_create_sock(struct net *net, bool ipv6,
					 __be16 port)
{
	struct socket *sock;
	struct udp_port_cfg udp_conf;
	int err;

	memset(&udp_conf, 0, sizeof(udp_conf));

	if (ipv6) {
		udp_conf.family = AF_INET6;
	} else {
		udp_conf.family = AF_INET;
		udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
	}

	udp_conf.local_udp_port = port;

	/* Open UDP socket */
	err = udp_sock_create(net, &udp_conf, &sock);
	if (err < 0)
		return ERR_PTR(err);

	return sock;
}
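
/* Register the tunnel port with the UDP GRO offload framework (IPv4 only) */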
static void geneve_notify_add_rx_port(struct geneve_sock *gs)
{
	struct sock *sk = gs->sock->sk;
	sa_family_t sa_family = sk->sk_family;
	int err;

	if (sa_family == AF_INET) {
		err = udp_add_offload(&gs->udp_offloads);
		if (err)
			pr_warn("geneve: udp_add_offload failed with status %d\n",
				err);
	}
}

static int geneve_hlen(struct genevehdr *gh)
{
	return sizeof(*gh) + gh->opt_len * 4;
}
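
/* GRO receive: match Geneve headers across held packets and hand the
 * inner frame to its protocol's GRO handler.
 */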
static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
					   struct sk_buff *skb,
					   struct udp_offload *uoff)
{
	struct sk_buff *p, **pp = NULL;
	struct genevehdr *gh, *gh2;
	unsigned int hlen, gh_len, off_gnv;
	const struct packet_offload *ptype;
	__be16 type;
	int flush = 1;

	off_gnv = skb_gro_offset(skb);
	hlen = off_gnv + sizeof(*gh);
	gh = skb_gro_header_fast(skb, off_gnv);
	if (skb_gro_header_hard(skb, hlen)) {
		gh = skb_gro_header_slow(skb, hlen, off_gnv);
		if (unlikely(!gh))
			goto out;
	}

	if (gh->ver != GENEVE_VER || gh->oam)
		goto out;
	gh_len = geneve_hlen(gh);

	hlen = off_gnv + gh_len;
	if (skb_gro_header_hard(skb, hlen)) {
		gh = skb_gro_header_slow(skb, hlen, off_gnv);
		if (unlikely(!gh))
			goto out;
	}

	flush = 0;

	for (p = *head; p; p = p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		gh2 = (struct genevehdr *)(p->data + off_gnv);
		if (gh->opt_len != gh2->opt_len ||
		    memcmp(gh, gh2, gh_len)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	type = gh->proto_type;

	rcu_read_lock();
	ptype = gro_find_receive_by_type(type);
	if (!ptype) {
		flush = 1;
		goto out_unlock;
	}

	skb_gro_pull(skb, gh_len);
	skb_gro_postpull_rcsum(skb, gh, gh_len);
	pp = ptype->callbacks.gro_receive(head, skb);

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}
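
/* GRO complete: let the inner protocol finish aggregation after
 * udp_tunnel_gro_complete() has marked the skb's GSO type.
 */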
static int geneve_gro_complete(struct sk_buff *skb, int nhoff,
			       struct udp_offload *uoff)
{
	struct genevehdr *gh;
	struct packet_offload *ptype;
	__be16 type;
	int gh_len;
	int err = -ENOSYS;

	udp_tunnel_gro_complete(skb, nhoff);

	gh = (struct genevehdr *)(skb->data + nhoff);
	gh_len = geneve_hlen(gh);
	type = gh->proto_type;

	rcu_read_lock();
	ptype = gro_find_complete_by_type(type);
	if (ptype)
		err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);

	rcu_read_unlock();
	return err;
}
/* Create new listen socket if needed */
static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
						bool ipv6)
{
	struct geneve_net *gn = net_generic(net, geneve_net_id);
	struct geneve_sock *gs;
	struct socket *sock;
	struct udp_tunnel_sock_cfg tunnel_cfg;
	int h;

	gs = kzalloc(sizeof(*gs), GFP_KERNEL);
	if (!gs)
		return ERR_PTR(-ENOMEM);

	sock = geneve_create_sock(net, ipv6, port);
	if (IS_ERR(sock)) {
		kfree(gs);
		return ERR_CAST(sock);
	}

	gs->sock = sock;
	gs->refcnt = 1;
	for (h = 0; h < VNI_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&gs->vni_list[h]);

	/* Initialize the geneve udp offloads structure */
	gs->udp_offloads.port = port;
	gs->udp_offloads.callbacks.gro_receive = geneve_gro_receive;
	gs->udp_offloads.callbacks.gro_complete = geneve_gro_complete;
	geneve_notify_add_rx_port(gs);

	/* Mark socket as an encapsulation socket */
	tunnel_cfg.sk_user_data = gs;
	tunnel_cfg.encap_type = 1;
	tunnel_cfg.encap_rcv = geneve_udp_encap_recv;
	tunnel_cfg.encap_destroy = NULL;
	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
	list_add(&gs->list, &gn->sock_list);

	return gs;
}
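
/* Unregister the tunnel port from the UDP GRO offload framework */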
static void geneve_notify_del_rx_port(struct geneve_sock *gs)
{
	struct sock *sk = gs->sock->sk;
	sa_family_t sa_family = sk->sk_family;

	if (sa_family == AF_INET)
		udp_del_offload(&gs->udp_offloads);
}
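
/* Drop a reference on the socket and tear it down once the last user is gone */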
static void geneve_sock_release(struct geneve_sock *gs)
{
	if (--gs->refcnt)
		return;

	list_del(&gs->list);
	geneve_notify_del_rx_port(gs);
	udp_tunnel_sock_release(gs->sock);
	kfree_rcu(gs, rcu);
}
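
/* Find an existing IPv4 geneve socket in this namespace bound to dst_port */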
static struct geneve_sock *geneve_find_sock(struct geneve_net *gn,
					    __be16 dst_port)
{
	struct geneve_sock *gs;

	list_for_each_entry(gs, &gn->sock_list, list) {
		if (inet_sk(gs->sock->sk)->inet_sport == dst_port &&
		    inet_sk(gs->sock->sk)->sk.sk_family == AF_INET) {
			return gs;
		}
	}
	return NULL;
}
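
/* ndo_open: take a reference on (or create) the shared UDP socket and
 * hash this device into its VNI table.
 */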
static int geneve_open(struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	struct net *net = geneve->net;
	struct geneve_net *gn = net_generic(net, geneve_net_id);
	struct geneve_sock *gs;
	__u32 hash;

	gs = geneve_find_sock(gn, geneve->dst_port);
	if (gs) {
		gs->refcnt++;
		goto out;
	}

	gs = geneve_socket_create(net, geneve->dst_port, false);
	if (IS_ERR(gs))
		return PTR_ERR(gs);

out:
	gs->collect_md = geneve->collect_md;
	geneve->sock = gs;

	hash = geneve_net_vni_hash(geneve->vni);
	hlist_add_head_rcu(&geneve->hlist, &gs->vni_list[hash]);
	return 0;
}
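
/* ndo_stop: unhash the device and release its reference on the socket */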
static int geneve_stop(struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	struct geneve_sock *gs = geneve->sock;

	if (!hlist_unhashed(&geneve->hlist))
		hlist_del_rcu(&geneve->hlist);
	geneve_sock_release(gs);
	return 0;
}
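
/* Push the Geneve header (and any options) in front of the inner frame
 * before UDP encapsulation.
 */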
static int geneve_build_skb(struct rtable *rt, struct sk_buff *skb,
			    __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
			    bool csum)
{
	struct genevehdr *gnvh;
	int min_headroom;
	int err;

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
			+ GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr);
	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err)) {
		kfree_skb(skb);
		goto free_rt;
	}

	skb = udp_tunnel_handle_offloads(skb, csum);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		goto free_rt;
	}

	gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
	gnvh->ver = GENEVE_VER;
	gnvh->opt_len = opt_len / 4;
	gnvh->oam = !!(tun_flags & TUNNEL_OAM);
	gnvh->critical = !!(tun_flags & TUNNEL_CRIT_OPT);
	gnvh->rsvd1 = 0;
	memcpy(gnvh->vni, vni, 3);
	gnvh->proto_type = htons(ETH_P_TEB);
	gnvh->rsvd2 = 0;
	memcpy(gnvh->options, opt, opt_len);

	skb_set_inner_protocol(skb, htons(ETH_P_TEB));
	return 0;

free_rt:
	ip_rt_put(rt);
	return err;
}
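
/* Route lookup for the outer IPv4 header, using tunnel metadata when
 * present and the device configuration otherwise.
 */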
static struct rtable *geneve_get_rt(struct sk_buff *skb,
				    struct net_device *dev,
				    struct flowi4 *fl4,
				    struct ip_tunnel_info *info)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	struct rtable *rt = NULL;
	__u8 tos;

	memset(fl4, 0, sizeof(*fl4));
	fl4->flowi4_mark = skb->mark;
	fl4->flowi4_proto = IPPROTO_UDP;

	if (info) {
		fl4->daddr = info->key.u.ipv4.dst;
		fl4->saddr = info->key.u.ipv4.src;
		fl4->flowi4_tos = RT_TOS(info->key.tos);
	} else {
		tos = geneve->tos;
		if (tos == 1) {
			const struct iphdr *iip = ip_hdr(skb);

			tos = ip_tunnel_get_dsfield(iip, skb);
		}

		fl4->flowi4_tos = RT_TOS(tos);
		fl4->daddr = geneve->remote.sin_addr.s_addr;
	}

	rt = ip_route_output_key(geneve->net, fl4);
	if (IS_ERR(rt)) {
		netdev_dbg(dev, "no route to %pI4\n", &fl4->daddr);
		dev->stats.tx_carrier_errors++;
		return rt;
	}
	if (rt->dst.dev == dev) { /* is this necessary? */
		netdev_dbg(dev, "circular route to %pI4\n", &fl4->daddr);
		dev->stats.collisions++;
		ip_rt_put(rt);
		return ERR_PTR(-EINVAL);
	}

	return rt;
}
/* Convert 64 bit tunnel ID to 24 bit VNI. */
static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni)
{
#ifdef __BIG_ENDIAN
	vni[0] = (__force __u8)(tun_id >> 16);
	vni[1] = (__force __u8)(tun_id >> 8);
	vni[2] = (__force __u8)tun_id;
#else
	vni[0] = (__force __u8)((__force u64)tun_id >> 40);
	vni[1] = (__force __u8)((__force u64)tun_id >> 48);
	vni[2] = (__force __u8)((__force u64)tun_id >> 56);
#endif
}
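
/* Transmit path: encapsulate the frame and send it through the UDP tunnel socket */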
static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	struct geneve_sock *gs = geneve->sock;
	struct ip_tunnel_info *info = NULL;
	struct rtable *rt = NULL;
	const struct iphdr *iip; /* interior IP header */
	struct flowi4 fl4;
	__u8 tos, ttl;
	__be16 sport;
	bool udp_csum;
	__be16 df;
	int err;

	if (geneve->collect_md) {
		info = skb_tunnel_info(skb);
		if (unlikely(info && !(info->mode & IP_TUNNEL_INFO_TX))) {
			netdev_dbg(dev, "no tunnel metadata\n");
			goto tx_error;
		}
		if (info && ip_tunnel_info_af(info) != AF_INET)
			goto tx_error;
	}

	rt = geneve_get_rt(skb, dev, &fl4, info);
	if (IS_ERR(rt)) {
		netdev_dbg(dev, "no route to %pI4\n", &fl4.daddr);
		dev->stats.tx_carrier_errors++;
		goto tx_error;
	}

	sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
	skb_reset_mac_header(skb);

	iip = ip_hdr(skb);

	if (info) {
		const struct ip_tunnel_key *key = &info->key;
		u8 *opts = NULL;
		u8 vni[3];

		tunnel_id_to_vni(key->tun_id, vni);

		if (key->tun_flags & TUNNEL_GENEVE_OPT)
			opts = ip_tunnel_info_opts(info);

		udp_csum = !!(key->tun_flags & TUNNEL_CSUM);
		err = geneve_build_skb(rt, skb, key->tun_flags, vni,
				       info->options_len, opts, udp_csum);
		if (unlikely(err))
			goto err;

		tos = ip_tunnel_ecn_encap(key->tos, iip, skb);
		ttl = key->ttl;
		df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
	} else {
		udp_csum = false;
		err = geneve_build_skb(rt, skb, 0, geneve->vni,
				       0, NULL, udp_csum);
		if (unlikely(err))
			goto err;

		tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, iip, skb);
		ttl = geneve->ttl;
		if (!ttl && IN_MULTICAST(ntohl(fl4.daddr)))
			ttl = 1;
		ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
		df = 0;
	}
	err = udp_tunnel_xmit_skb(rt, gs->sock->sk, skb, fl4.saddr, fl4.daddr,
				  tos, ttl, df, sport, geneve->dst_port,
				  !net_eq(geneve->net, dev_net(geneve->dev)),
				  !udp_csum);

	iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
	return NETDEV_TX_OK;

tx_error:
	dev_kfree_skb(skb);
err:
	dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}
static const struct net_device_ops geneve_netdev_ops = {
	.ndo_init		= geneve_init,
	.ndo_uninit		= geneve_uninit,
	.ndo_open		= geneve_open,
	.ndo_stop		= geneve_stop,
	.ndo_start_xmit		= geneve_xmit,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};

static void geneve_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->version, GENEVE_NETDEV_VER, sizeof(drvinfo->version));
	strlcpy(drvinfo->driver, "geneve", sizeof(drvinfo->driver));
}

static const struct ethtool_ops geneve_ethtool_ops = {
	.get_drvinfo	= geneve_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};

/* Info for udev, that this is a virtual tunnel endpoint */
static struct device_type geneve_type = {
	.name = "geneve",
};
/* Initialize the device structure. */
static void geneve_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->netdev_ops = &geneve_netdev_ops;
	dev->ethtool_ops = &geneve_ethtool_ops;
	dev->destructor = free_netdev;

	SET_NETDEV_DEVTYPE(dev, &geneve_type);

	dev->features |= NETIF_F_LLTX;
	dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->features |= NETIF_F_RXCSUM;
	dev->features |= NETIF_F_GSO_SOFTWARE;

	dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_GSO_SOFTWARE;

	netif_keep_dst(dev);
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
	eth_hw_addr_random(dev);
}

static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
	[IFLA_GENEVE_ID]		= { .type = NLA_U32 },
	[IFLA_GENEVE_REMOTE]		= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_GENEVE_TTL]		= { .type = NLA_U8 },
	[IFLA_GENEVE_TOS]		= { .type = NLA_U8 },
	[IFLA_GENEVE_PORT]		= { .type = NLA_U16 },
	[IFLA_GENEVE_COLLECT_METADATA]	= { .type = NLA_FLAG },
};

static int geneve_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;

		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		return -EINVAL;

	if (data[IFLA_GENEVE_ID]) {
		__u32 vni = nla_get_u32(data[IFLA_GENEVE_ID]);

		if (vni >= GENEVE_VID_MASK)
			return -ERANGE;
	}

	return 0;
}
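
/* Look for an existing device with the same port, remote and VNI; also
 * report whether any device already uses this port and whether it runs
 * in metadata (collect_md) mode.
 */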
static struct geneve_dev *geneve_find_dev(struct geneve_net *gn,
					  __be16 dst_port,
					  __be32 rem_addr,
					  u8 vni[],
					  bool *tun_on_same_port,
					  bool *tun_collect_md)
{
	struct geneve_dev *geneve, *t;

	*tun_on_same_port = false;
	*tun_collect_md = false;
	t = NULL;
	list_for_each_entry(geneve, &gn->geneve_list, next) {
		if (geneve->dst_port == dst_port) {
			*tun_collect_md = geneve->collect_md;
			*tun_on_same_port = true;
		}
		if (!memcmp(vni, geneve->vni, sizeof(geneve->vni)) &&
		    rem_addr == geneve->remote.sin_addr.s_addr &&
		    dst_port == geneve->dst_port)
			t = geneve;
	}
	return t;
}
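
/* Common device setup for both rtnl-created and metadata-based devices */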
static int geneve_configure(struct net *net, struct net_device *dev,
			    __be32 rem_addr, __u32 vni, __u8 ttl, __u8 tos,
			    __be16 dst_port, bool metadata)
{
	struct geneve_net *gn = net_generic(net, geneve_net_id);
	struct geneve_dev *t, *geneve = netdev_priv(dev);
	bool tun_collect_md, tun_on_same_port;
	int err;

	if (metadata) {
		if (rem_addr || vni || tos || ttl)
			return -EINVAL;
	}

	geneve->net = net;
	geneve->dev = dev;

	geneve->vni[0] = (vni & 0x00ff0000) >> 16;
	geneve->vni[1] = (vni & 0x0000ff00) >> 8;
	geneve->vni[2] = vni & 0x000000ff;

	geneve->remote.sin_addr.s_addr = rem_addr;
	if (IN_MULTICAST(ntohl(geneve->remote.sin_addr.s_addr)))
		return -EINVAL;

	geneve->ttl = ttl;
	geneve->tos = tos;
	geneve->dst_port = dst_port;
	geneve->collect_md = metadata;

	t = geneve_find_dev(gn, dst_port, rem_addr, geneve->vni,
			    &tun_on_same_port, &tun_collect_md);
	if (t)
		return -EBUSY;

	if (metadata) {
		if (tun_on_same_port)
			return -EPERM;
	} else {
		if (tun_collect_md)
			return -EPERM;
	}

	err = register_netdevice(dev);
	if (err)
		return err;

	list_add(&geneve->next, &gn->geneve_list);
	return 0;
}
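
/* rtnl newlink handler: parse the netlink attributes and configure the device */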
static int geneve_newlink(struct net *net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[])
{
	__be16 dst_port = htons(GENEVE_UDP_PORT);
	__u8 ttl = 0, tos = 0;
	bool metadata = false;
	__be32 rem_addr;
	__u32 vni;

	if (!data[IFLA_GENEVE_ID] || !data[IFLA_GENEVE_REMOTE])
		return -EINVAL;

	vni = nla_get_u32(data[IFLA_GENEVE_ID]);
	rem_addr = nla_get_in_addr(data[IFLA_GENEVE_REMOTE]);

	if (data[IFLA_GENEVE_TTL])
		ttl = nla_get_u8(data[IFLA_GENEVE_TTL]);

	if (data[IFLA_GENEVE_TOS])
		tos = nla_get_u8(data[IFLA_GENEVE_TOS]);

	if (data[IFLA_GENEVE_PORT])
		dst_port = nla_get_be16(data[IFLA_GENEVE_PORT]);

	if (data[IFLA_GENEVE_COLLECT_METADATA])
		metadata = true;

	return geneve_configure(net, dev, rem_addr, vni,
				ttl, tos, dst_port, metadata);
}

static void geneve_dellink(struct net_device *dev, struct list_head *head)
{
	struct geneve_dev *geneve = netdev_priv(dev);

	list_del(&geneve->next);
	unregister_netdevice_queue(dev, head);
}

static size_t geneve_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__u32)) +	/* IFLA_GENEVE_ID */
		nla_total_size(sizeof(struct in_addr)) + /* IFLA_GENEVE_REMOTE */
		nla_total_size(sizeof(__u8)) +	/* IFLA_GENEVE_TTL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_GENEVE_TOS */
		nla_total_size(sizeof(__be16)) + /* IFLA_GENEVE_PORT */
		nla_total_size(0) +	 /* IFLA_GENEVE_COLLECT_METADATA */
		0;
}
static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	__u32 vni;

	vni = (geneve->vni[0] << 16) | (geneve->vni[1] << 8) | geneve->vni[2];
	if (nla_put_u32(skb, IFLA_GENEVE_ID, vni))
		goto nla_put_failure;

	if (nla_put_in_addr(skb, IFLA_GENEVE_REMOTE,
			    geneve->remote.sin_addr.s_addr))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_GENEVE_TTL, geneve->ttl) ||
	    nla_put_u8(skb, IFLA_GENEVE_TOS, geneve->tos))
		goto nla_put_failure;

	if (nla_put_be16(skb, IFLA_GENEVE_PORT, geneve->dst_port))
		goto nla_put_failure;

	if (geneve->collect_md) {
		if (nla_put_flag(skb, IFLA_GENEVE_COLLECT_METADATA))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops geneve_link_ops __read_mostly = {
	.kind		= "geneve",
	.maxtype	= IFLA_GENEVE_MAX,
	.policy		= geneve_policy,
	.priv_size	= sizeof(struct geneve_dev),
	.setup		= geneve_setup,
	.validate	= geneve_validate,
	.newlink	= geneve_newlink,
	.dellink	= geneve_dellink,
	.get_size	= geneve_get_size,
	.fill_info	= geneve_fill_info,
};
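
/* Create a geneve device in metadata (collect_md) mode on behalf of
 * in-kernel callers.
 */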
struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
					u8 name_assign_type, u16 dst_port)
{
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	int err;

	memset(tb, 0, sizeof(tb));
	dev = rtnl_create_link(net, name, name_assign_type,
			       &geneve_link_ops, tb);
	if (IS_ERR(dev))
		return dev;

	err = geneve_configure(net, dev, 0, 0, 0, 0, htons(dst_port), true);
	if (err) {
		free_netdev(dev);
		return ERR_PTR(err);
	}
	return dev;
}
EXPORT_SYMBOL_GPL(geneve_dev_create_fb);
static __net_init int geneve_init_net(struct net *net)
{
	struct geneve_net *gn = net_generic(net, geneve_net_id);

	INIT_LIST_HEAD(&gn->geneve_list);
	INIT_LIST_HEAD(&gn->sock_list);
	return 0;
}
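
/* Tear down every geneve device tied to this namespace when it exits */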
static void __net_exit geneve_exit_net(struct net *net)
{
	struct geneve_net *gn = net_generic(net, geneve_net_id);
	struct geneve_dev *geneve, *next;
	struct net_device *dev, *aux;
	LIST_HEAD(list);

	rtnl_lock();

	/* gather any geneve devices that were moved into this ns */
	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == &geneve_link_ops)
			unregister_netdevice_queue(dev, &list);

	/* now gather any other geneve devices that were created in this ns */
	list_for_each_entry_safe(geneve, next, &gn->geneve_list, next) {
		/* If geneve->dev is in the same netns, it was already added
		 * to the list by the previous loop.
		 */
		if (!net_eq(dev_net(geneve->dev), net))
			unregister_netdevice_queue(geneve->dev, &list);
	}

	/* unregister the devices gathered above */
	unregister_netdevice_many(&list);

	rtnl_unlock();
}
static struct pernet_operations geneve_net_ops = {
	.init = geneve_init_net,
	.exit = geneve_exit_net,
	.id   = &geneve_net_id,
	.size = sizeof(struct geneve_net),
};

static int __init geneve_init_module(void)
{
	int rc;

	rc = register_pernet_subsys(&geneve_net_ops);
	if (rc)
		goto out1;

	rc = rtnl_link_register(&geneve_link_ops);
	if (rc)
		goto out2;

	return 0;
out2:
	unregister_pernet_subsys(&geneve_net_ops);
out1:
	return rc;
}
late_initcall(geneve_init_module);

static void __exit geneve_cleanup_module(void)
{
	rtnl_link_unregister(&geneve_link_ops);
	unregister_pernet_subsys(&geneve_net_ops);
}
module_exit(geneve_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_VERSION(GENEVE_NETDEV_VER);
MODULE_AUTHOR("John W. Linville <linville@tuxdriver.com>");
MODULE_DESCRIPTION("Interface driver for GENEVE encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("geneve");