/*
 * VXLAN: Virtual eXtensible Local Area Network
 *
 * Copyright (c) 2012-2013 Vyatta Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/rculist.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/hash.h>
#include <linux/ethtool.h>
#include <net/arp.h>
#include <net/ndisc.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/rtnetlink.h>
#include <net/route.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/vxlan.h>
#include <net/protocol.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#include <net/ip6_tunnel.h>
#include <net/ip6_checksum.h>
#endif
#include <net/dst_metadata.h>
#define VXLAN_VERSION	"0.1"

#define PORT_HASH_BITS	8
#define PORT_HASH_SIZE  (1 << PORT_HASH_BITS)
#define FDB_AGE_DEFAULT 300 /* 5 min */
#define FDB_AGE_INTERVAL (10 * HZ)	/* rescan interval */

/* UDP port for VXLAN traffic.
 * The IANA assigned port is 4789, but the Linux default is 8472
 * for compatibility with early adopters.
 */
static unsigned short vxlan_port __read_mostly = 8472;
module_param_named(udp_port, vxlan_port, ushort, 0444);
MODULE_PARM_DESC(udp_port, "Destination UDP port");
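/* Note: the parameter is read-only at runtime (mode 0444); to listen on the
 * IANA-assigned port instead, set it at module load time, e.g.
 * "modprobe vxlan udp_port=4789", or pass vxlan.udp_port=4789 on the kernel
 * command line when the driver is built in.
 */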
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static int vxlan_net_id;
static struct rtnl_link_ops vxlan_link_ops;

static const u8 all_zeros_mac[ETH_ALEN];

static struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
					 bool no_share, u32 flags);

/* per-network namespace private data for this module */
struct vxlan_net {
	struct list_head  vxlan_list;
	struct hlist_head sock_list[PORT_HASH_SIZE];
	spinlock_t	  sock_lock;
};

/* Forwarding table entry */
struct vxlan_fdb {
	struct hlist_node hlist;	/* linked list of entries */
	struct rcu_head	  rcu;
	unsigned long	  updated;	/* jiffies */
	unsigned long	  used;
	struct list_head  remotes;
	u8		  eth_addr[ETH_ALEN];
	u16		  state;	/* see ndm_state */
	u8		  flags;	/* see ndm_flags */
};

/* salt for hash table */
static u32 vxlan_salt __read_mostly;
static struct workqueue_struct *vxlan_wq;

static inline bool vxlan_collect_metadata(struct vxlan_sock *vs)
{
	return vs->flags & VXLAN_F_COLLECT_METADATA ||
	       ip_tunnel_collect_metadata();
}
#if IS_ENABLED(CONFIG_IPV6)
static inline
bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
{
	if (a->sa.sa_family != b->sa.sa_family)
		return false;
	if (a->sa.sa_family == AF_INET6)
		return ipv6_addr_equal(&a->sin6.sin6_addr, &b->sin6.sin6_addr);
	else
		return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
}

static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
{
	if (ipa->sa.sa_family == AF_INET6)
		return ipv6_addr_any(&ipa->sin6.sin6_addr);
	else
		return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
}

static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
{
	if (ipa->sa.sa_family == AF_INET6)
		return ipv6_addr_is_multicast(&ipa->sin6.sin6_addr);
	else
		return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
}

static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
{
	if (nla_len(nla) >= sizeof(struct in6_addr)) {
		ip->sin6.sin6_addr = nla_get_in6_addr(nla);
		ip->sa.sa_family = AF_INET6;
		return 0;
	} else if (nla_len(nla) >= sizeof(__be32)) {
		ip->sin.sin_addr.s_addr = nla_get_in_addr(nla);
		ip->sa.sa_family = AF_INET;
		return 0;
	} else {
		return -EAFNOSUPPORT;
	}
}

static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
			      const union vxlan_addr *ip)
{
	if (ip->sa.sa_family == AF_INET6)
		return nla_put_in6_addr(skb, attr, &ip->sin6.sin6_addr);
	else
		return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr);
}

#else /* !CONFIG_IPV6 */

static inline
bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
{
	return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
}

static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
{
	return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
}

static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
{
	return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
}

static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
{
	if (nla_len(nla) >= sizeof(struct in6_addr)) {
		return -EAFNOSUPPORT;
	} else if (nla_len(nla) >= sizeof(__be32)) {
		ip->sin.sin_addr.s_addr = nla_get_in_addr(nla);
		ip->sa.sa_family = AF_INET;
		return 0;
	} else {
		return -EAFNOSUPPORT;
	}
}

static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
			      const union vxlan_addr *ip)
{
	return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr);
}
#endif

/* Virtual Network hash table head */
static inline struct hlist_head *vni_head(struct vxlan_sock *vs, u32 id)
{
	return &vs->vni_list[hash_32(id, VNI_HASH_BITS)];
}

/* Socket hash table head */
static inline struct hlist_head *vs_head(struct net *net, __be16 port)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);

	return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
}

/* First remote destination for a forwarding entry.
 * Guaranteed to be non-NULL because remotes are never deleted.
 */
static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb)
{
	return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list);
}

static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
{
	return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
}

/* Find VXLAN socket based on network namespace, address family and UDP port
 * and enabled unshareable flags.
 */
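/* Only flags in VXLAN_F_RCV_FLAGS (receive-side extensions such as GBP and
 * remote checksum offload) make a socket unshareable, so everything else is
 * masked off before the comparison below.
 */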
static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,
					  __be16 port, u32 flags)
{
	struct vxlan_sock *vs;

	flags &= VXLAN_F_RCV_FLAGS;

	hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
		if (inet_sk(vs->sock->sk)->inet_sport == port &&
		    vxlan_get_sk_family(vs) == family &&
		    vs->flags == flags)
			return vs;
	}
	return NULL;
}

static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id)
{
	struct vxlan_dev *vxlan;

	hlist_for_each_entry_rcu(vxlan, vni_head(vs, id), hlist) {
		if (vxlan->default_dst.remote_vni == id)
			return vxlan;
	}

	return NULL;
}

/* Look up VNI in a per net namespace table */
static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id,
					sa_family_t family, __be16 port,
					u32 flags)
{
	struct vxlan_sock *vs;

	vs = vxlan_find_sock(net, family, port, flags);
	if (!vs)
		return NULL;

	return vxlan_vs_find_vni(vs, id);
}

/* Fill in neighbour message in skbuff. */
static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
			  const struct vxlan_fdb *fdb,
			  u32 portid, u32 seq, int type, unsigned int flags,
			  const struct vxlan_rdst *rdst)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;
	bool send_ip, send_eth;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	memset(ndm, 0, sizeof(*ndm));

	send_eth = send_ip = true;

	if (type == RTM_GETNEIGH) {
		ndm->ndm_family = AF_INET;
		send_ip = !vxlan_addr_any(&rdst->remote_ip);
		send_eth = !is_zero_ether_addr(fdb->eth_addr);
	} else
		ndm->ndm_family = AF_BRIDGE;
	ndm->ndm_state = fdb->state;
	ndm->ndm_ifindex = vxlan->dev->ifindex;
	ndm->ndm_flags = fdb->flags;
	ndm->ndm_type = RTN_UNICAST;

	if (!net_eq(dev_net(vxlan->dev), vxlan->net) &&
	    nla_put_s32(skb, NDA_LINK_NETNSID,
			peernet2id_alloc(dev_net(vxlan->dev), vxlan->net)))
		goto nla_put_failure;

	if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
		goto nla_put_failure;

	if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, &rdst->remote_ip))
		goto nla_put_failure;

	if (rdst->remote_port && rdst->remote_port != vxlan->cfg.dst_port &&
	    nla_put_be16(skb, NDA_PORT, rdst->remote_port))
		goto nla_put_failure;
	if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
	    nla_put_u32(skb, NDA_VNI, rdst->remote_vni))
		goto nla_put_failure;
	if (rdst->remote_ifindex &&
	    nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex))
		goto nla_put_failure;

	ci.ndm_used	 = jiffies_to_clock_t(now - fdb->used);
	ci.ndm_confirmed = 0;
	ci.ndm_updated	 = jiffies_to_clock_t(now - fdb->updated);
	ci.ndm_refcnt	 = 0;

	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t vxlan_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
		+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
		+ nla_total_size(sizeof(struct in6_addr)) /* NDA_DST */
		+ nla_total_size(sizeof(__be16)) /* NDA_PORT */
		+ nla_total_size(sizeof(__be32)) /* NDA_VNI */
		+ nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */
		+ nla_total_size(sizeof(__s32)) /* NDA_LINK_NETNSID */
		+ nla_total_size(sizeof(struct nda_cacheinfo));
}

static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
			     struct vxlan_rdst *rd, int type)
{
	struct net *net = dev_net(vxlan->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, rd);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}

static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb f = {
		.state = NUD_STALE,
	};
	struct vxlan_rdst remote = {
		.remote_ip = *ipa, /* goes to NDA_DST */
		.remote_vni = VXLAN_N_VID,
	};

	vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
}

static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
{
	struct vxlan_fdb f = {
		.state = NUD_STALE,
	};
	struct vxlan_rdst remote = { };

	memcpy(f.eth_addr, eth_addr, ETH_ALEN);

	vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
}
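/* eth_hash() below reads 8 bytes starting at the MAC address and shifts out
 * the two bytes that are not part of the 6-byte address: on big-endian hosts
 * the excess sits in the low 16 bits (shift right), on little-endian hosts
 * in the high 16 bits (shift left).
 */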
/* Hash Ethernet address */
static u32 eth_hash(const unsigned char *addr)
{
	u64 value = get_unaligned((u64 *)addr);

	/* only want 6 bytes */
#ifdef __BIG_ENDIAN
	value >>= 16;
#else
	value <<= 16;
#endif
	return hash_64(value, FDB_HASH_BITS);
}

/* Hash chain to use given mac address */
static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
						const u8 *mac)
{
	return &vxlan->fdb_head[eth_hash(mac)];
}

/* Look up Ethernet address in forwarding table */
static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
					  const u8 *mac)
{
	struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
	struct vxlan_fdb *f;

	hlist_for_each_entry_rcu(f, head, hlist) {
		if (ether_addr_equal(mac, f->eth_addr))
			return f;
	}

	return NULL;
}

static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
					const u8 *mac)
{
	struct vxlan_fdb *f;

	f = __vxlan_find_mac(vxlan, mac);
	if (f)
		f->used = jiffies;

	return f;
}

/* caller should hold vxlan->hash_lock */
static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
					      union vxlan_addr *ip, __be16 port,
					      __u32 vni, __u32 ifindex)
{
	struct vxlan_rdst *rd;

	list_for_each_entry(rd, &f->remotes, list) {
		if (vxlan_addr_equal(&rd->remote_ip, ip) &&
		    rd->remote_port == port &&
		    rd->remote_vni == vni &&
		    rd->remote_ifindex == ifindex)
			return rd;
	}

	return NULL;
}

/* Replace destination of unicast mac */
static int vxlan_fdb_replace(struct vxlan_fdb *f,
			     union vxlan_addr *ip, __be16 port, __u32 vni,
			     __u32 ifindex)
{
	struct vxlan_rdst *rd;

	rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
	if (rd)
		return 0;

	rd = list_first_entry_or_null(&f->remotes, struct vxlan_rdst, list);
	if (!rd)
		return 0;
	rd->remote_ip = *ip;
	rd->remote_port = port;
	rd->remote_vni = vni;
	rd->remote_ifindex = ifindex;
	return 1;
}

/* Add/update destinations for multicast */
static int vxlan_fdb_append(struct vxlan_fdb *f,
			    union vxlan_addr *ip, __be16 port, __u32 vni,
			    __u32 ifindex, struct vxlan_rdst **rdp)
{
	struct vxlan_rdst *rd;

	rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
	if (rd)
		return 0;

	rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
	if (rd == NULL)
		return -ENOBUFS;
	rd->remote_ip = *ip;
	rd->remote_port = port;
	rd->remote_vni = vni;
	rd->remote_ifindex = ifindex;

	list_add_tail_rcu(&rd->list, &f->remotes);

	*rdp = rd;
	return 1;
}
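/* Remote checksum offload (RCO) encodes the checksum start and offset in the
 * low bits of the VNI field: VXLAN_RCO_MASK holds the start in 2-byte units
 * (hence the VXLAN_RCO_SHIFT below) and VXLAN_RCO_UDP selects whether the
 * checksum field being patched is UDP's or TCP's.
 */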
static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
					  unsigned int off,
					  struct vxlanhdr *vh, size_t hdrlen,
					  u32 data, struct gro_remcsum *grc,
					  bool nopartial)
{
	size_t start, offset, plen;

	if (skb->remcsum_offload)
		return NULL;

	if (!NAPI_GRO_CB(skb)->csum_valid)
		return NULL;

	start = (data & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
	offset = start + ((data & VXLAN_RCO_UDP) ?
			  offsetof(struct udphdr, check) :
			  offsetof(struct tcphdr, check));

	plen = hdrlen + offset + sizeof(u16);

	/* Pull checksum that will be written */
	if (skb_gro_header_hard(skb, off + plen)) {
		vh = skb_gro_header_slow(skb, off + plen, off);
		if (!vh)
			return NULL;
	}

	skb_gro_remcsum_process(skb, (void *)vh + hdrlen,
				start, offset, grc, nopartial);

	skb->remcsum_offload = 1;

	return vh;
}

static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
					  struct sk_buff *skb,
					  struct udp_offload *uoff)
{
	struct sk_buff *p, **pp = NULL;
	struct vxlanhdr *vh, *vh2;
	unsigned int hlen, off_vx;
	int flush = 1;
	struct vxlan_sock *vs = container_of(uoff, struct vxlan_sock,
					     udp_offloads);
	u32 flags;
	struct gro_remcsum grc;

	skb_gro_remcsum_init(&grc);

	off_vx = skb_gro_offset(skb);
	hlen = off_vx + sizeof(*vh);
	vh = skb_gro_header_fast(skb, off_vx);
	if (skb_gro_header_hard(skb, hlen)) {
		vh = skb_gro_header_slow(skb, hlen, off_vx);
		if (unlikely(!vh))
			goto out;
	}

	skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
	skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));

	flags = ntohl(vh->vx_flags);

	if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
		vh = vxlan_gro_remcsum(skb, off_vx, vh, sizeof(struct vxlanhdr),
				       ntohl(vh->vx_vni), &grc,
				       !!(vs->flags &
					  VXLAN_F_REMCSUM_NOPARTIAL));

		if (!vh)
			goto out;
	}

	flush = 0;

	for (p = *head; p; p = p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		vh2 = (struct vxlanhdr *)(p->data + off_vx);
		if (vh->vx_flags != vh2->vx_flags ||
		    vh->vx_vni != vh2->vx_vni) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	pp = eth_gro_receive(head, skb);

out:
	skb_gro_remcsum_cleanup(skb, &grc);
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}

static int vxlan_gro_complete(struct sk_buff *skb, int nhoff,
			      struct udp_offload *uoff)
{
	udp_tunnel_gro_complete(skb, nhoff);

	return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
}

/* Notify netdevs that UDP port started listening */
static void vxlan_notify_add_rx_port(struct vxlan_sock *vs)
{
	struct net_device *dev;
	struct sock *sk = vs->sock->sk;
	struct net *net = sock_net(sk);
	sa_family_t sa_family = vxlan_get_sk_family(vs);
	__be16 port = inet_sk(sk)->inet_sport;
	int err;

	if (sa_family == AF_INET) {
		err = udp_add_offload(&vs->udp_offloads);
		if (err)
			pr_warn("vxlan: udp_add_offload failed with status %d\n", err);
	}

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		if (dev->netdev_ops->ndo_add_vxlan_port)
			dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
							    port);
	}
	rcu_read_unlock();
}

/* Notify netdevs that UDP port is no longer listening */
static void vxlan_notify_del_rx_port(struct vxlan_sock *vs)
{
	struct net_device *dev;
	struct sock *sk = vs->sock->sk;
	struct net *net = sock_net(sk);
	sa_family_t sa_family = vxlan_get_sk_family(vs);
	__be16 port = inet_sk(sk)->inet_sport;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		if (dev->netdev_ops->ndo_del_vxlan_port)
			dev->netdev_ops->ndo_del_vxlan_port(dev, sa_family,
							    port);
	}
	rcu_read_unlock();

	if (sa_family == AF_INET)
		udp_del_offload(&vs->udp_offloads);
}
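/* FDB entries follow the usual netlink modifier semantics, as exercised by
 * e.g. "bridge fdb add <mac> dev vxlan0 dst <remote-ip>": NLM_F_EXCL fails
 * if the entry already exists, NLM_F_CREATE is required to add a new one,
 * NLM_F_REPLACE rewrites the destination of a unicast entry, and
 * NLM_F_APPEND adds another remote to a multicast/all-zeros entry.
 */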
/* Add new entry to forwarding table -- assumes lock held */
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
			    const u8 *mac, union vxlan_addr *ip,
			    __u16 state, __u16 flags,
			    __be16 port, __u32 vni, __u32 ifindex,
			    __u8 ndm_flags)
{
	struct vxlan_rdst *rd = NULL;
	struct vxlan_fdb *f;
	int notify = 0;

	f = __vxlan_find_mac(vxlan, mac);
	if (f) {
		if (flags & NLM_F_EXCL) {
			netdev_dbg(vxlan->dev,
				   "lost race to create %pM\n", mac);
			return -EEXIST;
		}
		if (f->state != state) {
			f->state = state;
			f->updated = jiffies;
			notify = 1;
		}
		if (f->flags != ndm_flags) {
			f->flags = ndm_flags;
			f->updated = jiffies;
			notify = 1;
		}
		if ((flags & NLM_F_REPLACE)) {
			/* Only change unicasts */
			if (!(is_multicast_ether_addr(f->eth_addr) ||
			      is_zero_ether_addr(f->eth_addr))) {
				notify |= vxlan_fdb_replace(f, ip, port, vni,
							    ifindex);
			} else
				return -EOPNOTSUPP;
		}
		if ((flags & NLM_F_APPEND) &&
		    (is_multicast_ether_addr(f->eth_addr) ||
		     is_zero_ether_addr(f->eth_addr))) {
			int rc = vxlan_fdb_append(f, ip, port, vni, ifindex,
						  &rd);

			if (rc < 0)
				return rc;
			notify |= rc;
		}
	} else {
		if (!(flags & NLM_F_CREATE))
			return -ENOENT;
		if (vxlan->cfg.addrmax &&
		    vxlan->addrcnt >= vxlan->cfg.addrmax)
			return -ENOSPC;

		/* Disallow replace to add a multicast entry */
		if ((flags & NLM_F_REPLACE) &&
		    (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
			return -EOPNOTSUPP;

		netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
		f = kmalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return -ENOMEM;

		notify = 1;
		f->state = state;
		f->flags = ndm_flags;
		f->updated = f->used = jiffies;
		INIT_LIST_HEAD(&f->remotes);
		memcpy(f->eth_addr, mac, ETH_ALEN);

		vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);

		++vxlan->addrcnt;
		hlist_add_head_rcu(&f->hlist,
				   vxlan_fdb_head(vxlan, mac));
	}

	if (notify) {
		if (rd == NULL)
			rd = first_remote_rtnl(f);
		vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH);
	}

	return 0;
}

static void vxlan_fdb_free(struct rcu_head *head)
{
	struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
	struct vxlan_rdst *rd, *nd;

	list_for_each_entry_safe(rd, nd, &f->remotes, list)
		kfree(rd);
	kfree(f);
}

static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
{
	netdev_dbg(vxlan->dev,
		   "delete %pM\n", f->eth_addr);

	--vxlan->addrcnt;
	vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);

	hlist_del_rcu(&f->hlist);
	call_rcu(&f->rcu, vxlan_fdb_free);
}

static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
			   union vxlan_addr *ip, __be16 *port, u32 *vni,
			   u32 *ifindex)
{
	struct net *net = dev_net(vxlan->dev);
	int err;

	if (tb[NDA_DST]) {
		err = vxlan_nla_get_addr(ip, tb[NDA_DST]);
		if (err)
			return err;
	} else {
		union vxlan_addr *remote = &vxlan->default_dst.remote_ip;

		if (remote->sa.sa_family == AF_INET) {
			ip->sin.sin_addr.s_addr = htonl(INADDR_ANY);
			ip->sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			ip->sin6.sin6_addr = in6addr_any;
			ip->sa.sa_family = AF_INET6;
#endif
		}
	}

	if (tb[NDA_PORT]) {
		if (nla_len(tb[NDA_PORT]) != sizeof(__be16))
			return -EINVAL;
		*port = nla_get_be16(tb[NDA_PORT]);
	} else {
		*port = vxlan->cfg.dst_port;
	}

	if (tb[NDA_VNI]) {
		if (nla_len(tb[NDA_VNI]) != sizeof(u32))
			return -EINVAL;
		*vni = nla_get_u32(tb[NDA_VNI]);
	} else {
		*vni = vxlan->default_dst.remote_vni;
	}

	if (tb[NDA_IFINDEX]) {
		struct net_device *tdev;

		if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
			return -EINVAL;
		*ifindex = nla_get_u32(tb[NDA_IFINDEX]);
		tdev = __dev_get_by_index(net, *ifindex);
		if (!tdev)
			return -EADDRNOTAVAIL;
	} else {
		*ifindex = 0;
	}

	return 0;
}

/* Add static entry (via netlink) */
static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			 struct net_device *dev,
			 const unsigned char *addr, u16 vid, u16 flags)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	/* struct net *net = dev_net(vxlan->dev); */
	union vxlan_addr ip;
	__be16 port;
	u32 vni, ifindex;
	int err;

	if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
		pr_info("RTM_NEWNEIGH with invalid state %#x\n",
			ndm->ndm_state);
		return -EINVAL;
	}

	if (tb[NDA_DST] == NULL)
		return -EINVAL;

	err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
	if (err)
		return err;

	if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family)
		return -EAFNOSUPPORT;

	spin_lock_bh(&vxlan->hash_lock);
	err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags,
			       port, vni, ifindex, ndm->ndm_flags);
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}

/* Delete entry (via netlink) */
static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
			    struct net_device *dev,
			    const unsigned char *addr, u16 vid)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;
	struct vxlan_rdst *rd = NULL;
	union vxlan_addr ip;
	__be16 port;
	u32 vni, ifindex;
	int err;

	err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
	if (err)
		return err;

	err = -ENOENT;

	spin_lock_bh(&vxlan->hash_lock);
	f = vxlan_find_mac(vxlan, addr);
	if (!f)
		goto out;

	if (!vxlan_addr_any(&ip)) {
		rd = vxlan_fdb_find_rdst(f, &ip, port, vni, ifindex);
		if (!rd)
			goto out;
	}

	err = 0;

	/* remove a destination if it's not the only one on the list,
	 * otherwise destroy the fdb entry
	 */
	if (rd && !list_is_singular(&f->remotes)) {
		list_del_rcu(&rd->list);
		vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
		kfree_rcu(rd, rcu);
		goto out;
	}

	vxlan_fdb_destroy(vxlan, f);

out:
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}

/* Dump forwarding table */
static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
			  struct net_device *dev,
			  struct net_device *filter_dev, int idx)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned int h;

	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct vxlan_fdb *f;
		int err;

		hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
			struct vxlan_rdst *rd;

			list_for_each_entry_rcu(rd, &f->remotes, list) {
				if (idx < cb->args[0])
					goto skip;

				err = vxlan_fdb_info(skb, vxlan, f,
						     NETLINK_CB(cb->skb).portid,
						     cb->nlh->nlmsg_seq,
						     RTM_NEWNEIGH,
						     NLM_F_MULTI, rd);
				if (err < 0)
					goto out;
skip:
				++idx;
			}
		}
	}
out:
	return idx;
}
/* Watch incoming packets to learn mapping between Ethernet address
 * and Tunnel endpoint.
 * Return true if packet is bogus and should be dropped.
 */
static bool vxlan_snoop(struct net_device *dev,
			union vxlan_addr *src_ip, const u8 *src_mac)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;

	f = vxlan_find_mac(vxlan, src_mac);
	if (likely(f)) {
		struct vxlan_rdst *rdst = first_remote_rcu(f);

		if (likely(vxlan_addr_equal(&rdst->remote_ip, src_ip)))
			return false;

		/* Don't migrate static entries, drop packets */
		if (f->state & NUD_NOARP)
			return true;

		if (net_ratelimit())
			netdev_info(dev,
				    "%pM migrated from %pIS to %pIS\n",
				    src_mac, &rdst->remote_ip.sa, &src_ip->sa);

		rdst->remote_ip = *src_ip;
		f->updated = jiffies;
		vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH);
	} else {
		/* learned new entry */
		spin_lock(&vxlan->hash_lock);

		/* close off race between vxlan_flush and incoming packets */
		if (netif_running(dev))
			vxlan_fdb_create(vxlan, src_mac, src_ip,
					 NUD_REACHABLE,
					 NLM_F_EXCL|NLM_F_CREATE,
					 vxlan->cfg.dst_port,
					 vxlan->default_dst.remote_vni,
					 0, NTF_SELF);
		spin_unlock(&vxlan->hash_lock);
	}

	return false;
}

/* See if multicast group is already in use by other ID */
static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
{
	struct vxlan_dev *vxlan;

	/* The vxlan_sock is only used by dev, leaving group has
	 * no effect on other vxlan devices.
	 */
	if (atomic_read(&dev->vn_sock->refcnt) == 1)
		return false;

	list_for_each_entry(vxlan, &vn->vxlan_list, next) {
		if (!netif_running(vxlan->dev) || vxlan == dev)
			continue;

		if (vxlan->vn_sock != dev->vn_sock)
			continue;

		if (!vxlan_addr_equal(&vxlan->default_dst.remote_ip,
				      &dev->default_dst.remote_ip))
			continue;

		if (vxlan->default_dst.remote_ifindex !=
		    dev->default_dst.remote_ifindex)
			continue;

		return true;
	}

	return false;
}

static void vxlan_sock_release(struct vxlan_sock *vs)
{
	struct sock *sk = vs->sock->sk;
	struct net *net = sock_net(sk);
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);

	if (!atomic_dec_and_test(&vs->refcnt))
		return;

	spin_lock(&vn->sock_lock);
	hlist_del_rcu(&vs->hlist);
	vxlan_notify_del_rx_port(vs);
	spin_unlock(&vn->sock_lock);

	queue_work(vxlan_wq, &vs->del_work);
}

/* Update multicast group membership when first VNI on
 * multicast address is brought up
 */
static int vxlan_igmp_join(struct vxlan_dev *vxlan)
{
	struct vxlan_sock *vs = vxlan->vn_sock;
	struct sock *sk = vs->sock->sk;
	union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
	int ifindex = vxlan->default_dst.remote_ifindex;
	int ret = -EINVAL;

	lock_sock(sk);
	if (ip->sa.sa_family == AF_INET) {
		struct ip_mreqn mreq = {
			.imr_multiaddr.s_addr	= ip->sin.sin_addr.s_addr,
			.imr_ifindex		= ifindex,
		};

		ret = ip_mc_join_group(sk, &mreq);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
						   &ip->sin6.sin6_addr);
#endif
	}
	release_sock(sk);

	return ret;
}

/* Inverse of vxlan_igmp_join when last VNI is brought down */
static int vxlan_igmp_leave(struct vxlan_dev *vxlan)
{
	struct vxlan_sock *vs = vxlan->vn_sock;
	struct sock *sk = vs->sock->sk;
	union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
	int ifindex = vxlan->default_dst.remote_ifindex;
	int ret = -EINVAL;

	lock_sock(sk);
	if (ip->sa.sa_family == AF_INET) {
		struct ip_mreqn mreq = {
			.imr_multiaddr.s_addr	= ip->sin.sin_addr.s_addr,
			.imr_ifindex		= ifindex,
		};

		ret = ip_mc_leave_group(sk, &mreq);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
						   &ip->sin6.sin6_addr);
#endif
	}
	release_sock(sk);

	return ret;
}

static struct vxlanhdr *vxlan_remcsum(struct sk_buff *skb, struct vxlanhdr *vh,
				      size_t hdrlen, u32 data, bool nopartial)
{
	size_t start, offset, plen;

	start = (data & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
	offset = start + ((data & VXLAN_RCO_UDP) ?
			  offsetof(struct udphdr, check) :
			  offsetof(struct tcphdr, check));

	plen = hdrlen + offset + sizeof(u16);

	if (!pskb_may_pull(skb, plen))
		return NULL;

	vh = (struct vxlanhdr *)(udp_hdr(skb) + 1);

	skb_remcsum_process(skb, (void *)vh + hdrlen, start, offset,
			    nopartial);

	return vh;
}
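/* Receive path proper: map the packet to a device by VNI, scrub skb state
 * when crossing network namespaces, optionally learn the sender's
 * MAC/endpoint pair, and decapsulate ECN. A return value above 1 from the
 * ECN helpers signals an invalid ECN combination and the frame is dropped.
 */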
static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
		      struct vxlan_metadata *md, u32 vni,
		      struct metadata_dst *tun_dst)
{
	struct iphdr *oip = NULL;
	struct ipv6hdr *oip6 = NULL;
	struct vxlan_dev *vxlan;
	struct pcpu_sw_netstats *stats;
	union vxlan_addr saddr;
	int err = 0;
	union vxlan_addr *remote_ip;

	/* For flow based devices, map all packets to VNI 0 */
	if (vs->flags & VXLAN_F_COLLECT_METADATA)
		vni = 0;

	/* Is this VNI defined? */
	vxlan = vxlan_vs_find_vni(vs, vni);
	if (!vxlan)
		goto drop;

	remote_ip = &vxlan->default_dst.remote_ip;
	skb_reset_mac_header(skb);
	skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev)));
	skb->protocol = eth_type_trans(skb, vxlan->dev);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	/* Ignore packet loops (and multicast echo) */
	if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
		goto drop;

	/* Re-examine inner Ethernet packet */
	if (remote_ip->sa.sa_family == AF_INET) {
		oip = ip_hdr(skb);
		saddr.sin.sin_addr.s_addr = oip->saddr;
		saddr.sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		oip6 = ipv6_hdr(skb);
		saddr.sin6.sin6_addr = oip6->saddr;
		saddr.sa.sa_family = AF_INET6;
#endif
	}

	if (tun_dst) {
		skb_dst_set(skb, (struct dst_entry *)tun_dst);
		tun_dst = NULL;
	}

	if ((vxlan->flags & VXLAN_F_LEARN) &&
	    vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source))
		goto drop;

	skb_reset_network_header(skb);
	/* In flow-based mode, GBP is carried in dst_metadata */
	if (!(vs->flags & VXLAN_F_COLLECT_METADATA))
		skb->mark = md->gbp;

	if (oip6)
		err = IP6_ECN_decapsulate(oip6, skb);
	if (oip)
		err = IP_ECN_decapsulate(oip, skb);

	if (unlikely(err)) {
		if (log_ecn_error) {
			if (oip6)
				net_info_ratelimited("non-ECT from %pI6\n",
						     &oip6->saddr);
			if (oip)
				net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
						     &oip->saddr, oip->tos);
		}
		if (err > 1) {
			++vxlan->dev->stats.rx_frame_errors;
			++vxlan->dev->stats.rx_errors;
			goto drop;
		}
	}

	stats = this_cpu_ptr(vxlan->dev->tstats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	netif_rx(skb);

	return;
drop:
	if (tun_dst)
		dst_release((struct dst_entry *)tun_dst);

	/* Consume bad packet */
	kfree_skb(skb);
}
/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct metadata_dst *tun_dst = NULL;
	struct ip_tunnel_info *info;
	struct vxlan_sock *vs;
	struct vxlanhdr *vxh;
	u32 flags, vni;
	struct vxlan_metadata _md;
	struct vxlan_metadata *md = &_md;

	/* Need VXLAN and inner Ethernet header to be present */
	if (!pskb_may_pull(skb, VXLAN_HLEN))
		goto error;

	vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
	flags = ntohl(vxh->vx_flags);
	vni = ntohl(vxh->vx_vni);

	if (flags & VXLAN_HF_VNI) {
		flags &= ~VXLAN_HF_VNI;
	} else {
		/* VNI flag always required to be set */
		goto bad_flags;
	}

	if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB)))
		goto drop;
	vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);

	vs = rcu_dereference_sk_user_data(sk);
	if (!vs)
		goto drop;

	if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
		vxh = vxlan_remcsum(skb, vxh, sizeof(struct vxlanhdr), vni,
				    !!(vs->flags & VXLAN_F_REMCSUM_NOPARTIAL));
		if (!vxh)
			goto drop;

		flags &= ~VXLAN_HF_RCO;
		vni &= VXLAN_VNI_MASK;
	}

	if (vxlan_collect_metadata(vs)) {
		tun_dst = metadata_dst_alloc(sizeof(*md), GFP_ATOMIC);
		if (!tun_dst)
			goto drop;

		info = &tun_dst->u.tun_info;
		if (vxlan_get_sk_family(vs) == AF_INET) {
			const struct iphdr *iph = ip_hdr(skb);

			info->key.u.ipv4.src = iph->saddr;
			info->key.u.ipv4.dst = iph->daddr;
			info->key.tos = iph->tos;
			info->key.ttl = iph->ttl;
		} else {
			const struct ipv6hdr *ip6h = ipv6_hdr(skb);

			info->key.u.ipv6.src = ip6h->saddr;
			info->key.u.ipv6.dst = ip6h->daddr;
			info->key.tos = ipv6_get_dsfield(ip6h);
			info->key.ttl = ip6h->hop_limit;
		}

		info->key.tp_src = udp_hdr(skb)->source;
		info->key.tp_dst = udp_hdr(skb)->dest;

		info->mode = IP_TUNNEL_INFO_RX;
		info->key.tun_flags = TUNNEL_KEY;
		info->key.tun_id = cpu_to_be64(vni >> 8);
		if (udp_hdr(skb)->check != 0)
			info->key.tun_flags |= TUNNEL_CSUM;

		md = ip_tunnel_info_opts(info, sizeof(*md));
	} else {
		memset(md, 0, sizeof(*md));
	}

	/* For backwards compatibility, only allow reserved fields to be
	 * used by VXLAN extensions if explicitly requested.
	 */
	if ((flags & VXLAN_HF_GBP) && (vs->flags & VXLAN_F_GBP)) {
		struct vxlanhdr_gbp *gbp;

		gbp = (struct vxlanhdr_gbp *)vxh;
		md->gbp = ntohs(gbp->policy_id);

		if (tun_dst)
			info->key.tun_flags |= TUNNEL_VXLAN_OPT;

		if (gbp->dont_learn)
			md->gbp |= VXLAN_GBP_DONT_LEARN;

		if (gbp->policy_applied)
			md->gbp |= VXLAN_GBP_POLICY_APPLIED;

		flags &= ~VXLAN_GBP_USED_BITS;
	}

	if (flags || vni & ~VXLAN_VNI_MASK) {
		/* If there are any unprocessed flags remaining treat
		 * this as a malformed packet. This behavior diverges from
		 * VXLAN RFC (RFC7348) which stipulates that bits in reserved
		 * fields are to be ignored. The approach here maintains
		 * compatibility with previous stack code, and also is more
		 * robust and provides a little more security in adding
		 * extensions to VXLAN.
		 */
		goto bad_flags;
	}

	vxlan_rcv(vs, skb, md, vni >> 8, tun_dst);
	return 0;

drop:
	/* Consume bad packet */
	kfree_skb(skb);
	return 0;

bad_flags:
	netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
		   ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));

error:
	if (tun_dst)
		dst_release((struct dst_entry *)tun_dst);

	/* Return non vxlan pkt */
	return 1;
}
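/* ARP suppression: answer ARP requests for known neighbours locally instead
 * of flooding them across the overlay; with VXLAN_F_L3MISS set, unknown
 * targets are reported to userspace as RTM_GETNEIGH misses.
 */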
static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct arphdr *parp;
	u8 *arpptr, *sha;
	__be32 sip, tip;
	struct neighbour *n;

	if (dev->flags & IFF_NOARP)
		goto out;

	if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
		dev->stats.tx_dropped++;
		goto out;
	}
	parp = arp_hdr(skb);

	if ((parp->ar_hrd != htons(ARPHRD_ETHER) &&
	     parp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    parp->ar_pro != htons(ETH_P_IP) ||
	    parp->ar_op != htons(ARPOP_REQUEST) ||
	    parp->ar_hln != dev->addr_len ||
	    parp->ar_pln != 4)
		goto out;
	arpptr = (u8 *)parp + sizeof(struct arphdr);
	sha = arpptr;
	arpptr += dev->addr_len;	/* sha */
	memcpy(&sip, arpptr, sizeof(sip));
	arpptr += sizeof(sip);
	arpptr += dev->addr_len;	/* tha */
	memcpy(&tip, arpptr, sizeof(tip));

	if (ipv4_is_loopback(tip) ||
	    ipv4_is_multicast(tip))
		goto out;

	n = neigh_lookup(&arp_tbl, &tip, dev);

	if (n) {
		struct vxlan_fdb *f;
		struct sk_buff *reply;

		if (!(n->nud_state & NUD_CONNECTED)) {
			neigh_release(n);
			goto out;
		}

		f = vxlan_find_mac(vxlan, n->ha);
		if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
			/* bridge-local neighbor */
			neigh_release(n);
			goto out;
		}

		reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
				   n->ha, sha);

		neigh_release(n);

		if (reply == NULL)
			goto out;

		skb_reset_mac_header(reply);
		__skb_pull(reply, skb_network_offset(reply));
		reply->ip_summed = CHECKSUM_UNNECESSARY;
		reply->pkt_type = PACKET_HOST;

		if (netif_rx_ni(reply) == NET_RX_DROP)
			dev->stats.rx_dropped++;
	} else if (vxlan->flags & VXLAN_F_L3MISS) {
		union vxlan_addr ipa = {
			.sin.sin_addr.s_addr = tip,
			.sin.sin_family = AF_INET,
		};

		vxlan_ip_miss(dev, &ipa);
	}
out:
	consume_skb(skb);
	return NETDEV_TX_OK;
}
#if IS_ENABLED(CONFIG_IPV6)
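/* The IPv6 counterpart to ARP suppression: build a Neighbour Advertisement
 * in reply to a snooped Neighbour Solicitation, using the link-layer
 * address from the local neighbour table.
 */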
  1194. static struct sk_buff *vxlan_na_create(struct sk_buff *request,
  1195. struct neighbour *n, bool isrouter)
  1196. {
  1197. struct net_device *dev = request->dev;
  1198. struct sk_buff *reply;
  1199. struct nd_msg *ns, *na;
  1200. struct ipv6hdr *pip6;
  1201. u8 *daddr;
  1202. int na_olen = 8; /* opt hdr + ETH_ALEN for target */
  1203. int ns_olen;
  1204. int i, len;
  1205. if (dev == NULL)
  1206. return NULL;
  1207. len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) +
  1208. sizeof(*na) + na_olen + dev->needed_tailroom;
  1209. reply = alloc_skb(len, GFP_ATOMIC);
  1210. if (reply == NULL)
  1211. return NULL;
  1212. reply->protocol = htons(ETH_P_IPV6);
  1213. reply->dev = dev;
  1214. skb_reserve(reply, LL_RESERVED_SPACE(request->dev));
  1215. skb_push(reply, sizeof(struct ethhdr));
  1216. skb_set_mac_header(reply, 0);
  1217. ns = (struct nd_msg *)skb_transport_header(request);
  1218. daddr = eth_hdr(request)->h_source;
  1219. ns_olen = request->len - skb_transport_offset(request) - sizeof(*ns);
  1220. for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) {
  1221. if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
  1222. daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
  1223. break;
  1224. }
  1225. }
  1226. /* Ethernet header */
  1227. ether_addr_copy(eth_hdr(reply)->h_dest, daddr);
  1228. ether_addr_copy(eth_hdr(reply)->h_source, n->ha);
  1229. eth_hdr(reply)->h_proto = htons(ETH_P_IPV6);
  1230. reply->protocol = htons(ETH_P_IPV6);
  1231. skb_pull(reply, sizeof(struct ethhdr));
  1232. skb_set_network_header(reply, 0);
  1233. skb_put(reply, sizeof(struct ipv6hdr));
  1234. /* IPv6 header */
  1235. pip6 = ipv6_hdr(reply);
  1236. memset(pip6, 0, sizeof(struct ipv6hdr));
  1237. pip6->version = 6;
  1238. pip6->priority = ipv6_hdr(request)->priority;
  1239. pip6->nexthdr = IPPROTO_ICMPV6;
  1240. pip6->hop_limit = 255;
  1241. pip6->daddr = ipv6_hdr(request)->saddr;
  1242. pip6->saddr = *(struct in6_addr *)n->primary_key;
  1243. skb_pull(reply, sizeof(struct ipv6hdr));
  1244. skb_set_transport_header(reply, 0);
  1245. na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen);
  1246. /* Neighbor Advertisement */
  1247. memset(na, 0, sizeof(*na)+na_olen);
  1248. na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
  1249. na->icmph.icmp6_router = isrouter;
  1250. na->icmph.icmp6_override = 1;
  1251. na->icmph.icmp6_solicited = 1;
  1252. na->target = ns->target;
  1253. ether_addr_copy(&na->opt[2], n->ha);
  1254. na->opt[0] = ND_OPT_TARGET_LL_ADDR;
  1255. na->opt[1] = na_olen >> 3;
  1256. na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr,
  1257. &pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6,
  1258. csum_partial(na, sizeof(*na)+na_olen, 0));
  1259. pip6->payload_len = htons(sizeof(*na)+na_olen);
  1260. skb_push(reply, sizeof(struct ipv6hdr));
  1261. reply->ip_summed = CHECKSUM_UNNECESSARY;
  1262. return reply;
  1263. }
static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct nd_msg *msg;
	const struct ipv6hdr *iphdr;
	const struct in6_addr *saddr, *daddr;
	struct neighbour *n;
	struct inet6_dev *in6_dev;

	in6_dev = __in6_dev_get(dev);
	if (!in6_dev)
		goto out;

	iphdr = ipv6_hdr(skb);
	saddr = &iphdr->saddr;
	daddr = &iphdr->daddr;

	msg = (struct nd_msg *)skb_transport_header(skb);
	if (msg->icmph.icmp6_code != 0 ||
	    msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
		goto out;

	if (ipv6_addr_loopback(daddr) ||
	    ipv6_addr_is_multicast(&msg->target))
		goto out;

	n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev);

	if (n) {
		struct vxlan_fdb *f;
		struct sk_buff *reply;

		if (!(n->nud_state & NUD_CONNECTED)) {
			neigh_release(n);
			goto out;
		}

		f = vxlan_find_mac(vxlan, n->ha);
		if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
			/* bridge-local neighbor */
			neigh_release(n);
			goto out;
		}

		reply = vxlan_na_create(skb, n,
					!!(f ? f->flags & NTF_ROUTER : 0));

		neigh_release(n);

		if (reply == NULL)
			goto out;

		if (netif_rx_ni(reply) == NET_RX_DROP)
			dev->stats.rx_dropped++;

	} else if (vxlan->flags & VXLAN_F_L3MISS) {
		union vxlan_addr ipa = {
			.sin6.sin6_addr = msg->target,
			.sin6.sin6_family = AF_INET6,
		};

		vxlan_ip_miss(dev, &ipa);
	}

out:
	consume_skb(skb);
	return NETDEV_TX_OK;
}
#endif
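
/* Route short-circuiting: if the inner destination IP resolves to a known
 * neighbour whose MAC differs from the inner destination MAC, rewrite the
 * inner Ethernet header in place. Returns true if the header was rewritten.
 */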
static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct neighbour *n;

	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
		return false;

	n = NULL;
	switch (ntohs(eth_hdr(skb)->h_proto)) {
	case ETH_P_IP:
	{
		struct iphdr *pip;

		if (!pskb_may_pull(skb, sizeof(struct iphdr)))
			return false;
		pip = ip_hdr(skb);
		n = neigh_lookup(&arp_tbl, &pip->daddr, dev);
		if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
			union vxlan_addr ipa = {
				.sin.sin_addr.s_addr = pip->daddr,
				.sin.sin_family = AF_INET,
			};

			vxlan_ip_miss(dev, &ipa);
			return false;
		}

		break;
	}
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
	{
		struct ipv6hdr *pip6;

		if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
			return false;
		pip6 = ipv6_hdr(skb);
		n = neigh_lookup(ipv6_stub->nd_tbl, &pip6->daddr, dev);
		if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
			union vxlan_addr ipa = {
				.sin6.sin6_addr = pip6->daddr,
				.sin6.sin6_family = AF_INET6,
			};

			vxlan_ip_miss(dev, &ipa);
			return false;
		}

		break;
	}
#endif
	default:
		return false;
	}

	if (n) {
		bool diff;

		diff = !ether_addr_equal(eth_hdr(skb)->h_dest, n->ha);
		if (diff) {
			memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
				dev->addr_len);
			memcpy(eth_hdr(skb)->h_dest, n->ha, dev->addr_len);
		}
		neigh_release(n);
		return diff;
	}

	return false;
}
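
/* Group Based Policy extension (VXLAN-GBP): sets the GBP flag in the VXLAN
 * header and encodes the don't-learn bit, the policy-applied bit and the
 * 16-bit policy ID taken from the tunnel metadata.
 */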
static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags,
				struct vxlan_metadata *md)
{
	struct vxlanhdr_gbp *gbp;

	if (!md->gbp)
		return;

	gbp = (struct vxlanhdr_gbp *)vxh;
	vxh->vx_flags |= htonl(VXLAN_HF_GBP);

	if (md->gbp & VXLAN_GBP_DONT_LEARN)
		gbp->dont_learn = 1;

	if (md->gbp & VXLAN_GBP_POLICY_APPLIED)
		gbp->policy_applied = 1;

	gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK);
}

#if IS_ENABLED(CONFIG_IPV6)
static int vxlan6_xmit_skb(struct dst_entry *dst, struct sock *sk,
			   struct sk_buff *skb,
			   struct net_device *dev, struct in6_addr *saddr,
			   struct in6_addr *daddr, __u8 prio, __u8 ttl,
			   __be16 src_port, __be16 dst_port, __be32 vni,
			   struct vxlan_metadata *md, bool xnet, u32 vxflags)
{
	struct vxlanhdr *vxh;
	int min_headroom;
	int err;
	bool udp_sum = !(vxflags & VXLAN_F_UDP_ZERO_CSUM6_TX);
	int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
	u16 hdrlen = sizeof(struct vxlanhdr);

	if ((vxflags & VXLAN_F_REMCSUM_TX) &&
	    skb->ip_summed == CHECKSUM_PARTIAL) {
		int csum_start = skb_checksum_start_offset(skb);

		if (csum_start <= VXLAN_MAX_REMCSUM_START &&
		    !(csum_start & VXLAN_RCO_SHIFT_MASK) &&
		    (skb->csum_offset == offsetof(struct udphdr, check) ||
		     skb->csum_offset == offsetof(struct tcphdr, check))) {
			udp_sum = false;
			type |= SKB_GSO_TUNNEL_REMCSUM;
		}
	}

	skb_scrub_packet(skb, xnet);

	min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
			+ VXLAN_HLEN + sizeof(struct ipv6hdr)
			+ (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);

	/* Need space for new headers (invalidates iph ptr) */
	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err)) {
		kfree_skb(skb);
		goto err;
	}

	skb = vlan_hwaccel_push_inside(skb);
	if (WARN_ON(!skb)) {
		err = -ENOMEM;
		goto err;
	}

	skb = iptunnel_handle_offloads(skb, udp_sum, type);
	if (IS_ERR(skb)) {
		err = -EINVAL;
		goto err;
	}

	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
	vxh->vx_flags = htonl(VXLAN_HF_VNI);
	vxh->vx_vni = vni;
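	/* Remote checksum offload: the inner checksum start offset, measured
	 * from the end of the VXLAN header and scaled down by VXLAN_RCO_SHIFT,
	 * is folded into the low bits of the VNI field; VXLAN_RCO_UDP
	 * distinguishes a UDP from a TCP inner checksum.
	 */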
	if (type & SKB_GSO_TUNNEL_REMCSUM) {
		u32 data = (skb_checksum_start_offset(skb) - hdrlen) >>
			   VXLAN_RCO_SHIFT;

		if (skb->csum_offset == offsetof(struct udphdr, check))
			data |= VXLAN_RCO_UDP;

		vxh->vx_vni |= htonl(data);
		vxh->vx_flags |= htonl(VXLAN_HF_RCO);

		if (!skb_is_gso(skb)) {
			skb->ip_summed = CHECKSUM_NONE;
			skb->encapsulation = 0;
		}
	}

	if (vxflags & VXLAN_F_GBP)
		vxlan_build_gbp_hdr(vxh, vxflags, md);

	skb_set_inner_protocol(skb, htons(ETH_P_TEB));

	udp_tunnel6_xmit_skb(dst, sk, skb, dev, saddr, daddr, prio,
			     ttl, src_port, dst_port,
			     !!(vxflags & VXLAN_F_UDP_ZERO_CSUM6_TX));
	return 0;

err:
	dst_release(dst);
	return err;
}
#endif

static int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
			  __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
			  __be16 src_port, __be16 dst_port, __be32 vni,
			  struct vxlan_metadata *md, bool xnet, u32 vxflags)
{
	struct vxlanhdr *vxh;
	int min_headroom;
	int err;
	bool udp_sum = !!(vxflags & VXLAN_F_UDP_CSUM);
	int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
	u16 hdrlen = sizeof(struct vxlanhdr);

	if ((vxflags & VXLAN_F_REMCSUM_TX) &&
	    skb->ip_summed == CHECKSUM_PARTIAL) {
		int csum_start = skb_checksum_start_offset(skb);

		if (csum_start <= VXLAN_MAX_REMCSUM_START &&
		    !(csum_start & VXLAN_RCO_SHIFT_MASK) &&
		    (skb->csum_offset == offsetof(struct udphdr, check) ||
		     skb->csum_offset == offsetof(struct tcphdr, check))) {
			udp_sum = false;
			type |= SKB_GSO_TUNNEL_REMCSUM;
		}
	}

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
			+ VXLAN_HLEN + sizeof(struct iphdr)
			+ (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);

	/* Need space for new headers (invalidates iph ptr) */
	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err)) {
		kfree_skb(skb);
		return err;
	}

	skb = vlan_hwaccel_push_inside(skb);
	if (WARN_ON(!skb))
		return -ENOMEM;

	skb = iptunnel_handle_offloads(skb, udp_sum, type);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
	vxh->vx_flags = htonl(VXLAN_HF_VNI);
	vxh->vx_vni = vni;

	if (type & SKB_GSO_TUNNEL_REMCSUM) {
		u32 data = (skb_checksum_start_offset(skb) - hdrlen) >>
			   VXLAN_RCO_SHIFT;

		if (skb->csum_offset == offsetof(struct udphdr, check))
			data |= VXLAN_RCO_UDP;

		vxh->vx_vni |= htonl(data);
		vxh->vx_flags |= htonl(VXLAN_HF_RCO);

		if (!skb_is_gso(skb)) {
			skb->ip_summed = CHECKSUM_NONE;
			skb->encapsulation = 0;
		}
	}

	if (vxflags & VXLAN_F_GBP)
		vxlan_build_gbp_hdr(vxh, vxflags, md);

	skb_set_inner_protocol(skb, htons(ETH_P_TEB));

	return udp_tunnel_xmit_skb(rt, sk, skb, src, dst, tos,
				   ttl, df, src_port, dst_port, xnet,
				   !(vxflags & VXLAN_F_UDP_CSUM));
}

/* Bypass encapsulation if the destination is local */
static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
			       struct vxlan_dev *dst_vxlan)
{
	struct pcpu_sw_netstats *tx_stats, *rx_stats;
	union vxlan_addr loopback;
	union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
	struct net_device *dev = skb->dev;
	int len = skb->len;

	tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
	rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
	skb->pkt_type = PACKET_HOST;
	skb->encapsulation = 0;
	skb->dev = dst_vxlan->dev;
	__skb_pull(skb, skb_network_offset(skb));

	if (remote_ip->sa.sa_family == AF_INET) {
		loopback.sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
		loopback.sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		loopback.sin6.sin6_addr = in6addr_loopback;
		loopback.sa.sa_family = AF_INET6;
#endif
	}

	if (dst_vxlan->flags & VXLAN_F_LEARN)
		vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source);

	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->tx_packets++;
	tx_stats->tx_bytes += len;
	u64_stats_update_end(&tx_stats->syncp);

	if (netif_rx(skb) == NET_RX_SUCCESS) {
		u64_stats_update_begin(&rx_stats->syncp);
		rx_stats->rx_packets++;
		rx_stats->rx_bytes += len;
		u64_stats_update_end(&rx_stats->syncp);
	} else {
		dev->stats.rx_dropped++;
	}
}
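
/* Encapsulate one skb and send it to the remote given by rdst, or, when
 * operating in collect-metadata mode (rdst == NULL), to the destination
 * carried in the skb's tunnel info.
 */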
static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
			   struct vxlan_rdst *rdst, bool did_rsc)
{
	struct ip_tunnel_info *info;
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct sock *sk = vxlan->vn_sock->sock->sk;
	unsigned short family = vxlan_get_sk_family(vxlan->vn_sock);
	struct rtable *rt = NULL;
	const struct iphdr *old_iph;
	struct flowi4 fl4;
	union vxlan_addr *dst;
	union vxlan_addr remote_ip;
	struct vxlan_metadata _md;
	struct vxlan_metadata *md = &_md;
	__be16 src_port = 0, dst_port;
	u32 vni;
	__be16 df = 0;
	__u8 tos, ttl;
	int err;
	u32 flags = vxlan->flags;

	info = skb_tunnel_info(skb);

	if (rdst) {
		dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
		vni = rdst->remote_vni;
		dst = &rdst->remote_ip;
	} else {
		if (!info) {
			WARN_ONCE(1, "%s: Missing encapsulation instructions\n",
				  dev->name);
			goto drop;
		}
		dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
		vni = be64_to_cpu(info->key.tun_id);
		remote_ip.sa.sa_family = family;
		if (family == AF_INET)
			remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst;
		else
			remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst;
		dst = &remote_ip;
	}

	if (vxlan_addr_any(dst)) {
		if (did_rsc) {
			/* short-circuited back to local bridge */
			vxlan_encap_bypass(skb, vxlan, vxlan);
			return;
		}
		goto drop;
	}

	old_iph = ip_hdr(skb);

	ttl = vxlan->cfg.ttl;
	if (!ttl && vxlan_addr_multicast(dst))
		ttl = 1;

	tos = vxlan->cfg.tos;
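	/* A configured TOS of 1 is treated as "inherit": copy the DSCP/ECN
	 * field from the inner header (this is the value iproute2 sets for
	 * "tos inherit").
	 */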
	if (tos == 1)
		tos = ip_tunnel_get_dsfield(old_iph, skb);

	src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
				     vxlan->cfg.port_max, true);

	if (info) {
		if (info->key.tun_flags & TUNNEL_CSUM)
			flags |= VXLAN_F_UDP_CSUM;
		else
			flags &= ~VXLAN_F_UDP_CSUM;

		ttl = info->key.ttl;
		tos = info->key.tos;

		if (info->options_len)
			md = ip_tunnel_info_opts(info, sizeof(*md));
	} else {
		md->gbp = skb->mark;
	}
	if (dst->sa.sa_family == AF_INET) {
		if (info && (info->key.tun_flags & TUNNEL_DONT_FRAGMENT))
			df = htons(IP_DF);

		memset(&fl4, 0, sizeof(fl4));
		fl4.flowi4_oif = rdst ? rdst->remote_ifindex : 0;
		fl4.flowi4_tos = RT_TOS(tos);
		fl4.flowi4_mark = skb->mark;
		fl4.flowi4_proto = IPPROTO_UDP;
		fl4.daddr = dst->sin.sin_addr.s_addr;
		fl4.saddr = vxlan->cfg.saddr.sin.sin_addr.s_addr;

		rt = ip_route_output_key(vxlan->net, &fl4);
		if (IS_ERR(rt)) {
			netdev_dbg(dev, "no route to %pI4\n",
				   &dst->sin.sin_addr.s_addr);
			dev->stats.tx_carrier_errors++;
			goto tx_error;
		}

		if (rt->dst.dev == dev) {
			netdev_dbg(dev, "circular route to %pI4\n",
				   &dst->sin.sin_addr.s_addr);
			dev->stats.collisions++;
			goto rt_tx_error;
		}

		/* Bypass encapsulation if the destination is local */
		if (rt->rt_flags & RTCF_LOCAL &&
		    !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
			struct vxlan_dev *dst_vxlan;

			ip_rt_put(rt);
			dst_vxlan = vxlan_find_vni(vxlan->net, vni,
						   dst->sa.sa_family, dst_port,
						   vxlan->flags);
			if (!dst_vxlan)
				goto tx_error;
			vxlan_encap_bypass(skb, vxlan, dst_vxlan);
			return;
		}

		tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
		ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
		err = vxlan_xmit_skb(rt, sk, skb, fl4.saddr,
				     dst->sin.sin_addr.s_addr, tos, ttl, df,
				     src_port, dst_port, htonl(vni << 8), md,
				     !net_eq(vxlan->net, dev_net(vxlan->dev)),
				     flags);
		if (err < 0) {
			/* skb is already freed. */
			skb = NULL;
			goto rt_tx_error;
		}

		iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		struct dst_entry *ndst;
		struct flowi6 fl6;
		u32 rt6i_flags;

		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_oif = rdst ? rdst->remote_ifindex : 0;
		fl6.daddr = dst->sin6.sin6_addr;
		fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr;
		fl6.flowi6_mark = skb->mark;
		fl6.flowi6_proto = IPPROTO_UDP;

		if (ipv6_stub->ipv6_dst_lookup(vxlan->net, sk, &ndst, &fl6)) {
			netdev_dbg(dev, "no route to %pI6\n",
				   &dst->sin6.sin6_addr);
			dev->stats.tx_carrier_errors++;
			goto tx_error;
		}

		if (ndst->dev == dev) {
			netdev_dbg(dev, "circular route to %pI6\n",
				   &dst->sin6.sin6_addr);
			dst_release(ndst);
			dev->stats.collisions++;
			goto tx_error;
		}

		/* Bypass encapsulation if the destination is local */
		rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags;
		if (rt6i_flags & RTF_LOCAL &&
		    !(rt6i_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
			struct vxlan_dev *dst_vxlan;

			dst_release(ndst);
			dst_vxlan = vxlan_find_vni(vxlan->net, vni,
						   dst->sa.sa_family, dst_port,
						   vxlan->flags);
			if (!dst_vxlan)
				goto tx_error;
			vxlan_encap_bypass(skb, vxlan, dst_vxlan);
			return;
		}

		ttl = ttl ? : ip6_dst_hoplimit(ndst);
		err = vxlan6_xmit_skb(ndst, sk, skb, dev, &fl6.saddr, &fl6.daddr,
				      0, ttl, src_port, dst_port, htonl(vni << 8), md,
				      !net_eq(vxlan->net, dev_net(vxlan->dev)),
				      flags);
#endif
	}

	return;

drop:
	dev->stats.tx_dropped++;
	goto tx_free;

rt_tx_error:
	ip_rt_put(rt);
tx_error:
	dev->stats.tx_errors++;
tx_free:
	dev_kfree_skb(skb);
}

/* Transmit local packets over VXLAN
 *
 * The outer IP header inherits ECN and DF from the inner header.
 * The outer UDP destination is the configured VXLAN port; the
 * source port is based on a hash of the flow.
 */
static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	const struct ip_tunnel_info *info;
	struct ethhdr *eth;
	bool did_rsc = false;
	struct vxlan_rdst *rdst, *fdst = NULL;
	struct vxlan_fdb *f;

	info = skb_tunnel_info(skb);

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	if (vxlan->flags & VXLAN_F_PROXY) {
		if (ntohs(eth->h_proto) == ETH_P_ARP)
			return arp_reduce(dev, skb);
#if IS_ENABLED(CONFIG_IPV6)
		else if (ntohs(eth->h_proto) == ETH_P_IPV6 &&
			 pskb_may_pull(skb, sizeof(struct ipv6hdr)
				       + sizeof(struct nd_msg)) &&
			 ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
			struct nd_msg *msg;

			msg = (struct nd_msg *)skb_transport_header(skb);
			if (msg->icmph.icmp6_code == 0 &&
			    msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
				return neigh_reduce(dev, skb);
		}
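		/* pskb_may_pull() above may have reallocated the header;
		 * refresh the cached pointer.
		 */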
		eth = eth_hdr(skb);
#endif
	}

	if (vxlan->flags & VXLAN_F_COLLECT_METADATA &&
	    info && info->mode == IP_TUNNEL_INFO_TX) {
		vxlan_xmit_one(skb, dev, NULL, false);
		return NETDEV_TX_OK;
	}

	f = vxlan_find_mac(vxlan, eth->h_dest);
	did_rsc = false;

	if (f && (f->flags & NTF_ROUTER) && (vxlan->flags & VXLAN_F_RSC) &&
	    (ntohs(eth->h_proto) == ETH_P_IP ||
	     ntohs(eth->h_proto) == ETH_P_IPV6)) {
		did_rsc = route_shortcircuit(dev, skb);
		if (did_rsc)
			f = vxlan_find_mac(vxlan, eth->h_dest);
	}

	if (f == NULL) {
		f = vxlan_find_mac(vxlan, all_zeros_mac);
		if (f == NULL) {
			if ((vxlan->flags & VXLAN_F_L2MISS) &&
			    !is_multicast_ether_addr(eth->h_dest))
				vxlan_fdb_miss(vxlan, eth->h_dest);

			dev->stats.tx_dropped++;
			kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	}
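	/* Flood to every remote in the entry: clone for all but the first
	 * destination and send the original skb last, saving one copy.
	 */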
	list_for_each_entry_rcu(rdst, &f->remotes, list) {
		struct sk_buff *skb1;

		if (!fdst) {
			fdst = rdst;
			continue;
		}
		skb1 = skb_clone(skb, GFP_ATOMIC);
		if (skb1)
			vxlan_xmit_one(skb1, dev, rdst, did_rsc);
	}

	if (fdst)
		vxlan_xmit_one(skb, dev, fdst, did_rsc);
	else
		kfree_skb(skb);
	return NETDEV_TX_OK;
}

/* Walk the forwarding table and purge stale entries */
static void vxlan_cleanup(unsigned long arg)
{
	struct vxlan_dev *vxlan = (struct vxlan_dev *) arg;
	unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
	unsigned int h;

	if (!netif_running(vxlan->dev))
		return;

	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;

		spin_lock_bh(&vxlan->hash_lock);
		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			unsigned long timeout;

			if (f->state & NUD_PERMANENT)
				continue;

			timeout = f->used + vxlan->cfg.age_interval * HZ;
			if (time_before_eq(timeout, jiffies)) {
				netdev_dbg(vxlan->dev,
					   "garbage collect %pM\n",
					   f->eth_addr);
				f->state = NUD_STALE;
				vxlan_fdb_destroy(vxlan, f);
			} else if (time_before(timeout, next_timer))
				next_timer = timeout;
		}
		spin_unlock_bh(&vxlan->hash_lock);
	}

	mod_timer(&vxlan->age_timer, next_timer);
}
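
/* Link the device into the socket's per-VNI hash so incoming packets
 * carrying this VNI can be demultiplexed to it.
 */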
static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
{
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
	__u32 vni = vxlan->default_dst.remote_vni;

	vxlan->vn_sock = vs;
	spin_lock(&vn->sock_lock);
	hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
	spin_unlock(&vn->sock_lock);
}

/* Setup stats when device is created */
static int vxlan_init(struct net_device *dev)
{
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	return 0;
}

static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan)
{
	struct vxlan_fdb *f;

	spin_lock_bh(&vxlan->hash_lock);
	f = __vxlan_find_mac(vxlan, all_zeros_mac);
	if (f)
		vxlan_fdb_destroy(vxlan, f);
	spin_unlock_bh(&vxlan->hash_lock);
}

static void vxlan_uninit(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	vxlan_fdb_delete_default(vxlan);
	free_percpu(dev->tstats);
}

/* Start ageing timer and join group when device is brought up */
static int vxlan_open(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_sock *vs;
	int ret = 0;

	vs = vxlan_sock_add(vxlan->net, vxlan->cfg.dst_port,
			    vxlan->cfg.no_share, vxlan->flags);
	if (IS_ERR(vs))
		return PTR_ERR(vs);

	vxlan_vs_add_dev(vs, vxlan);

	if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
		ret = vxlan_igmp_join(vxlan);
		if (ret) {
			vxlan_sock_release(vs);
			return ret;
		}
	}

	if (vxlan->cfg.age_interval)
		mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);

	return ret;
}

/* Purge the forwarding table */
static void vxlan_flush(struct vxlan_dev *vxlan)
{
	unsigned int h;

	spin_lock_bh(&vxlan->hash_lock);
	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;

		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			/* the all_zeros_mac entry is deleted at vxlan_uninit */
			if (!is_zero_ether_addr(f->eth_addr))
				vxlan_fdb_destroy(vxlan, f);
		}
	}
	spin_unlock_bh(&vxlan->hash_lock);
}

/* Cleanup timer and forwarding table on shutdown */
static int vxlan_stop(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
	struct vxlan_sock *vs = vxlan->vn_sock;
	int ret = 0;

	if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
	    !vxlan_group_used(vn, vxlan))
		ret = vxlan_igmp_leave(vxlan);

	del_timer_sync(&vxlan->age_timer);

	vxlan_flush(vxlan);
	vxlan_sock_release(vs);

	return ret;
}

/* Stub, nothing needs to be done. */
static void vxlan_set_multicast_list(struct net_device *dev)
{
}
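
/* The MTU ceiling leaves room for the encapsulation overhead on the lower
 * device: VXLAN_HEADROOM covers the outer IPv4 (20), UDP (8), VXLAN (8) and
 * inner Ethernet (14) headers, i.e. 50 bytes; VXLAN6_HEADROOM is 70 bytes
 * with an outer IPv6 header. The 68-byte floor is the minimum IPv4 MTU.
 */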
static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_rdst *dst = &vxlan->default_dst;
	struct net_device *lowerdev;
	int max_mtu;

	lowerdev = __dev_get_by_index(vxlan->net, dst->remote_ifindex);
	if (lowerdev == NULL)
		return eth_change_mtu(dev, new_mtu);

	if (dst->remote_ip.sa.sa_family == AF_INET6)
		max_mtu = lowerdev->mtu - VXLAN6_HEADROOM;
	else
		max_mtu = lowerdev->mtu - VXLAN_HEADROOM;

	if (new_mtu < 68 || new_mtu > max_mtu)
		return -EINVAL;

	dev->mtu = new_mtu;
	return 0;
}

static const struct net_device_ops vxlan_netdev_ops = {
	.ndo_init		= vxlan_init,
	.ndo_uninit		= vxlan_uninit,
	.ndo_open		= vxlan_open,
	.ndo_stop		= vxlan_stop,
	.ndo_start_xmit		= vxlan_xmit,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_set_rx_mode	= vxlan_set_multicast_list,
	.ndo_change_mtu		= vxlan_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_fdb_add		= vxlan_fdb_add,
	.ndo_fdb_del		= vxlan_fdb_delete,
	.ndo_fdb_dump		= vxlan_fdb_dump,
};

/* Info for udev, that this is a virtual tunnel endpoint */
static struct device_type vxlan_type = {
	.name = "vxlan",
};

/* Calls the caller's ndo_add_vxlan_port callback once for each listening
 * VXLAN UDP port. Callers are expected to implement ndo_add_vxlan_port.
 */
void vxlan_get_rx_port(struct net_device *dev)
{
	struct vxlan_sock *vs;
	struct net *net = dev_net(dev);
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	sa_family_t sa_family;
	__be16 port;
	unsigned int i;

	spin_lock(&vn->sock_lock);
	for (i = 0; i < PORT_HASH_SIZE; ++i) {
		hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) {
			port = inet_sk(vs->sock->sk)->inet_sport;
			sa_family = vxlan_get_sk_family(vs);
			dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
							    port);
		}
	}
	spin_unlock(&vn->sock_lock);
}
EXPORT_SYMBOL_GPL(vxlan_get_rx_port);

/* Initialize the device structure. */
static void vxlan_setup(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned int h;

	eth_hw_addr_random(dev);
	ether_setup(dev);
	if (vxlan->default_dst.remote_ip.sa.sa_family == AF_INET6)
		dev->needed_headroom = ETH_HLEN + VXLAN6_HEADROOM;
	else
		dev->needed_headroom = ETH_HLEN + VXLAN_HEADROOM;

	dev->netdev_ops = &vxlan_netdev_ops;
	dev->destructor = free_netdev;
	SET_NETDEV_DEVTYPE(dev, &vxlan_type);

	dev->features |= NETIF_F_LLTX;
	dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->features |= NETIF_F_RXCSUM;
	dev->features |= NETIF_F_GSO_SOFTWARE;

	dev->vlan_features = dev->features;
	dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
	dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
	netif_keep_dst(dev);
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;

	INIT_LIST_HEAD(&vxlan->next);
	spin_lock_init(&vxlan->hash_lock);
	init_timer_deferrable(&vxlan->age_timer);
	vxlan->age_timer.function = vxlan_cleanup;
	vxlan->age_timer.data = (unsigned long) vxlan;

	vxlan->cfg.dst_port = htons(vxlan_port);

	vxlan->dev = dev;

	for (h = 0; h < FDB_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
}

static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
	[IFLA_VXLAN_ID]		= { .type = NLA_U32 },
	[IFLA_VXLAN_GROUP]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_VXLAN_GROUP6]	= { .len = sizeof(struct in6_addr) },
	[IFLA_VXLAN_LINK]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_VXLAN_LOCAL6]	= { .len = sizeof(struct in6_addr) },
	[IFLA_VXLAN_TOS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_TTL]	= { .type = NLA_U8 },
	[IFLA_VXLAN_LEARNING]	= { .type = NLA_U8 },
	[IFLA_VXLAN_AGEING]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LIMIT]	= { .type = NLA_U32 },
	[IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) },
	[IFLA_VXLAN_PROXY]	= { .type = NLA_U8 },
	[IFLA_VXLAN_RSC]	= { .type = NLA_U8 },
	[IFLA_VXLAN_L2MISS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_L3MISS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_COLLECT_METADATA]	= { .type = NLA_U8 },
	[IFLA_VXLAN_PORT]	= { .type = NLA_U16 },
	[IFLA_VXLAN_UDP_CSUM]	= { .type = NLA_U8 },
	[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]	= { .type = NLA_U8 },
	[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]	= { .type = NLA_U8 },
	[IFLA_VXLAN_REMCSUM_TX]	= { .type = NLA_U8 },
	[IFLA_VXLAN_REMCSUM_RX]	= { .type = NLA_U8 },
	[IFLA_VXLAN_GBP]	= { .type = NLA_FLAG, },
	[IFLA_VXLAN_REMCSUM_NOPARTIAL]	= { .type = NLA_FLAG },
};

static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
			pr_debug("invalid link address (not ethernet)\n");
			return -EINVAL;
		}

		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
			pr_debug("invalid all zero ethernet address\n");
			return -EADDRNOTAVAIL;
		}
	}

	if (!data)
		return -EINVAL;

	if (data[IFLA_VXLAN_ID]) {
		__u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);

		/* VNIs are 24 bits wide; comparing against VXLAN_VID_MASK
		 * would wrongly reject the highest valid VNI (0xffffff).
		 */
		if (id >= VXLAN_N_VID)
			return -ERANGE;
	}

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		const struct ifla_vxlan_port_range *p
			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);

		if (ntohs(p->high) < ntohs(p->low)) {
			pr_debug("port range %u .. %u not valid\n",
				 ntohs(p->low), ntohs(p->high));
			return -EINVAL;
		}
	}

	return 0;
}

static void vxlan_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version));
	strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver));
}

static const struct ethtool_ops vxlan_ethtool_ops = {
	.get_drvinfo	= vxlan_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};

static void vxlan_del_work(struct work_struct *work)
{
	struct vxlan_sock *vs = container_of(work, struct vxlan_sock, del_work);

	udp_tunnel_sock_release(vs->sock);
	kfree_rcu(vs, rcu);
}

static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
					__be16 port, u32 flags)
{
	struct socket *sock;
	struct udp_port_cfg udp_conf;
	int err;

	memset(&udp_conf, 0, sizeof(udp_conf));

	if (ipv6) {
		udp_conf.family = AF_INET6;
		udp_conf.use_udp6_rx_checksums =
			!(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
	} else {
		udp_conf.family = AF_INET;
	}

	udp_conf.local_udp_port = port;

	/* Open UDP socket */
	err = udp_sock_create(net, &udp_conf, &sock);
	if (err < 0)
		return ERR_PTR(err);

	return sock;
}

/* Create new listen socket if needed */
static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
					      u32 flags)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_sock *vs;
	struct socket *sock;
	unsigned int h;
	bool ipv6 = !!(flags & VXLAN_F_IPV6);
	struct udp_tunnel_sock_cfg tunnel_cfg;

	vs = kzalloc(sizeof(*vs), GFP_KERNEL);
	if (!vs)
		return ERR_PTR(-ENOMEM);

	for (h = 0; h < VNI_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vs->vni_list[h]);

	INIT_WORK(&vs->del_work, vxlan_del_work);

	sock = vxlan_create_sock(net, ipv6, port, flags);
	if (IS_ERR(sock)) {
		pr_info("Cannot bind port %d, err=%ld\n", ntohs(port),
			PTR_ERR(sock));
		kfree(vs);
		return ERR_CAST(sock);
	}

	vs->sock = sock;
	atomic_set(&vs->refcnt, 1);
	vs->flags = (flags & VXLAN_F_RCV_FLAGS);

	/* Initialize the vxlan udp offloads structure */
	vs->udp_offloads.port = port;
	vs->udp_offloads.callbacks.gro_receive  = vxlan_gro_receive;
	vs->udp_offloads.callbacks.gro_complete = vxlan_gro_complete;

	spin_lock(&vn->sock_lock);
	hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
	vxlan_notify_add_rx_port(vs);
	spin_unlock(&vn->sock_lock);

	/* Mark socket as an encapsulation socket. */
	tunnel_cfg.sk_user_data = vs;
	tunnel_cfg.encap_type = 1;
	tunnel_cfg.encap_rcv = vxlan_udp_encap_recv;
	tunnel_cfg.encap_destroy = NULL;

	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);

	return vs;
}
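
/* Reuse an existing listening socket for this port (bumping its refcount)
 * unless no_share was requested; otherwise create a new one.
 */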
static struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
					 bool no_share, u32 flags)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_sock *vs;
	bool ipv6 = flags & VXLAN_F_IPV6;

	if (!no_share) {
		spin_lock(&vn->sock_lock);
		vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port,
				     flags);
		if (vs) {
			if (!atomic_add_unless(&vs->refcnt, 1, 0))
				vs = ERR_PTR(-EBUSY);
			spin_unlock(&vn->sock_lock);
			return vs;
		}
		spin_unlock(&vn->sock_lock);
	}

	return vxlan_socket_create(net, port, flags);
}

static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
			       struct vxlan_config *conf)
{
	struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_rdst *dst = &vxlan->default_dst;
	int err;
	bool use_ipv6 = false;
	__be16 default_port = vxlan->cfg.dst_port;

	vxlan->net = src_net;

	dst->remote_vni = conf->vni;

	memcpy(&dst->remote_ip, &conf->remote_ip, sizeof(conf->remote_ip));

	/* Unless IPv6 is explicitly requested, assume IPv4 */
	if (!dst->remote_ip.sa.sa_family)
		dst->remote_ip.sa.sa_family = AF_INET;

	if (dst->remote_ip.sa.sa_family == AF_INET6 ||
	    vxlan->cfg.saddr.sa.sa_family == AF_INET6)
		use_ipv6 = true;

	if (conf->remote_ifindex) {
		struct net_device *lowerdev
			= __dev_get_by_index(src_net, conf->remote_ifindex);

		dst->remote_ifindex = conf->remote_ifindex;

		if (!lowerdev) {
			pr_info("ifindex %d does not exist\n", dst->remote_ifindex);
			return -ENODEV;
		}

#if IS_ENABLED(CONFIG_IPV6)
		if (use_ipv6) {
			struct inet6_dev *idev = __in6_dev_get(lowerdev);

			if (idev && idev->cnf.disable_ipv6) {
				pr_info("IPv6 is disabled via sysctl\n");
				return -EPERM;
			}
			vxlan->flags |= VXLAN_F_IPV6;
		}
#endif

		if (!conf->mtu)
			dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);

		dev->needed_headroom = lowerdev->hard_header_len +
				       (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
	} else if (use_ipv6)
		vxlan->flags |= VXLAN_F_IPV6;

	memcpy(&vxlan->cfg, conf, sizeof(*conf));
	if (!vxlan->cfg.dst_port)
		vxlan->cfg.dst_port = default_port;
	vxlan->flags |= conf->flags;

	if (!vxlan->cfg.age_interval)
		vxlan->cfg.age_interval = FDB_AGE_DEFAULT;

	if (vxlan_find_vni(src_net, conf->vni, use_ipv6 ? AF_INET6 : AF_INET,
			   vxlan->cfg.dst_port, vxlan->flags))
		return -EEXIST;

	dev->ethtool_ops = &vxlan_ethtool_ops;

	/* create an fdb entry for a valid default destination */
	if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
		err = vxlan_fdb_create(vxlan, all_zeros_mac,
				       &vxlan->default_dst.remote_ip,
				       NUD_REACHABLE|NUD_PERMANENT,
				       NLM_F_EXCL|NLM_F_CREATE,
				       vxlan->cfg.dst_port,
				       vxlan->default_dst.remote_vni,
				       vxlan->default_dst.remote_ifindex,
				       NTF_SELF);
		if (err)
			return err;
	}

	err = register_netdevice(dev);
	if (err) {
		vxlan_fdb_delete_default(vxlan);
		return err;
	}

	list_add(&vxlan->next, &vn->vxlan_list);

	return 0;
}
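
/* vxlan_dev_create() is the in-kernel counterpart of "ip link add ... type
 * vxlan": it builds a device from a struct vxlan_config without going
 * through rtnetlink. A minimal sketch of a hypothetical in-kernel caller
 * (the VNI, name and error handling below are illustrative only, not part
 * of this driver):
 *
 *	struct vxlan_config conf = {
 *		.vni      = 42,
 *		.dst_port = htons(4789),
 *	};
 *	struct net_device *ndev;
 *
 *	ndev = vxlan_dev_create(net, "vxlan42", NET_NAME_USER, &conf);
 *	if (IS_ERR(ndev))
 *		return PTR_ERR(ndev);
 */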
struct net_device *vxlan_dev_create(struct net *net, const char *name,
				    u8 name_assign_type, struct vxlan_config *conf)
{
	struct nlattr *tb[IFLA_MAX+1];
	struct net_device *dev;
	int err;

	memset(&tb, 0, sizeof(tb));

	dev = rtnl_create_link(net, name, name_assign_type,
			       &vxlan_link_ops, tb);
	if (IS_ERR(dev))
		return dev;

	err = vxlan_dev_configure(net, dev, conf);
	if (err < 0) {
		free_netdev(dev);
		return ERR_PTR(err);
	}

	return dev;
}
EXPORT_SYMBOL_GPL(vxlan_dev_create);
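
/* Translate rtnetlink attributes into a struct vxlan_config. For reference,
 * a typical iproute2 invocation that exercises these attributes:
 *
 *	ip link add vxlan0 type vxlan id 42 group 239.1.1.1 \
 *		dev eth0 dstport 4789
 *
 * which maps to IFLA_VXLAN_ID, IFLA_VXLAN_GROUP, IFLA_VXLAN_LINK and
 * IFLA_VXLAN_PORT below (the interface names and values are examples only).
 */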
static int vxlan_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[])
{
	struct vxlan_config conf;
	int err;

	if (!data[IFLA_VXLAN_ID])
		return -EINVAL;

	memset(&conf, 0, sizeof(conf));
	conf.vni = nla_get_u32(data[IFLA_VXLAN_ID]);

	if (data[IFLA_VXLAN_GROUP]) {
		conf.remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]);
	} else if (data[IFLA_VXLAN_GROUP6]) {
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EPFNOSUPPORT;

		conf.remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]);
		conf.remote_ip.sa.sa_family = AF_INET6;
	}

	if (data[IFLA_VXLAN_LOCAL]) {
		conf.saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]);
		conf.saddr.sa.sa_family = AF_INET;
	} else if (data[IFLA_VXLAN_LOCAL6]) {
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EPFNOSUPPORT;

		/* TODO: respect scope id */
		conf.saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]);
		conf.saddr.sa.sa_family = AF_INET6;
	}

	if (data[IFLA_VXLAN_LINK])
		conf.remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]);

	if (data[IFLA_VXLAN_TOS])
		conf.tos = nla_get_u8(data[IFLA_VXLAN_TOS]);

	if (data[IFLA_VXLAN_TTL])
		conf.ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);

	if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
		conf.flags |= VXLAN_F_LEARN;

	if (data[IFLA_VXLAN_AGEING])
		conf.age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);

	if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY]))
		conf.flags |= VXLAN_F_PROXY;

	if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC]))
		conf.flags |= VXLAN_F_RSC;

	if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS]))
		conf.flags |= VXLAN_F_L2MISS;

	if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS]))
		conf.flags |= VXLAN_F_L3MISS;

	if (data[IFLA_VXLAN_LIMIT])
		conf.addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);

	if (data[IFLA_VXLAN_COLLECT_METADATA] &&
	    nla_get_u8(data[IFLA_VXLAN_COLLECT_METADATA]))
		conf.flags |= VXLAN_F_COLLECT_METADATA;

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		const struct ifla_vxlan_port_range *p
			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);
		conf.port_min = ntohs(p->low);
		conf.port_max = ntohs(p->high);
	}

	if (data[IFLA_VXLAN_PORT])
		conf.dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);

	if (data[IFLA_VXLAN_UDP_CSUM] && nla_get_u8(data[IFLA_VXLAN_UDP_CSUM]))
		conf.flags |= VXLAN_F_UDP_CSUM;

	if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX] &&
	    nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]))
		conf.flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;

	if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX] &&
	    nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
		conf.flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;

	if (data[IFLA_VXLAN_REMCSUM_TX] &&
	    nla_get_u8(data[IFLA_VXLAN_REMCSUM_TX]))
		conf.flags |= VXLAN_F_REMCSUM_TX;

	if (data[IFLA_VXLAN_REMCSUM_RX] &&
	    nla_get_u8(data[IFLA_VXLAN_REMCSUM_RX]))
		conf.flags |= VXLAN_F_REMCSUM_RX;

	if (data[IFLA_VXLAN_GBP])
		conf.flags |= VXLAN_F_GBP;

	if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL])
		conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL;

	err = vxlan_dev_configure(src_net, dev, &conf);
	switch (err) {
	case -ENODEV:
		pr_info("ifindex %d does not exist\n", conf.remote_ifindex);
		break;

	case -EPERM:
		pr_info("IPv6 is disabled via sysctl\n");
		break;

	case -EEXIST:
		pr_info("duplicate VNI %u\n", conf.vni);
		break;
	}

	return err;
}

static void vxlan_dellink(struct net_device *dev, struct list_head *head)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);

	spin_lock(&vn->sock_lock);
	if (!hlist_unhashed(&vxlan->hlist))
		hlist_del_rcu(&vxlan->hlist);
	spin_unlock(&vn->sock_lock);

	list_del(&vxlan->next);
	unregister_netdevice_queue(dev, head);
}

static size_t vxlan_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_ID */
		nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_GROUP{6} */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LINK */
		nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TTL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TOS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_LEARNING */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_PROXY */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_RSC */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L2MISS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L3MISS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_COLLECT_METADATA */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_AGEING */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LIMIT */
		nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
		nla_total_size(sizeof(__be16)) + /* IFLA_VXLAN_PORT */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_CSUM */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_TX */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_RX */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_TX */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_RX */
		0;
}

static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	const struct vxlan_dev *vxlan = netdev_priv(dev);
	const struct vxlan_rdst *dst = &vxlan->default_dst;
	struct ifla_vxlan_port_range ports = {
		.low =  htons(vxlan->cfg.port_min),
		.high = htons(vxlan->cfg.port_max),
	};

	if (nla_put_u32(skb, IFLA_VXLAN_ID, dst->remote_vni))
		goto nla_put_failure;

	if (!vxlan_addr_any(&dst->remote_ip)) {
		if (dst->remote_ip.sa.sa_family == AF_INET) {
			if (nla_put_in_addr(skb, IFLA_VXLAN_GROUP,
					    dst->remote_ip.sin.sin_addr.s_addr))
				goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			if (nla_put_in6_addr(skb, IFLA_VXLAN_GROUP6,
					     &dst->remote_ip.sin6.sin6_addr))
				goto nla_put_failure;
#endif
		}
	}

	if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex))
		goto nla_put_failure;

	if (!vxlan_addr_any(&vxlan->cfg.saddr)) {
		if (vxlan->cfg.saddr.sa.sa_family == AF_INET) {
			if (nla_put_in_addr(skb, IFLA_VXLAN_LOCAL,
					    vxlan->cfg.saddr.sin.sin_addr.s_addr))
				goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			if (nla_put_in6_addr(skb, IFLA_VXLAN_LOCAL6,
					     &vxlan->cfg.saddr.sin6.sin6_addr))
				goto nla_put_failure;
#endif
		}
	}

	if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) ||
	    nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) ||
	    nla_put_u8(skb, IFLA_VXLAN_LEARNING,
		       !!(vxlan->flags & VXLAN_F_LEARN)) ||
	    nla_put_u8(skb, IFLA_VXLAN_PROXY,
		       !!(vxlan->flags & VXLAN_F_PROXY)) ||
	    nla_put_u8(skb, IFLA_VXLAN_RSC, !!(vxlan->flags & VXLAN_F_RSC)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L2MISS,
		       !!(vxlan->flags & VXLAN_F_L2MISS)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L3MISS,
		       !!(vxlan->flags & VXLAN_F_L3MISS)) ||
	    nla_put_u8(skb, IFLA_VXLAN_COLLECT_METADATA,
		       !!(vxlan->flags & VXLAN_F_COLLECT_METADATA)) ||
	    nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->cfg.age_interval) ||
	    nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) ||
	    nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) ||
	    nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM,
		       !!(vxlan->flags & VXLAN_F_UDP_CSUM)) ||
	    nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
		       !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
		       !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_REMCSUM_TX,
		       !!(vxlan->flags & VXLAN_F_REMCSUM_TX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_REMCSUM_RX,
		       !!(vxlan->flags & VXLAN_F_REMCSUM_RX)))
		goto nla_put_failure;

	if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
		goto nla_put_failure;

	if (vxlan->flags & VXLAN_F_GBP &&
	    nla_put_flag(skb, IFLA_VXLAN_GBP))
		goto nla_put_failure;

	if (vxlan->flags & VXLAN_F_REMCSUM_NOPARTIAL &&
	    nla_put_flag(skb, IFLA_VXLAN_REMCSUM_NOPARTIAL))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct net *vxlan_get_link_net(const struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	return vxlan->net;
}

static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
	.kind		= "vxlan",
	.maxtype	= IFLA_VXLAN_MAX,
	.policy		= vxlan_policy,
	.priv_size	= sizeof(struct vxlan_dev),
	.setup		= vxlan_setup,
	.validate	= vxlan_validate,
	.newlink	= vxlan_newlink,
	.dellink	= vxlan_dellink,
	.get_size	= vxlan_get_size,
	.fill_info	= vxlan_fill_info,
	.get_link_net	= vxlan_get_link_net,
};

static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
					     struct net_device *dev)
{
	struct vxlan_dev *vxlan, *next;
	LIST_HEAD(list_kill);

	list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
		struct vxlan_rdst *dst = &vxlan->default_dst;

		/* In case we created the vxlan device with carrier
		 * and we lose the carrier due to module unload,
		 * we also need to remove the vxlan device. In other
		 * cases remote_ifindex is 0 here, so nothing matches.
		 */
		if (dst->remote_ifindex == dev->ifindex)
			vxlan_dellink(vxlan->dev, &list_kill);
	}

	unregister_netdevice_many(&list_kill);
}

static int vxlan_lowerdev_event(struct notifier_block *unused,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);

	if (event == NETDEV_UNREGISTER)
		vxlan_handle_lowerdev_unregister(vn, dev);

	return NOTIFY_DONE;
}

static struct notifier_block vxlan_notifier_block __read_mostly = {
	.notifier_call = vxlan_lowerdev_event,
};

static __net_init int vxlan_init_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	unsigned int h;

	INIT_LIST_HEAD(&vn->vxlan_list);
	spin_lock_init(&vn->sock_lock);

	for (h = 0; h < PORT_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vn->sock_list[h]);

	return 0;
}

static void __net_exit vxlan_exit_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_dev *vxlan, *next;
	struct net_device *dev, *aux;
	LIST_HEAD(list);

	rtnl_lock();
	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == &vxlan_link_ops)
			unregister_netdevice_queue(dev, &list);

	list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
		/* If vxlan->dev is in the same netns, it has already been added
		 * to the list by the previous loop.
		 */
		if (!net_eq(dev_net(vxlan->dev), net))
			unregister_netdevice_queue(vxlan->dev, &list);
	}

	unregister_netdevice_many(&list);
	rtnl_unlock();
}

static struct pernet_operations vxlan_net_ops = {
	.init = vxlan_init_net,
	.exit = vxlan_exit_net,
	.id   = &vxlan_net_id,
	.size = sizeof(struct vxlan_net),
};

static int __init vxlan_init_module(void)
{
	int rc;

	vxlan_wq = alloc_workqueue("vxlan", 0, 0);
	if (!vxlan_wq)
		return -ENOMEM;

	get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));

	rc = register_pernet_subsys(&vxlan_net_ops);
	if (rc)
		goto out1;

	rc = register_netdevice_notifier(&vxlan_notifier_block);
	if (rc)
		goto out2;

	rc = rtnl_link_register(&vxlan_link_ops);
	if (rc)
		goto out3;

	return 0;
out3:
	unregister_netdevice_notifier(&vxlan_notifier_block);
out2:
	unregister_pernet_subsys(&vxlan_net_ops);
out1:
	destroy_workqueue(vxlan_wq);
	return rc;
}
late_initcall(vxlan_init_module);

static void __exit vxlan_cleanup_module(void)
{
	rtnl_link_unregister(&vxlan_link_ops);
	unregister_netdevice_notifier(&vxlan_notifier_block);
	destroy_workqueue(vxlan_wq);
	unregister_pernet_subsys(&vxlan_net_ops);
	/* rcu_barrier() is called by netns */
}
module_exit(vxlan_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_VERSION(VXLAN_VERSION);
MODULE_AUTHOR("Stephen Hemminger <stephen@networkplumber.org>");
MODULE_DESCRIPTION("Driver for VXLAN encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("vxlan");