vrf.c

/*
 * vrf.c: device driver to encapsulate a VRF space
 *
 * Copyright (c) 2015 Cumulus Networks. All rights reserved.
 * Copyright (c) 2015 Shrijeet Mukherjee <shm@cumulusnetworks.com>
 * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com>
 *
 * Based on dummy, team and ipvlan drivers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/netfilter.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <linux/u64_stats_sync.h>
#include <linux/hashtable.h>

#include <linux/inetdevice.h>
#include <net/arp.h>
#include <net/ip.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/route.h>
#include <net/addrconf.h>
#include <net/l3mdev.h>
#include <net/fib_rules.h>

#define DRV_NAME	"vrf"
#define DRV_VERSION	"1.0"

#define FIB_RULE_PREF	1000	/* default preference for FIB rules */
static bool add_fib_rules = true;
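
/* Per-VRF private state: cached dst entries that steer IPv4/IPv6
 * output and locally destined traffic back through this device, plus
 * the id of the routing table the VRF is bound to.
 */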
struct net_vrf {
	struct rtable __rcu	*rth;
	struct rtable __rcu	*rth_local;
	struct rt6_info	__rcu	*rt6;
	struct rt6_info	__rcu	*rt6_local;
	u32			tb_id;
};

struct pcpu_dstats {
	u64			tx_pkts;
	u64			tx_bytes;
	u64			tx_drps;
	u64			rx_pkts;
	u64			rx_bytes;
	u64			rx_drps;
	struct u64_stats_sync	syncp;
};

static void vrf_rx_stats(struct net_device *dev, int len)
{
	struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

	u64_stats_update_begin(&dstats->syncp);
	dstats->rx_pkts++;
	dstats->rx_bytes += len;
	u64_stats_update_end(&dstats->syncp);
}

static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
{
	vrf_dev->stats.tx_errors++;
	kfree_skb(skb);
}

static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_dstats *dstats;
		u64 tbytes, tpkts, tdrops, rbytes, rpkts;
		unsigned int start;

		dstats = per_cpu_ptr(dev->dstats, i);
		do {
			start = u64_stats_fetch_begin_irq(&dstats->syncp);
			tbytes = dstats->tx_bytes;
			tpkts = dstats->tx_pkts;
			tdrops = dstats->tx_drps;
			rbytes = dstats->rx_bytes;
			rpkts = dstats->rx_pkts;
		} while (u64_stats_fetch_retry_irq(&dstats->syncp, start));
		stats->tx_bytes += tbytes;
		stats->tx_packets += tpkts;
		stats->tx_dropped += tdrops;
		stats->rx_bytes += rbytes;
		stats->rx_packets += rpkts;
	}
	return stats;
}

/* Local traffic destined to local address. Reinsert the packet to rx
 * path, similar to loopback handling.
 */
static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
			  struct dst_entry *dst)
{
	int len = skb->len;

	skb_orphan(skb);

	skb_dst_set(skb, dst);
	skb_dst_force(skb);

	/* set pkt_type to avoid skb hitting packet taps twice -
	 * once on Tx and again in Rx processing
	 */
	skb->pkt_type = PACKET_LOOPBACK;

	skb->protocol = eth_type_trans(skb, dev);

	if (likely(netif_rx(skb) == NET_RX_SUCCESS))
		vrf_rx_stats(dev, len);
	else
		this_cpu_inc(dev->dstats->rx_drps);

	return NETDEV_TX_OK;
}

#if IS_ENABLED(CONFIG_IPV6)
static int vrf_ip6_local_out(struct net *net, struct sock *sk,
			     struct sk_buff *skb)
{
	int err;

	err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net,
		      sk, skb, NULL, skb_dst(skb)->dev, dst_output);

	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}

static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct net *net = dev_net(skb->dev);
	struct flowi6 fl6 = {
		/* needed to match OIF rule */
		.flowi6_oif = dev->ifindex,
		.flowi6_iif = LOOPBACK_IFINDEX,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_mark = skb->mark,
		.flowi6_proto = iph->nexthdr,
		.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF,
	};
	int ret = NET_XMIT_DROP;
	struct dst_entry *dst;
	struct dst_entry *dst_null = &net->ipv6.ip6_null_entry->dst;

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst == dst_null)
		goto err;

	skb_dst_drop(skb);

	/* if dst.dev is loopback or the VRF device again this is locally
	 * originated traffic destined to a local address. Short circuit
	 * to Rx path using our local dst
	 */
	if (dst->dev == net->loopback_dev || dst->dev == dev) {
		struct net_vrf *vrf = netdev_priv(dev);
		struct rt6_info *rt6_local;

		/* release looked up dst and use cached local dst */
		dst_release(dst);

		rcu_read_lock();

		rt6_local = rcu_dereference(vrf->rt6_local);
		if (unlikely(!rt6_local)) {
			rcu_read_unlock();
			goto err;
		}

		/* Ordering issue: cached local dst is created on newlink
		 * before the IPv6 initialization. Using the local dst
		 * requires rt6i_idev to be set so make sure it is.
		 */
		if (unlikely(!rt6_local->rt6i_idev)) {
			rt6_local->rt6i_idev = in6_dev_get(dev);
			if (!rt6_local->rt6i_idev) {
				rcu_read_unlock();
				goto err;
			}
		}

		dst = &rt6_local->dst;
		dst_hold(dst);

		rcu_read_unlock();

		return vrf_local_xmit(skb, dev, &rt6_local->dst);
	}

	skb_dst_set(skb, dst);

	/* strip the ethernet header added for pass through VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	ret = vrf_ip6_local_out(net, skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

	return ret;
err:
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
#else
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
#endif

/* based on ip_local_out; can't use it b/c the dst is switched pointing to us */
static int vrf_ip_local_out(struct net *net, struct sock *sk,
			    struct sk_buff *skb)
{
	int err;

	err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
		      skb, NULL, skb_dst(skb)->dev, dst_output);
	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}

static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
					   struct net_device *vrf_dev)
{
	struct iphdr *ip4h = ip_hdr(skb);
	int ret = NET_XMIT_DROP;
	struct flowi4 fl4 = {
		/* needed to match OIF rule */
		.flowi4_oif = vrf_dev->ifindex,
		.flowi4_iif = LOOPBACK_IFINDEX,
		.flowi4_tos = RT_TOS(ip4h->tos),
		.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF,
		.daddr = ip4h->daddr,
	};
	struct net *net = dev_net(vrf_dev);
	struct rtable *rt;

	rt = ip_route_output_flow(net, &fl4, NULL);
	if (IS_ERR(rt))
		goto err;

	if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
		ip_rt_put(rt);
		goto err;
	}

	skb_dst_drop(skb);

	/* if dst.dev is loopback or the VRF device again this is locally
	 * originated traffic destined to a local address. Short circuit
	 * to Rx path using our local dst
	 */
	if (rt->dst.dev == net->loopback_dev || rt->dst.dev == vrf_dev) {
		struct net_vrf *vrf = netdev_priv(vrf_dev);
		struct rtable *rth_local;
		struct dst_entry *dst = NULL;

		ip_rt_put(rt);

		rcu_read_lock();

		rth_local = rcu_dereference(vrf->rth_local);
		if (likely(rth_local)) {
			dst = &rth_local->dst;
			dst_hold(dst);
		}

		rcu_read_unlock();

		if (unlikely(!dst))
			goto err;

		return vrf_local_xmit(skb, vrf_dev, dst);
	}

	skb_dst_set(skb, &rt->dst);

	/* strip the ethernet header added for pass through VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	if (!ip4h->saddr) {
		ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0,
					       RT_SCOPE_LINK);
	}

	ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		vrf_dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

out:
	return ret;
err:
	vrf_tx_error(vrf_dev, skb);
	goto out;
}
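
/* Transmit path entry: dispatch on the skb's L3 protocol. Anything
 * other than IPv4/IPv6 is counted as an error and dropped.
 */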
static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return vrf_process_v4_outbound(skb, dev);
	case htons(ETH_P_IPV6):
		return vrf_process_v6_outbound(skb, dev);
	default:
		vrf_tx_error(dev, skb);
		return NET_XMIT_DROP;
	}
}

static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* read len before the skb is handed down the stack and may be freed */
	int len = skb->len;
	netdev_tx_t ret = is_ip_tx_frame(skb, dev);

	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

		u64_stats_update_begin(&dstats->syncp);
		dstats->tx_pkts++;
		dstats->tx_bytes += len;
		u64_stats_update_end(&dstats->syncp);
	} else {
		this_cpu_inc(dev->dstats->tx_drps);
	}

	return ret;
}
#if IS_ENABLED(CONFIG_IPV6)
/* modelled after ip6_finish_output2 */
static int vrf_finish_output6(struct net *net, struct sock *sk,
			      struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	struct neighbour *neigh;
	struct in6_addr *nexthop;
	int ret;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	rcu_read_lock_bh();
	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
	if (!IS_ERR(neigh)) {
		ret = dst_neigh_output(dst, neigh, skb);
		rcu_read_unlock_bh();
		return ret;
	}
	rcu_read_unlock_bh();

	IP6_INC_STATS(dev_net(dst->dev),
		      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}

/* modelled after ip6_output */
static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb_dst(skb)->dev,
			    vrf_finish_output6,
			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}

/* set dst on skb to send packet to us via dev_xmit path. Allows
 * packet to go through device based features such as qdisc, netfilter
 * hooks and packet sockets with skb->dev set to vrf device.
 */
static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
				   struct sock *sk,
				   struct sk_buff *skb)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);
	struct dst_entry *dst = NULL;
	struct rt6_info *rt6;

	/* don't divert link scope packets */
	if (rt6_need_strict(&ipv6_hdr(skb)->daddr))
		return skb;

	rcu_read_lock();

	rt6 = rcu_dereference(vrf->rt6);
	if (likely(rt6)) {
		dst = &rt6->dst;
		dst_hold(dst);
	}

	rcu_read_unlock();

	if (unlikely(!dst)) {
		vrf_tx_error(vrf_dev, skb);
		return NULL;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	return skb;
}

/* holding rtnl */
static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
{
	struct rt6_info *rt6 = rtnl_dereference(vrf->rt6);
	struct rt6_info *rt6_local = rtnl_dereference(vrf->rt6_local);
	struct net *net = dev_net(dev);
	struct dst_entry *dst;

	RCU_INIT_POINTER(vrf->rt6, NULL);
	RCU_INIT_POINTER(vrf->rt6_local, NULL);
	synchronize_rcu();

	/* move dev in dst's to loopback so this VRF device can be deleted
	 * - based on dst_ifdown
	 */
	if (rt6) {
		dst = &rt6->dst;
		dev_put(dst->dev);
		dst->dev = net->loopback_dev;
		dev_hold(dst->dev);
		dst_release(dst);
	}

	if (rt6_local) {
		if (rt6_local->rt6i_idev)
			in6_dev_put(rt6_local->rt6i_idev);

		dst = &rt6_local->dst;
		dev_put(dst->dev);
		dst->dev = net->loopback_dev;
		dev_hold(dst->dev);
		dst_release(dst);
	}
}

static int vrf_rt6_create(struct net_device *dev)
{
	int flags = DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE;
	struct net_vrf *vrf = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct fib6_table *rt6i_table;
	struct rt6_info *rt6, *rt6_local;
	int rc = -ENOMEM;

	/* IPv6 can be CONFIG enabled and then disabled runtime */
	if (!ipv6_mod_enabled())
		return 0;

	rt6i_table = fib6_new_table(net, vrf->tb_id);
	if (!rt6i_table)
		goto out;

	/* create a dst for routing packets out a VRF device */
	rt6 = ip6_dst_alloc(net, dev, flags);
	if (!rt6)
		goto out;

	dst_hold(&rt6->dst);

	rt6->rt6i_table = rt6i_table;
	rt6->dst.output = vrf_output6;

	/* create a dst for local routing - packets sent locally
	 * to local address via the VRF device as a loopback
	 */
	rt6_local = ip6_dst_alloc(net, dev, flags);
	if (!rt6_local) {
		dst_release(&rt6->dst);
		goto out;
	}

	dst_hold(&rt6_local->dst);

	rt6_local->rt6i_idev = in6_dev_get(dev);
	rt6_local->rt6i_flags = RTF_UP | RTF_NONEXTHOP | RTF_LOCAL;
	rt6_local->rt6i_table = rt6i_table;
	rt6_local->dst.input = ip6_input;

	rcu_assign_pointer(vrf->rt6, rt6);
	rcu_assign_pointer(vrf->rt6_local, rt6_local);

	rc = 0;
out:
	return rc;
}
#else
static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
				   struct sock *sk,
				   struct sk_buff *skb)
{
	return skb;
}

static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
{
}

static int vrf_rt6_create(struct net_device *dev)
{
	return 0;
}
#endif

/* modelled after ip_finish_output2 */
static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;
	u32 nexthop;
	int ret = -EINVAL;

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (!skb2) {
			ret = -ENOMEM;
			goto err;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		consume_skb(skb);
		skb = skb2;
	}

	rcu_read_lock_bh();

	nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
	if (!IS_ERR(neigh))
		ret = dst_neigh_output(dst, neigh, skb);

	rcu_read_unlock_bh();
err:
	if (unlikely(ret < 0))
		vrf_tx_error(skb->dev, skb);
	return ret;
}

static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, dev,
			    vrf_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

/* set dst on skb to send packet to us via dev_xmit path. Allows
 * packet to go through device based features such as qdisc, netfilter
 * hooks and packet sockets with skb->dev set to vrf device.
 */
static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
				  struct sock *sk,
				  struct sk_buff *skb)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);
	struct dst_entry *dst = NULL;
	struct rtable *rth;

	rcu_read_lock();

	rth = rcu_dereference(vrf->rth);
	if (likely(rth)) {
		dst = &rth->dst;
		dst_hold(dst);
	}

	rcu_read_unlock();

	if (unlikely(!dst)) {
		vrf_tx_error(vrf_dev, skb);
		return NULL;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	return skb;
}

/* called with rcu lock held */
static struct sk_buff *vrf_l3_out(struct net_device *vrf_dev,
				  struct sock *sk,
				  struct sk_buff *skb,
				  u16 proto)
{
	switch (proto) {
	case AF_INET:
		return vrf_ip_out(vrf_dev, sk, skb);
	case AF_INET6:
		return vrf_ip6_out(vrf_dev, sk, skb);
	}

	return skb;
}

/* holding rtnl */
static void vrf_rtable_release(struct net_device *dev, struct net_vrf *vrf)
{
	struct rtable *rth = rtnl_dereference(vrf->rth);
	struct rtable *rth_local = rtnl_dereference(vrf->rth_local);
	struct net *net = dev_net(dev);
	struct dst_entry *dst;

	RCU_INIT_POINTER(vrf->rth, NULL);
	RCU_INIT_POINTER(vrf->rth_local, NULL);
	synchronize_rcu();

	/* move dev in dst's to loopback so this VRF device can be deleted
	 * - based on dst_ifdown
	 */
	if (rth) {
		dst = &rth->dst;
		dev_put(dst->dev);
		dst->dev = net->loopback_dev;
		dev_hold(dst->dev);
		dst_release(dst);
	}

	if (rth_local) {
		dst = &rth_local->dst;
		dev_put(dst->dev);
		dst->dev = net->loopback_dev;
		dev_hold(dst->dev);
		dst_release(dst);
	}
}
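
/* Create the IPv4 FIB table for this VRF along with the two cached
 * dsts: one for routing packets out through the device and one for
 * looping locally destined traffic back in.
 */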
static int vrf_rtable_create(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct rtable *rth, *rth_local;

	if (!fib_new_table(dev_net(dev), vrf->tb_id))
		return -ENOMEM;

	/* create a dst for routing packets out through a VRF device */
	rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0);
	if (!rth)
		return -ENOMEM;

	/* create a dst for local ingress routing - packets sent locally
	 * to local address via the VRF device as a loopback
	 */
	rth_local = rt_dst_alloc(dev, RTCF_LOCAL, RTN_LOCAL, 1, 1, 0);
	if (!rth_local) {
		dst_release(&rth->dst);
		return -ENOMEM;
	}

	rth->dst.output = vrf_output;
	rth->rt_table_id = vrf->tb_id;

	rth_local->rt_table_id = vrf->tb_id;

	rcu_assign_pointer(vrf->rth, rth);
	rcu_assign_pointer(vrf->rth_local, rth_local);

	return 0;
}

/**************************** device handling ********************/

/* cycle interface to flush neighbor cache and move routes across tables */
static void cycle_netdev(struct net_device *dev)
{
	unsigned int flags = dev->flags;
	int ret;

	if (!netif_running(dev))
		return;

	ret = dev_change_flags(dev, flags & ~IFF_UP);
	if (ret >= 0)
		ret = dev_change_flags(dev, flags);

	if (ret < 0) {
		netdev_err(dev,
			   "Failed to cycle device %s; route tables might be wrong!\n",
			   dev->name);
	}
}

static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	int ret;

	ret = netdev_master_upper_dev_link(port_dev, dev, NULL, NULL);
	if (ret < 0)
		return ret;

	port_dev->priv_flags |= IFF_L3MDEV_SLAVE;
	cycle_netdev(port_dev);

	return 0;
}

static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	if (netif_is_l3_master(port_dev) || netif_is_l3_slave(port_dev))
		return -EINVAL;

	return do_vrf_add_slave(dev, port_dev);
}

/* inverse of do_vrf_add_slave */
static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	netdev_upper_dev_unlink(port_dev, dev);
	port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;

	cycle_netdev(port_dev);

	return 0;
}

static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	return do_vrf_del_slave(dev, port_dev);
}

static void vrf_dev_uninit(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct net_device *port_dev;
	struct list_head *iter;

	vrf_rtable_release(dev, vrf);
	vrf_rt6_release(dev, vrf);

	netdev_for_each_lower_dev(dev, port_dev, iter)
		vrf_del_slave(dev, port_dev);

	free_percpu(dev->dstats);
	dev->dstats = NULL;
}

static int vrf_dev_init(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
	if (!dev->dstats)
		goto out_nomem;

	/* create the default dst which points back to us */
	if (vrf_rtable_create(dev) != 0)
		goto out_stats;

	if (vrf_rt6_create(dev) != 0)
		goto out_rth;

	dev->flags = IFF_MASTER | IFF_NOARP;

	/* MTU is irrelevant for VRF device; set to 64k similar to lo */
	dev->mtu = 64 * 1024;

	/* similarly, oper state is irrelevant; set to up to avoid confusion */
	dev->operstate = IF_OPER_UP;
	netdev_lockdep_set_classes(dev);
	return 0;

out_rth:
	vrf_rtable_release(dev, vrf);
out_stats:
	free_percpu(dev->dstats);
	dev->dstats = NULL;
out_nomem:
	return -ENOMEM;
}

static const struct net_device_ops vrf_netdev_ops = {
	.ndo_init		= vrf_dev_init,
	.ndo_uninit		= vrf_dev_uninit,
	.ndo_start_xmit		= vrf_xmit,
	.ndo_get_stats64	= vrf_get_stats64,
	.ndo_add_slave		= vrf_add_slave,
	.ndo_del_slave		= vrf_del_slave,
};

static u32 vrf_fib_table(const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return vrf->tb_id;
}

static int vrf_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return 0;
}
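
/* Run the skb through the netfilter hook for the given family. The
 * no-op okfn above means an accepted skb is handed back to the caller
 * to continue up the receive path; a dropped skb is freed by netfilter.
 */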
static struct sk_buff *vrf_rcv_nfhook(u8 pf, unsigned int hook,
				      struct sk_buff *skb,
				      struct net_device *dev)
{
	struct net *net = dev_net(dev);

	nf_reset(skb);

	if (NF_HOOK(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) < 0)
		skb = NULL;	/* kfree_skb(skb) handled by nf code */

	return skb;
}

#if IS_ENABLED(CONFIG_IPV6)
/* neighbor handling is done with actual device; do not want
 * to flip skb->dev for those ndisc packets. This really fails
 * for multiple next protocols (e.g., NEXTHDR_HOP). But it is
 * a start.
 */
static bool ipv6_ndisc_frame(const struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	bool rc = false;

	if (iph->nexthdr == NEXTHDR_ICMP) {
		const struct icmp6hdr *icmph;
		struct icmp6hdr _icmph;

		icmph = skb_header_pointer(skb, sizeof(*iph),
					   sizeof(_icmph), &_icmph);
		if (!icmph)
			goto out;

		switch (icmph->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			rc = true;
			break;
		}
	}

out:
	return rc;
}

static struct rt6_info *vrf_ip6_route_lookup(struct net *net,
					     const struct net_device *dev,
					     struct flowi6 *fl6,
					     int ifindex,
					     int flags)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct fib6_table *table = NULL;
	struct rt6_info *rt6;

	rcu_read_lock();

	/* fib6_table does not have a refcnt and can not be freed */
	rt6 = rcu_dereference(vrf->rt6);
	if (likely(rt6))
		table = rt6->rt6i_table;

	rcu_read_unlock();

	if (!table)
		return NULL;

	return ip6_pol_route(net, table, ifindex, fl6, flags);
}

static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev,
			      int ifindex)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct flowi6 fl6 = {
		.daddr		= iph->daddr,
		.saddr		= iph->saddr,
		.flowlabel	= ip6_flowinfo(iph),
		.flowi6_mark	= skb->mark,
		.flowi6_proto	= iph->nexthdr,
		.flowi6_iif	= ifindex,
	};
	struct net *net = dev_net(vrf_dev);
	struct rt6_info *rt6;

	rt6 = vrf_ip6_route_lookup(net, vrf_dev, &fl6, ifindex,
				   RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_IFACE);
	if (unlikely(!rt6))
		return;

	if (unlikely(&rt6->dst == &net->ipv6.ip6_null_entry->dst))
		return;

	skb_dst_set(skb, &rt6->dst);
}

static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
				   struct sk_buff *skb)
{
	int orig_iif = skb->skb_iif;
	bool need_strict;

	/* loopback traffic; do not push through packet taps again.
	 * Reset pkt_type for upper layers to process skb
	 */
	if (skb->pkt_type == PACKET_LOOPBACK) {
		skb->dev = vrf_dev;
		skb->skb_iif = vrf_dev->ifindex;
		skb->pkt_type = PACKET_HOST;
		goto out;
	}

	/* if packet is NDISC or addressed to multicast or link-local
	 * then keep the ingress interface
	 */
	need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
	if (!ipv6_ndisc_frame(skb) && !need_strict) {
		skb->dev = vrf_dev;
		skb->skb_iif = vrf_dev->ifindex;

		skb_push(skb, skb->mac_len);
		dev_queue_xmit_nit(skb, vrf_dev);
		skb_pull(skb, skb->mac_len);

		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
	}

	if (need_strict)
		vrf_ip6_input_dst(skb, vrf_dev, orig_iif);

	skb = vrf_rcv_nfhook(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, vrf_dev);
out:
	return skb;
}

#else
static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
				   struct sk_buff *skb)
{
	return skb;
}
#endif

static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
				  struct sk_buff *skb)
{
	skb->dev = vrf_dev;
	skb->skb_iif = vrf_dev->ifindex;

	/* loopback traffic; do not push through packet taps again.
	 * Reset pkt_type for upper layers to process skb
	 */
	if (skb->pkt_type == PACKET_LOOPBACK) {
		skb->pkt_type = PACKET_HOST;
		goto out;
	}

	skb_push(skb, skb->mac_len);
	dev_queue_xmit_nit(skb, vrf_dev);
	skb_pull(skb, skb->mac_len);

	skb = vrf_rcv_nfhook(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, vrf_dev);
out:
	return skb;
}

/* called with rcu lock held */
static struct sk_buff *vrf_l3_rcv(struct net_device *vrf_dev,
				  struct sk_buff *skb,
				  u16 proto)
{
	switch (proto) {
	case AF_INET:
		return vrf_ip_rcv(vrf_dev, skb);
	case AF_INET6:
		return vrf_ip6_rcv(vrf_dev, skb);
	}

	return skb;
}

#if IS_ENABLED(CONFIG_IPV6)
/* send to link-local or multicast address via interface enslaved to
 * VRF device. Force lookup to VRF table without changing flow struct
 */
static struct dst_entry *vrf_link_scope_lookup(const struct net_device *dev,
					       struct flowi6 *fl6)
{
	struct net *net = dev_net(dev);
	int flags = RT6_LOOKUP_F_IFACE;
	struct dst_entry *dst = NULL;
	struct rt6_info *rt;

	/* VRF device does not have a link-local address and
	 * sending packets to link-local or mcast addresses over
	 * a VRF device does not make sense
	 */
	if (fl6->flowi6_oif == dev->ifindex) {
		dst = &net->ipv6.ip6_null_entry->dst;
		dst_hold(dst);
		return dst;
	}

	if (!ipv6_addr_any(&fl6->saddr))
		flags |= RT6_LOOKUP_F_HAS_SADDR;

	rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif, flags);
	if (rt)
		dst = &rt->dst;

	return dst;
}
#endif

static const struct l3mdev_ops vrf_l3mdev_ops = {
	.l3mdev_fib_table	= vrf_fib_table,
	.l3mdev_l3_rcv		= vrf_l3_rcv,
	.l3mdev_l3_out		= vrf_l3_out,
#if IS_ENABLED(CONFIG_IPV6)
	.l3mdev_link_scope_lookup = vrf_link_scope_lookup,
#endif
};

static void vrf_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}

static const struct ethtool_ops vrf_ethtool_ops = {
	.get_drvinfo	= vrf_get_drvinfo,
};

static inline size_t vrf_fib_rule_nl_size(void)
{
	size_t sz;

	sz  = NLMSG_ALIGN(sizeof(struct fib_rule_hdr));
	sz += nla_total_size(sizeof(u8));	/* FRA_L3MDEV */
	sz += nla_total_size(sizeof(u32));	/* FRA_PRIORITY */

	return sz;
}
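
/* Install or remove the l3mdev FIB rule for the given family by
 * building a netlink message in the kernel and feeding it directly to
 * fib_nl_newrule()/fib_nl_delrule().
 */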
static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
{
	struct fib_rule_hdr *frh;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int err;

	if (family == AF_INET6 && !ipv6_mod_enabled())
		return 0;

	skb = nlmsg_new(vrf_fib_rule_nl_size(), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	nlh = nlmsg_put(skb, 0, 0, 0, sizeof(*frh), 0);
	if (!nlh)
		goto nla_put_failure;

	/* rule only needs to appear once */
	nlh->nlmsg_flags |= NLM_F_EXCL;

	frh = nlmsg_data(nlh);
	memset(frh, 0, sizeof(*frh));
	frh->family = family;
	frh->action = FR_ACT_TO_TBL;

	/* FRA_L3MDEV is defined as a u8 attribute */
	if (nla_put_u8(skb, FRA_L3MDEV, 1))
		goto nla_put_failure;

	if (nla_put_u32(skb, FRA_PRIORITY, FIB_RULE_PREF))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);

	/* fib_nl_{new,del}rule handling looks for net from skb->sk */
	skb->sk = dev_net(dev)->rtnl;
	if (add_it) {
		err = fib_nl_newrule(skb, nlh);
		if (err == -EEXIST)
			err = 0;
	} else {
		err = fib_nl_delrule(skb, nlh);
		if (err == -ENOENT)
			err = 0;
	}
	nlmsg_free(skb);

	return err;

nla_put_failure:
	nlmsg_free(skb);

	return -EMSGSIZE;
}
static int vrf_add_fib_rules(const struct net_device *dev)
{
	int err;

	err = vrf_fib_rule(dev, AF_INET, true);
	if (err < 0)
		goto out_err;

	err = vrf_fib_rule(dev, AF_INET6, true);
	if (err < 0)
		goto ipv6_err;

	return 0;

ipv6_err:
	vrf_fib_rule(dev, AF_INET, false);

out_err:
	netdev_err(dev, "Failed to add FIB rules.\n");
	return err;
}

static void vrf_setup(struct net_device *dev)
{
	ether_setup(dev);

	/* Initialize the device structure. */
	dev->netdev_ops = &vrf_netdev_ops;
	dev->l3mdev_ops = &vrf_l3mdev_ops;
	dev->ethtool_ops = &vrf_ethtool_ops;
	dev->destructor = free_netdev;

	/* Fill in device structure with ethernet-generic values. */
	eth_hw_addr_random(dev);

	/* don't acquire vrf device's netif_tx_lock when transmitting */
	dev->features |= NETIF_F_LLTX;

	/* don't allow vrf devices to change network namespaces. */
	dev->features |= NETIF_F_NETNS_LOCAL;

	/* does not make sense for a VLAN to be added to a vrf device */
	dev->features |= NETIF_F_VLAN_CHALLENGED;

	/* enable offload features */
	dev->features |= NETIF_F_GSO_SOFTWARE;
	dev->features |= NETIF_F_RXCSUM | NETIF_F_HW_CSUM;
	dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;

	dev->hw_features = dev->features;
	dev->hw_enc_features = dev->features;

	/* default to no qdisc; user can add if desired */
	dev->priv_flags |= IFF_NO_QUEUE;
}

static int vrf_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}

static void vrf_dellink(struct net_device *dev, struct list_head *head)
{
	unregister_netdevice_queue(dev, head);
}
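
/* Bind the new device to the table given by IFLA_VRF_TABLE and mark it
 * as an l3mdev master. The l3mdev FIB rules are installed only when the
 * first VRF in the system is created (the add_fib_rules latch above).
 */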
static int vrf_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[])
{
	struct net_vrf *vrf = netdev_priv(dev);
	int err;

	if (!data || !data[IFLA_VRF_TABLE])
		return -EINVAL;

	vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]);

	dev->priv_flags |= IFF_L3MDEV_MASTER;

	err = register_netdevice(dev);
	if (err)
		goto out;

	if (add_fib_rules) {
		err = vrf_add_fib_rules(dev);
		if (err) {
			unregister_netdevice(dev);
			goto out;
		}
		add_fib_rules = false;
	}

out:
	return err;
}

static size_t vrf_nl_getsize(const struct net_device *dev)
{
	return nla_total_size(sizeof(u32));  /* IFLA_VRF_TABLE */
}

static int vrf_fillinfo(struct sk_buff *skb,
			const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id);
}

static size_t vrf_get_slave_size(const struct net_device *bond_dev,
				 const struct net_device *slave_dev)
{
	return nla_total_size(sizeof(u32));  /* IFLA_VRF_PORT_TABLE */
}

static int vrf_fill_slave_info(struct sk_buff *skb,
			       const struct net_device *vrf_dev,
			       const struct net_device *slave_dev)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);

	if (nla_put_u32(skb, IFLA_VRF_PORT_TABLE, vrf->tb_id))
		return -EMSGSIZE;

	return 0;
}

static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
	[IFLA_VRF_TABLE] = { .type = NLA_U32 },
};

static struct rtnl_link_ops vrf_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct net_vrf),

	.get_size	= vrf_nl_getsize,
	.policy		= vrf_nl_policy,
	.validate	= vrf_validate,
	.fill_info	= vrf_fillinfo,

	.get_slave_size	 = vrf_get_slave_size,
	.fill_slave_info = vrf_fill_slave_info,

	.newlink	= vrf_newlink,
	.dellink	= vrf_dellink,
	.setup		= vrf_setup,
	.maxtype	= IFLA_VRF_MAX,
};

static int vrf_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	/* only care about unregister events to drop slave references */
	if (event == NETDEV_UNREGISTER) {
		struct net_device *vrf_dev;

		if (!netif_is_l3_slave(dev))
			goto out;

		vrf_dev = netdev_master_upper_dev_get(dev);
		vrf_del_slave(vrf_dev, dev);
	}
out:
	return NOTIFY_DONE;
}

static struct notifier_block vrf_notifier_block __read_mostly = {
	.notifier_call = vrf_device_event,
};

static int __init vrf_init_module(void)
{
	int rc;

	register_netdevice_notifier(&vrf_notifier_block);

	rc = rtnl_link_register(&vrf_link_ops);
	if (rc < 0)
		goto error;

	return 0;

error:
	unregister_netdevice_notifier(&vrf_notifier_block);
	return rc;
}

module_init(vrf_init_module);
MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
MODULE_VERSION(DRV_VERSION);
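
/* Typical usage from user space (iproute2), e.g.:
 *
 *   ip link add vrf-blue type vrf table 10
 *   ip link set dev vrf-blue up
 *   ip link set dev eth1 master vrf-blue
 *
 * Creating the first VRF also installs the l3mdev FIB rules at
 * preference 1000 (FIB_RULE_PREF above).
 */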