/*
 * vrf.c: device driver to encapsulate a VRF space
 *
 * Copyright (c) 2015 Cumulus Networks. All rights reserved.
 * Copyright (c) 2015 Shrijeet Mukherjee <shm@cumulusnetworks.com>
 * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com>
 *
 * Based on dummy, team and ipvlan drivers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/netfilter.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <linux/u64_stats_sync.h>
#include <linux/hashtable.h>

#include <linux/inetdevice.h>
#include <net/arp.h>
#include <net/ip.h>
#include <net/ip_fib.h>
#include <net/ip6_route.h>
#include <net/route.h>
#include <net/addrconf.h>
#include <net/vrf.h>

#define DRV_NAME        "vrf"
#define DRV_VERSION     "1.0"

#define vrf_is_slave(dev)       ((dev)->flags & IFF_SLAVE)

#define vrf_master_get_rcu(dev) \
        ((struct net_device *)rcu_dereference(dev->rx_handler_data))

struct pcpu_dstats {
        u64                     tx_pkts;
        u64                     tx_bytes;
        u64                     tx_drps;
        u64                     rx_pkts;
        u64                     rx_bytes;
        struct u64_stats_sync   syncp;
};
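
/* dst_entry callbacks backing vrf_dst_ops below. They serve the VRF
 * device's preallocated dst (see vrf_rtable_create()), which lives for
 * the lifetime of the device: ->check always reports it valid and
 * ->destroy is a no-op.
 */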
static struct dst_entry *vrf_ip_check(struct dst_entry *dst, u32 cookie)
{
        return dst;
}

static int vrf_ip_local_out(struct sk_buff *skb)
{
        return ip_local_out(skb);
}

static unsigned int vrf_v4_mtu(const struct dst_entry *dst)
{
        /* TO-DO: return max ethernet size? */
        return dst->dev->mtu;
}

static void vrf_dst_destroy(struct dst_entry *dst)
{
        /* our dst lives forever - or until the device is closed */
}

static unsigned int vrf_default_advmss(const struct dst_entry *dst)
{
        return 65535 - 40;
}

static struct dst_ops vrf_dst_ops = {
        .family         = AF_INET,
        .local_out      = vrf_ip_local_out,
        .check          = vrf_ip_check,
        .mtu            = vrf_v4_mtu,
        .destroy        = vrf_dst_destroy,
        .default_advmss = vrf_default_advmss,
};
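
/* only IPv4 and IPv6 frames are diverted to the VRF device on rx */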
static bool is_ip_rx_frame(struct sk_buff *skb)
{
        switch (skb->protocol) {
        case htons(ETH_P_IP):
        case htons(ETH_P_IPV6):
                return true;
        }
        return false;
}

static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
{
        vrf_dev->stats.tx_errors++;
        kfree_skb(skb);
}

/* note: already called with rcu_read_lock */
static rx_handler_result_t vrf_handle_frame(struct sk_buff **pskb)
{
        struct sk_buff *skb = *pskb;

        if (is_ip_rx_frame(skb)) {
                struct net_device *dev = vrf_master_get_rcu(skb->dev);
                struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

                u64_stats_update_begin(&dstats->syncp);
                dstats->rx_pkts++;
                dstats->rx_bytes += skb->len;
                u64_stats_update_end(&dstats->syncp);

                skb->dev = dev;

                return RX_HANDLER_ANOTHER;
        }
        return RX_HANDLER_PASS;
}
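
/* sum the per-cpu counters into the 64-bit stats returned to user space */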
static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev,
                                                 struct rtnl_link_stats64 *stats)
{
        int i;

        for_each_possible_cpu(i) {
                const struct pcpu_dstats *dstats;
                u64 tbytes, tpkts, tdrops, rbytes, rpkts;
                unsigned int start;

                dstats = per_cpu_ptr(dev->dstats, i);
                do {
                        start = u64_stats_fetch_begin_irq(&dstats->syncp);
                        tbytes = dstats->tx_bytes;
                        tpkts = dstats->tx_pkts;
                        tdrops = dstats->tx_drps;
                        rbytes = dstats->rx_bytes;
                        rpkts = dstats->rx_pkts;
                } while (u64_stats_fetch_retry_irq(&dstats->syncp, start));
                stats->tx_bytes += tbytes;
                stats->tx_packets += tpkts;
                stats->tx_dropped += tdrops;
                stats->rx_bytes += rbytes;
                stats->rx_packets += rpkts;
        }
        return stats;
}
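
/* IPv6 output is not implemented yet: account the packet as a tx error
 * and drop it
 */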
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
                                           struct net_device *dev)
{
        vrf_tx_error(dev, skb);
        return NET_XMIT_DROP;
}
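
/* look up the real output route for the flow (the oif in fl4 steers the
 * lookup into the VRF's table) and attach it to the skb; returns
 * non-zero if no usable route is found
 */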
static int vrf_send_v4_prep(struct sk_buff *skb, struct flowi4 *fl4,
                            struct net_device *vrf_dev)
{
        struct rtable *rt;
        int err = 1;

        rt = ip_route_output_flow(dev_net(vrf_dev), fl4, NULL);
        if (IS_ERR(rt))
                goto out;

        /* TO-DO: what about broadcast ? */
        if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
                ip_rt_put(rt);
                goto out;
        }

        skb_dst_drop(skb);
        skb_dst_set(skb, &rt->dst);
        err = 0;
out:
        return err;
}
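
/* IPv4 tx path: route the packet out of the VRF, pick a source address
 * if the sender left it unset, then hand the skb to ip_local_out()
 */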
static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
                                           struct net_device *vrf_dev)
{
        struct iphdr *ip4h = ip_hdr(skb);
        int ret = NET_XMIT_DROP;
        struct flowi4 fl4 = {
                /* needed to match OIF rule */
                .flowi4_oif = vrf_dev->ifindex,
                .flowi4_iif = LOOPBACK_IFINDEX,
                .flowi4_tos = RT_TOS(ip4h->tos),
                .flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_VRFSRC,
                .daddr = ip4h->daddr,
        };

        if (vrf_send_v4_prep(skb, &fl4, vrf_dev))
                goto err;

        if (!ip4h->saddr) {
                ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0,
                                               RT_SCOPE_LINK);
        }

        ret = ip_local_out(skb);
        if (unlikely(net_xmit_eval(ret)))
                vrf_dev->stats.tx_errors++;
        else
                ret = NET_XMIT_SUCCESS;

out:
        return ret;
err:
        vrf_tx_error(vrf_dev, skb);
        goto out;
}

static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
{
        /* strip the ethernet header added for pass through VRF device */
        __skb_pull(skb, skb_network_offset(skb));

        switch (skb->protocol) {
        case htons(ETH_P_IP):
                return vrf_process_v4_outbound(skb, dev);
        case htons(ETH_P_IPV6):
                return vrf_process_v6_outbound(skb, dev);
        default:
                vrf_tx_error(dev, skb);
                return NET_XMIT_DROP;
        }
}
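
/* ndo_start_xmit: send the frame and account it in the per-cpu stats */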
static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
{
        /* cache the length up front: on success the skb has been
         * consumed by the IP output path and must not be touched
         */
        int len = skb->len;
        netdev_tx_t ret = is_ip_tx_frame(skb, dev);

        if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
                struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

                u64_stats_update_begin(&dstats->syncp);
                dstats->tx_pkts++;
                dstats->tx_bytes += len;
                u64_stats_update_end(&dstats->syncp);
        } else {
                this_cpu_inc(dev->dstats->tx_drps);
        }

        return ret;
}

/* modelled after ip_finish_output2 */
static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);
        struct rtable *rt = (struct rtable *)dst;
        struct net_device *dev = dst->dev;
        unsigned int hh_len = LL_RESERVED_SPACE(dev);
        struct neighbour *neigh;
        u32 nexthop;
        int ret = -EINVAL;

        /* Be paranoid, rather than too clever. */
        if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
                struct sk_buff *skb2;

                skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
                if (!skb2) {
                        ret = -ENOMEM;
                        goto err;
                }
                if (skb->sk)
                        skb_set_owner_w(skb2, skb->sk);

                consume_skb(skb);
                skb = skb2;
        }

        rcu_read_lock_bh();

        nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
        neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
        if (unlikely(!neigh))
                neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
        if (!IS_ERR(neigh))
                ret = dst_neigh_output(dst, neigh, skb);

        rcu_read_unlock_bh();
err:
        if (unlikely(ret < 0))
                vrf_tx_error(skb->dev, skb);

        return ret;
}
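
/* modelled after ip_output: update stats and run the netfilter
 * POST_ROUTING hook before handing the skb to vrf_finish_output
 */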
static int vrf_output(struct sock *sk, struct sk_buff *skb)
{
        struct net_device *dev = skb_dst(skb)->dev;
        struct net *net = dev_net(dev);

        IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

        skb->dev = dev;
        skb->protocol = htons(ETH_P_IP);

        return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
                            net, sk, skb, NULL, dev,
                            vrf_finish_output,
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
}

static void vrf_rtable_destroy(struct net_vrf *vrf)
{
        struct dst_entry *dst = (struct dst_entry *)vrf->rth;

        dst_destroy(dst);
        vrf->rth = NULL;
}
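
/* allocate the device's default dst, which points back at the VRF
 * device itself; its ->output hook is overridden to vrf_output so
 * traffic through the VRF takes the output path above
 */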
static struct rtable *vrf_rtable_create(struct net_device *dev)
{
        struct net_vrf *vrf = netdev_priv(dev);
        struct rtable *rth;

        rth = dst_alloc(&vrf_dst_ops, dev, 2,
                        DST_OBSOLETE_NONE,
                        (DST_HOST | DST_NOPOLICY | DST_NOXFRM));
        if (rth) {
                rth->dst.output = vrf_output;
                rth->rt_genid = rt_genid_ipv4(dev_net(dev));
                rth->rt_flags = 0;
                rth->rt_type = RTN_UNICAST;
                rth->rt_is_input = 0;
                rth->rt_iif = 0;
                rth->rt_pmtu = 0;
                rth->rt_gateway = 0;
                rth->rt_uses_gateway = 0;
                rth->rt_table_id = vrf->tb_id;
                INIT_LIST_HEAD(&rth->rt_uncached);
                rth->rt_uncached_list = NULL;
        }

        return rth;
}

/**************************** device handling ********************/

/* cycle interface to flush neighbor cache and move routes across tables */
static void cycle_netdev(struct net_device *dev)
{
        unsigned int flags = dev->flags;
        int ret;

        if (!netif_running(dev))
                return;

        ret = dev_change_flags(dev, flags & ~IFF_UP);
        if (ret >= 0)
                ret = dev_change_flags(dev, flags);

        if (ret < 0) {
                netdev_err(dev,
                           "Failed to cycle device %s; route tables might be wrong!\n",
                           dev->name);
        }
}

static struct slave *__vrf_find_slave_dev(struct slave_queue *queue,
                                          struct net_device *dev)
{
        struct list_head *head = &queue->all_slaves;
        struct slave *slave;

        list_for_each_entry(slave, head, list) {
                if (slave->dev == dev)
                        return slave;
        }

        return NULL;
}

/* inverse of __vrf_insert_slave */
static void __vrf_remove_slave(struct slave_queue *queue, struct slave *slave)
{
        list_del(&slave->list);
}

static void __vrf_insert_slave(struct slave_queue *queue, struct slave *slave)
{
        list_add(&slave->list, &queue->all_slaves);
}
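
/* enslave a port to the VRF: register the rx handler that diverts its
 * traffic to the VRF device, link it as an upper device, then cycle it
 * so its routes move into the VRF's table
 */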
static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
{
        struct net_vrf_dev *vrf_ptr = kmalloc(sizeof(*vrf_ptr), GFP_KERNEL);
        struct slave *slave = kzalloc(sizeof(*slave), GFP_KERNEL);
        struct net_vrf *vrf = netdev_priv(dev);
        struct slave_queue *queue = &vrf->queue;
        int ret = -ENOMEM;

        if (!slave || !vrf_ptr)
                goto out_fail;

        slave->dev = port_dev;
        vrf_ptr->ifindex = dev->ifindex;
        vrf_ptr->tb_id = vrf->tb_id;

        /* register the packet handler for slave ports */
        ret = netdev_rx_handler_register(port_dev, vrf_handle_frame, dev);
        if (ret) {
                netdev_err(port_dev,
                           "Device %s failed to register rx_handler\n",
                           port_dev->name);
                goto out_fail;
        }

        ret = netdev_master_upper_dev_link(port_dev, dev);
        if (ret < 0)
                goto out_unregister;

        port_dev->flags |= IFF_SLAVE;
        __vrf_insert_slave(queue, slave);
        rcu_assign_pointer(port_dev->vrf_ptr, vrf_ptr);
        cycle_netdev(port_dev);

        return 0;

out_unregister:
        netdev_rx_handler_unregister(port_dev);
out_fail:
        kfree(vrf_ptr);
        kfree(slave);
        return ret;
}

static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
{
        if (netif_is_vrf(port_dev) || vrf_is_slave(port_dev))
                return -EINVAL;

        return do_vrf_add_slave(dev, port_dev);
}

/* inverse of do_vrf_add_slave */
static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
        struct net_vrf_dev *vrf_ptr = rtnl_dereference(port_dev->vrf_ptr);
        struct net_vrf *vrf = netdev_priv(dev);
        struct slave_queue *queue = &vrf->queue;
        struct slave *slave;

        RCU_INIT_POINTER(port_dev->vrf_ptr, NULL);

        netdev_upper_dev_unlink(port_dev, dev);
        port_dev->flags &= ~IFF_SLAVE;

        netdev_rx_handler_unregister(port_dev);

        /* after netdev_rx_handler_unregister for synchronize_rcu */
        kfree(vrf_ptr);

        cycle_netdev(port_dev);

        slave = __vrf_find_slave_dev(queue, port_dev);
        if (slave)
                __vrf_remove_slave(queue, slave);

        kfree(slave);

        return 0;
}

static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
        return do_vrf_del_slave(dev, port_dev);
}
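
/* release the slaves, the preallocated dst and the per-cpu stats when
 * the device is torn down
 */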
static void vrf_dev_uninit(struct net_device *dev)
{
        struct net_vrf *vrf = netdev_priv(dev);
        struct slave_queue *queue = &vrf->queue;
        struct list_head *head = &queue->all_slaves;
        struct slave *slave, *next;

        vrf_rtable_destroy(vrf);

        list_for_each_entry_safe(slave, next, head, list)
                vrf_del_slave(dev, slave->dev);

        free_percpu(dev->dstats);
        dev->dstats = NULL;
}

static int vrf_dev_init(struct net_device *dev)
{
        struct net_vrf *vrf = netdev_priv(dev);

        INIT_LIST_HEAD(&vrf->queue.all_slaves);

        dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
        if (!dev->dstats)
                goto out_nomem;

        /* create the default dst which points back to us */
        vrf->rth = vrf_rtable_create(dev);
        if (!vrf->rth)
                goto out_stats;

        dev->flags = IFF_MASTER | IFF_NOARP;

        return 0;

out_stats:
        free_percpu(dev->dstats);
        dev->dstats = NULL;
out_nomem:
        return -ENOMEM;
}

static const struct net_device_ops vrf_netdev_ops = {
        .ndo_init               = vrf_dev_init,
        .ndo_uninit             = vrf_dev_uninit,
        .ndo_start_xmit         = vrf_xmit,
        .ndo_get_stats64        = vrf_get_stats64,
        .ndo_add_slave          = vrf_add_slave,
        .ndo_del_slave          = vrf_del_slave,
};

static void vrf_get_drvinfo(struct net_device *dev,
                            struct ethtool_drvinfo *info)
{
        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}

static const struct ethtool_ops vrf_ethtool_ops = {
        .get_drvinfo    = vrf_get_drvinfo,
};

static void vrf_setup(struct net_device *dev)
{
        ether_setup(dev);

        /* Initialize the device structure. */
        dev->netdev_ops = &vrf_netdev_ops;
        dev->ethtool_ops = &vrf_ethtool_ops;
        dev->destructor = free_netdev;

        /* Fill in device structure with ethernet-generic values. */
        eth_hw_addr_random(dev);

        /* don't acquire vrf device's netif_tx_lock when transmitting */
        dev->features |= NETIF_F_LLTX;

        /* don't allow vrf devices to change network namespaces. */
        dev->features |= NETIF_F_NETNS_LOCAL;
}

static int vrf_validate(struct nlattr *tb[], struct nlattr *data[])
{
        if (tb[IFLA_ADDRESS]) {
                if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
                        return -EINVAL;
                if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
                        return -EADDRNOTAVAIL;
        }
        return 0;
}

static void vrf_dellink(struct net_device *dev, struct list_head *head)
{
        struct net_vrf_dev *vrf_ptr = rtnl_dereference(dev->vrf_ptr);

        RCU_INIT_POINTER(dev->vrf_ptr, NULL);
        kfree_rcu(vrf_ptr, rcu);
        unregister_netdevice_queue(dev, head);
}
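
/* create a VRF device; the mandatory IFLA_VRF_TABLE attribute carries
 * the FIB table the VRF is bound to
 */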
static int vrf_newlink(struct net *src_net, struct net_device *dev,
                       struct nlattr *tb[], struct nlattr *data[])
{
        struct net_vrf *vrf = netdev_priv(dev);
        struct net_vrf_dev *vrf_ptr;
        int err;

        if (!data || !data[IFLA_VRF_TABLE])
                return -EINVAL;

        vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]);

        dev->priv_flags |= IFF_VRF_MASTER;

        err = -ENOMEM;
        vrf_ptr = kmalloc(sizeof(*dev->vrf_ptr), GFP_KERNEL);
        if (!vrf_ptr)
                goto out_fail;

        vrf_ptr->ifindex = dev->ifindex;
        vrf_ptr->tb_id = vrf->tb_id;

        err = register_netdevice(dev);
        if (err < 0)
                goto out_fail;

        rcu_assign_pointer(dev->vrf_ptr, vrf_ptr);

        return 0;

out_fail:
        kfree(vrf_ptr);
        free_netdev(dev);
        return err;
}

static size_t vrf_nl_getsize(const struct net_device *dev)
{
        return nla_total_size(sizeof(u32));  /* IFLA_VRF_TABLE */
}

static int vrf_fillinfo(struct sk_buff *skb,
                        const struct net_device *dev)
{
        struct net_vrf *vrf = netdev_priv(dev);

        return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id);
}

static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
        [IFLA_VRF_TABLE] = { .type = NLA_U32 },
};

static struct rtnl_link_ops vrf_link_ops __read_mostly = {
        .kind           = DRV_NAME,
        .priv_size      = sizeof(struct net_vrf),

        .get_size       = vrf_nl_getsize,
        .policy         = vrf_nl_policy,
        .validate       = vrf_validate,
        .fill_info      = vrf_fillinfo,

        .newlink        = vrf_newlink,
        .dellink        = vrf_dellink,
        .setup          = vrf_setup,
        .maxtype        = IFLA_VRF_MAX,
};

static int vrf_device_event(struct notifier_block *unused,
                            unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        /* only care about unregister events to drop slave references */
        if (event == NETDEV_UNREGISTER) {
                struct net_vrf_dev *vrf_ptr = rtnl_dereference(dev->vrf_ptr);
                struct net_device *vrf_dev;

                if (!vrf_ptr || netif_is_vrf(dev))
                        goto out;

                vrf_dev = netdev_master_upper_dev_get(dev);
                vrf_del_slave(vrf_dev, dev);
        }
out:
        return NOTIFY_DONE;
}

static struct notifier_block vrf_notifier_block __read_mostly = {
        .notifier_call = vrf_device_event,
};
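
/* module init: create the dst cache backing vrf_dst_ops, then register
 * the netdevice notifier and the rtnl link ops
 */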
static int __init vrf_init_module(void)
{
        int rc;

        vrf_dst_ops.kmem_cachep =
                kmem_cache_create("vrf_ip_dst_cache",
                                  sizeof(struct rtable), 0,
                                  SLAB_HWCACHE_ALIGN,
                                  NULL);

        if (!vrf_dst_ops.kmem_cachep)
                return -ENOMEM;

        register_netdevice_notifier(&vrf_notifier_block);

        rc = rtnl_link_register(&vrf_link_ops);
        if (rc < 0)
                goto error;

        return 0;

error:
        unregister_netdevice_notifier(&vrf_notifier_block);
        kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
        return rc;
}

static void __exit vrf_cleanup_module(void)
{
        rtnl_link_unregister(&vrf_link_ops);
        unregister_netdevice_notifier(&vrf_notifier_block);
        kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
}

module_init(vrf_init_module);
module_exit(vrf_cleanup_module);

MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
MODULE_VERSION(DRV_VERSION);