  1. /*
  2. * vrf.c: device driver to encapsulate a VRF space
  3. *
  4. * Copyright (c) 2015 Cumulus Networks. All rights reserved.
  5. * Copyright (c) 2015 Shrijeet Mukherjee <shm@cumulusnetworks.com>
  6. * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com>
  7. *
  8. * Based on dummy, team and ipvlan drivers
  9. *
  10. * This program is free software; you can redistribute it and/or modify
  11. * it under the terms of the GNU General Public License as published by
  12. * the Free Software Foundation; either version 2 of the License, or
  13. * (at your option) any later version.
  14. */
  15. #include <linux/module.h>
  16. #include <linux/kernel.h>
  17. #include <linux/netdevice.h>
  18. #include <linux/etherdevice.h>
  19. #include <linux/ip.h>
  20. #include <linux/init.h>
  21. #include <linux/moduleparam.h>
  22. #include <linux/netfilter.h>
  23. #include <linux/rtnetlink.h>
  24. #include <net/rtnetlink.h>
  25. #include <linux/u64_stats_sync.h>
  26. #include <linux/hashtable.h>
  27. #include <linux/inetdevice.h>
  28. #include <net/ip.h>
  29. #include <net/ip_fib.h>
  30. #include <net/ip6_route.h>
  31. #include <net/rtnetlink.h>
  32. #include <net/route.h>
  33. #include <net/addrconf.h>
  34. #include <net/vrf.h>
#define DRV_NAME "vrf"
#define DRV_VERSION "1.0"

/* true if @dev is currently enslaved to some master device */
#define vrf_is_slave(dev) ((dev)->flags & IFF_SLAVE)

/* the slave's rx_handler_data holds its VRF master device (installed by
 * netdev_rx_handler_register in do_vrf_add_slave); caller must hold
 * rcu_read_lock
 */
#define vrf_master_get_rcu(dev) \
	((struct net_device *)rcu_dereference(dev->rx_handler_data))
/* per-cpu VRF device counters; @syncp lets readers take a consistent
 * 64-bit snapshot on 32-bit hosts (see vrf_get_stats64)
 */
struct pcpu_dstats {
	u64 tx_pkts;
	u64 tx_bytes;
	u64 tx_drps;	/* packets dropped on transmit */
	u64 rx_pkts;
	u64 rx_bytes;
	struct u64_stats_sync syncp;
};
/* dst_ops->check: the VRF dst is never obsoleted, so it is always valid */
static struct dst_entry *vrf_ip_check(struct dst_entry *dst, u32 cookie)
{
	return dst;
}
/* dst_ops->local_out: pass straight through to IPv4 local output */
static int vrf_ip_local_out(struct sk_buff *skb)
{
	return ip_local_out(skb);
}
/* dst_ops->mtu: report the MTU of the device backing this dst */
static unsigned int vrf_v4_mtu(const struct dst_entry *dst)
{
	/* TO-DO: return max ethernet size? */
	return dst->dev->mtu;
}
/* dst_ops->destroy: intentionally a no-op */
static void vrf_dst_destroy(struct dst_entry *dst)
{
	/* our dst lives forever - or until the device is closed */
}
/* dst_ops->default_advmss: the largest advertisable MSS — 64K max IPv4
 * datagram minus 40 bytes of IPv4 + TCP headers
 */
static unsigned int vrf_default_advmss(const struct dst_entry *dst)
{
	return 65535 - 40;
}
/* dst_ops for the per-VRF dst entries allocated in vrf_rtable_create */
static struct dst_ops vrf_dst_ops = {
	.family = AF_INET,
	.local_out = vrf_ip_local_out,
	.check = vrf_ip_check,
	.mtu = vrf_v4_mtu,
	.destroy = vrf_dst_destroy,
	.default_advmss = vrf_default_advmss,
};
  77. static bool is_ip_rx_frame(struct sk_buff *skb)
  78. {
  79. switch (skb->protocol) {
  80. case htons(ETH_P_IP):
  81. case htons(ETH_P_IPV6):
  82. return true;
  83. }
  84. return false;
  85. }
  86. /* note: already called with rcu_read_lock */
  87. static rx_handler_result_t vrf_handle_frame(struct sk_buff **pskb)
  88. {
  89. struct sk_buff *skb = *pskb;
  90. if (is_ip_rx_frame(skb)) {
  91. struct net_device *dev = vrf_master_get_rcu(skb->dev);
  92. struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
  93. u64_stats_update_begin(&dstats->syncp);
  94. dstats->rx_pkts++;
  95. dstats->rx_bytes += skb->len;
  96. u64_stats_update_end(&dstats->syncp);
  97. skb->dev = dev;
  98. return RX_HANDLER_ANOTHER;
  99. }
  100. return RX_HANDLER_PASS;
  101. }
  102. static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev,
  103. struct rtnl_link_stats64 *stats)
  104. {
  105. int i;
  106. for_each_possible_cpu(i) {
  107. const struct pcpu_dstats *dstats;
  108. u64 tbytes, tpkts, tdrops, rbytes, rpkts;
  109. unsigned int start;
  110. dstats = per_cpu_ptr(dev->dstats, i);
  111. do {
  112. start = u64_stats_fetch_begin_irq(&dstats->syncp);
  113. tbytes = dstats->tx_bytes;
  114. tpkts = dstats->tx_pkts;
  115. tdrops = dstats->tx_drps;
  116. rbytes = dstats->rx_bytes;
  117. rpkts = dstats->rx_pkts;
  118. } while (u64_stats_fetch_retry_irq(&dstats->syncp, start));
  119. stats->tx_bytes += tbytes;
  120. stats->tx_packets += tpkts;
  121. stats->tx_dropped += tdrops;
  122. stats->rx_bytes += rbytes;
  123. stats->rx_packets += rpkts;
  124. }
  125. return stats;
  126. }
  127. static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
  128. struct net_device *dev)
  129. {
  130. return 0;
  131. }
  132. static int vrf_send_v4_prep(struct sk_buff *skb, struct flowi4 *fl4,
  133. struct net_device *vrf_dev)
  134. {
  135. struct rtable *rt;
  136. int err = 1;
  137. rt = ip_route_output_flow(dev_net(vrf_dev), fl4, NULL);
  138. if (IS_ERR(rt))
  139. goto out;
  140. /* TO-DO: what about broadcast ? */
  141. if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
  142. ip_rt_put(rt);
  143. goto out;
  144. }
  145. skb_dst_drop(skb);
  146. skb_dst_set(skb, &rt->dst);
  147. err = 0;
  148. out:
  149. return err;
  150. }
/* Transmit an IPv4 packet that was handed to the VRF device: re-route
 * it so the lookup matches the VRF's table (via the oif rule) and send
 * it down the resolved path.  The skb is consumed on success and error.
 */
static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
					   struct net_device *vrf_dev)
{
	struct iphdr *ip4h = ip_hdr(skb);
	int ret = NET_XMIT_DROP;
	struct flowi4 fl4 = {
		/* needed to match OIF rule */
		.flowi4_oif = vrf_dev->ifindex,
		.flowi4_iif = LOOPBACK_IFINDEX,
		.flowi4_tos = RT_TOS(ip4h->tos),
		.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_VRFSRC,
		.daddr = ip4h->daddr,
	};

	if (vrf_send_v4_prep(skb, &fl4, vrf_dev))
		goto err;

	if (!ip4h->saddr) {
		/* no source address yet: pick one from the egress device */
		ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0,
					       RT_SCOPE_LINK);
	}

	ret = ip_local_out(skb);
	/* net_xmit_eval maps SUCCESS/CN to 0, real errors to non-zero */
	if (unlikely(net_xmit_eval(ret)))
		vrf_dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

out:
	return ret;
err:
	vrf_dev->stats.tx_errors++;
	kfree_skb(skb);
	goto out;
}
  182. static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
  183. {
  184. switch (skb->protocol) {
  185. case htons(ETH_P_IP):
  186. return vrf_process_v4_outbound(skb, dev);
  187. case htons(ETH_P_IPV6):
  188. return vrf_process_v6_outbound(skb, dev);
  189. default:
  190. return NET_XMIT_DROP;
  191. }
  192. }
  193. static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
  194. {
  195. netdev_tx_t ret = is_ip_tx_frame(skb, dev);
  196. if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
  197. struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
  198. u64_stats_update_begin(&dstats->syncp);
  199. dstats->tx_pkts++;
  200. dstats->tx_bytes += skb->len;
  201. u64_stats_update_end(&dstats->syncp);
  202. } else {
  203. this_cpu_inc(dev->dstats->tx_drps);
  204. }
  205. return ret;
  206. }
/* okfn for the POST_ROUTING hook in vrf_output: hand the packet to the
 * device's transmit queue
 */
static netdev_tx_t vrf_finish(struct sock *sk, struct sk_buff *skb)
{
	return dev_queue_xmit(skb);
}
/* output hook of the VRF rtable (set in vrf_rtable_create): account the
 * packet, then run the IPv4 POST_ROUTING netfilter hook before handing
 * off to vrf_finish — unless the skb was already rerouted.
 */
static int vrf_output(struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, skb,
			    NULL, dev,
			    vrf_finish,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}
  222. static void vrf_rtable_destroy(struct net_vrf *vrf)
  223. {
  224. struct dst_entry *dst = (struct dst_entry *)vrf->rth;
  225. if (dst)
  226. dst_destroy(dst);
  227. vrf->rth = NULL;
  228. }
/* Allocate the VRF device's own rtable.  Its output hook is vrf_output,
 * so traffic bound to this dst is re-routed against the VRF table.
 * The initial reference of 2 keeps the dst from being freed by normal
 * dst_release — it lives until vrf_rtable_destroy (see vrf_dst_destroy).
 */
static struct rtable *vrf_rtable_create(struct net_device *dev)
{
	struct rtable *rth;

	rth = dst_alloc(&vrf_dst_ops, dev, 2,
			DST_OBSOLETE_NONE,
			(DST_HOST | DST_NOPOLICY | DST_NOXFRM));
	if (rth) {
		rth->dst.output = vrf_output;
		rth->rt_genid = rt_genid_ipv4(dev_net(dev));
		rth->rt_flags = 0;
		rth->rt_type = RTN_UNICAST;
		rth->rt_is_input = 0;
		rth->rt_iif = 0;
		rth->rt_pmtu = 0;
		rth->rt_gateway = 0;
		rth->rt_uses_gateway = 0;
		INIT_LIST_HEAD(&rth->rt_uncached);
		rth->rt_uncached_list = NULL;
		rth->rt_lwtstate = NULL;
	}

	return rth;
}
  251. /**************************** device handling ********************/
  252. /* cycle interface to flush neighbor cache and move routes across tables */
  253. static void cycle_netdev(struct net_device *dev)
  254. {
  255. unsigned int flags = dev->flags;
  256. int ret;
  257. if (!netif_running(dev))
  258. return;
  259. ret = dev_change_flags(dev, flags & ~IFF_UP);
  260. if (ret >= 0)
  261. ret = dev_change_flags(dev, flags);
  262. if (ret < 0) {
  263. netdev_err(dev,
  264. "Failed to cycle device %s; route tables might be wrong!\n",
  265. dev->name);
  266. }
  267. }
  268. static struct slave *__vrf_find_slave_dev(struct slave_queue *queue,
  269. struct net_device *dev)
  270. {
  271. struct list_head *head = &queue->all_slaves;
  272. struct slave *slave;
  273. list_for_each_entry(slave, head, list) {
  274. if (slave->dev == dev)
  275. return slave;
  276. }
  277. return NULL;
  278. }
  279. /* inverse of __vrf_insert_slave */
  280. static void __vrf_remove_slave(struct slave_queue *queue, struct slave *slave)
  281. {
  282. list_del(&slave->list);
  283. queue->num_slaves--;
  284. }
  285. static void __vrf_insert_slave(struct slave_queue *queue, struct slave *slave)
  286. {
  287. list_add(&slave->list, &queue->all_slaves);
  288. queue->num_slaves++;
  289. }
/* Enslave @port_dev to the VRF master @dev: record it in the slave
 * queue, install the rx handler that redirects traffic to the VRF,
 * link it as an upper device and publish its vrf_ptr.  Called under
 * RTNL.  On failure everything is unwound in reverse order.
 */
static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	struct net_vrf_dev *vrf_ptr = kmalloc(sizeof(*vrf_ptr), GFP_KERNEL);
	struct slave *slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	struct slave *duplicate_slave;
	struct net_vrf *vrf = netdev_priv(dev);
	struct slave_queue *queue = &vrf->queue;
	int ret = -ENOMEM;

	if (!slave || !vrf_ptr)
		goto out_fail;

	slave->dev = port_dev;

	vrf_ptr->ifindex = dev->ifindex;
	vrf_ptr->tb_id = vrf->tb_id;

	/* refuse to enslave the same port twice */
	duplicate_slave = __vrf_find_slave_dev(queue, port_dev);
	if (duplicate_slave) {
		ret = -EBUSY;
		goto out_fail;
	}

	__vrf_insert_slave(queue, slave);

	/* register the packet handler for slave ports */
	ret = netdev_rx_handler_register(port_dev, vrf_handle_frame, dev);
	if (ret) {
		netdev_err(port_dev,
			   "Device %s failed to register rx_handler\n",
			   port_dev->name);
		goto out_remove;
	}

	ret = netdev_master_upper_dev_link(port_dev, dev);
	if (ret < 0)
		goto out_unregister;

	port_dev->flags |= IFF_SLAVE;

	/* publish vrf_ptr only once enslavement is fully set up */
	rcu_assign_pointer(port_dev->vrf_ptr, vrf_ptr);
	cycle_netdev(port_dev);

	return 0;

out_unregister:
	netdev_rx_handler_unregister(port_dev);
out_remove:
	__vrf_remove_slave(queue, slave);
out_fail:
	kfree(vrf_ptr);
	kfree(slave);
	return ret;
}
  333. static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
  334. {
  335. if (!netif_is_vrf(dev) || netif_is_vrf(port_dev) ||
  336. vrf_is_slave(port_dev))
  337. return -EINVAL;
  338. return do_vrf_add_slave(dev, port_dev);
  339. }
/* inverse of do_vrf_add_slave; called under RTNL.  Teardown order
 * matters: vrf_ptr must be unpublished and the rx handler removed
 * (which implies synchronize_rcu) before the pointer can be freed.
 */
static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	struct net_vrf_dev *vrf_ptr = rtnl_dereference(port_dev->vrf_ptr);
	struct net_vrf *vrf = netdev_priv(dev);
	struct slave_queue *queue = &vrf->queue;
	struct slave *slave;

	RCU_INIT_POINTER(port_dev->vrf_ptr, NULL);

	netdev_upper_dev_unlink(port_dev, dev);
	port_dev->flags &= ~IFF_SLAVE;

	netdev_rx_handler_unregister(port_dev);

	/* after netdev_rx_handler_unregister for synchronize_rcu */
	kfree(vrf_ptr);

	cycle_netdev(port_dev);

	slave = __vrf_find_slave_dev(queue, port_dev);
	if (slave)
		__vrf_remove_slave(queue, slave);

	/* kfree(NULL) is a no-op if the slave entry was not found */
	kfree(slave);

	return 0;
}
  360. static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
  361. {
  362. if (!netif_is_vrf(dev))
  363. return -EINVAL;
  364. return do_vrf_del_slave(dev, port_dev);
  365. }
/* ndo_uninit: release the VRF's dst, detach every remaining slave and
 * free the per-cpu stats.  Runs under RTNL during unregister.
 */
static void vrf_dev_uninit(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct slave_queue *queue = &vrf->queue;
	struct list_head *head = &queue->all_slaves;
	struct slave *slave, *next;

	vrf_rtable_destroy(vrf);

	/* _safe variant: vrf_del_slave unlinks and frees each entry */
	list_for_each_entry_safe(slave, next, head, list)
		vrf_del_slave(dev, slave->dev);

	if (dev->dstats)
		free_percpu(dev->dstats);
	dev->dstats = NULL;
}
  379. static int vrf_dev_init(struct net_device *dev)
  380. {
  381. struct net_vrf *vrf = netdev_priv(dev);
  382. INIT_LIST_HEAD(&vrf->queue.all_slaves);
  383. dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
  384. if (!dev->dstats)
  385. goto out_nomem;
  386. /* create the default dst which points back to us */
  387. vrf->rth = vrf_rtable_create(dev);
  388. if (!vrf->rth)
  389. goto out_stats;
  390. dev->flags = IFF_MASTER | IFF_NOARP;
  391. return 0;
  392. out_stats:
  393. free_percpu(dev->dstats);
  394. dev->dstats = NULL;
  395. out_nomem:
  396. return -ENOMEM;
  397. }
/* net_device_ops for the VRF master device */
static const struct net_device_ops vrf_netdev_ops = {
	.ndo_init = vrf_dev_init,
	.ndo_uninit = vrf_dev_uninit,
	.ndo_start_xmit = vrf_xmit,
	.ndo_get_stats64 = vrf_get_stats64,
	.ndo_add_slave = vrf_add_slave,
	.ndo_del_slave = vrf_del_slave,
};
  406. static void vrf_get_drvinfo(struct net_device *dev,
  407. struct ethtool_drvinfo *info)
  408. {
  409. strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
  410. strlcpy(info->version, DRV_VERSION, sizeof(info->version));
  411. }
/* ethtool ops: only driver identification is supported */
static const struct ethtool_ops vrf_ethtool_ops = {
	.get_drvinfo = vrf_get_drvinfo,
};
  415. static void vrf_setup(struct net_device *dev)
  416. {
  417. ether_setup(dev);
  418. /* Initialize the device structure. */
  419. dev->netdev_ops = &vrf_netdev_ops;
  420. dev->ethtool_ops = &vrf_ethtool_ops;
  421. dev->destructor = free_netdev;
  422. /* Fill in device structure with ethernet-generic values. */
  423. eth_hw_addr_random(dev);
  424. /* don't acquire vrf device's netif_tx_lock when transmitting */
  425. dev->features |= NETIF_F_LLTX;
  426. /* don't allow vrf devices to change network namespaces. */
  427. dev->features |= NETIF_F_NETNS_LOCAL;
  428. }
  429. static int vrf_validate(struct nlattr *tb[], struct nlattr *data[])
  430. {
  431. if (tb[IFLA_ADDRESS]) {
  432. if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
  433. return -EINVAL;
  434. if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
  435. return -EADDRNOTAVAIL;
  436. }
  437. return 0;
  438. }
/* rtnl_link_ops->dellink: unpublish the device's vrf_ptr — freed only
 * after an RCU grace period since readers may still hold it — and
 * queue the device for unregistration.
 */
static void vrf_dellink(struct net_device *dev, struct list_head *head)
{
	struct net_vrf_dev *vrf_ptr = rtnl_dereference(dev->vrf_ptr);

	RCU_INIT_POINTER(dev->vrf_ptr, NULL);
	kfree_rcu(vrf_ptr, rcu);

	unregister_netdevice_queue(dev, head);
}
  446. static int vrf_newlink(struct net *src_net, struct net_device *dev,
  447. struct nlattr *tb[], struct nlattr *data[])
  448. {
  449. struct net_vrf *vrf = netdev_priv(dev);
  450. struct net_vrf_dev *vrf_ptr;
  451. int err;
  452. if (!data || !data[IFLA_VRF_TABLE])
  453. return -EINVAL;
  454. vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]);
  455. dev->priv_flags |= IFF_VRF_MASTER;
  456. err = -ENOMEM;
  457. vrf_ptr = kmalloc(sizeof(*dev->vrf_ptr), GFP_KERNEL);
  458. if (!vrf_ptr)
  459. goto out_fail;
  460. vrf_ptr->ifindex = dev->ifindex;
  461. vrf_ptr->tb_id = vrf->tb_id;
  462. err = register_netdevice(dev);
  463. if (err < 0)
  464. goto out_fail;
  465. rcu_assign_pointer(dev->vrf_ptr, vrf_ptr);
  466. return 0;
  467. out_fail:
  468. kfree(vrf_ptr);
  469. free_netdev(dev);
  470. return err;
  471. }
/* rtnl_link_ops->get_size: link info payload is just the u32 table id */
static size_t vrf_nl_getsize(const struct net_device *dev)
{
	return nla_total_size(sizeof(u32)); /* IFLA_VRF_TABLE */
}
  476. static int vrf_fillinfo(struct sk_buff *skb,
  477. const struct net_device *dev)
  478. {
  479. struct net_vrf *vrf = netdev_priv(dev);
  480. return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id);
  481. }
/* netlink attribute policy for IFLA_VRF_* attributes */
static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
	[IFLA_VRF_TABLE] = { .type = NLA_U32 },
};
/* rtnl link ops: lets "ip link add ... type vrf table N" create us */
static struct rtnl_link_ops vrf_link_ops __read_mostly = {
	.kind = DRV_NAME,
	.priv_size = sizeof(struct net_vrf),

	.get_size = vrf_nl_getsize,
	.policy = vrf_nl_policy,
	.validate = vrf_validate,
	.fill_info = vrf_fillinfo,

	.newlink = vrf_newlink,
	.dellink = vrf_dellink,
	.setup = vrf_setup,
	.maxtype = IFLA_VRF_MAX,
};
/* netdevice notifier: when an enslaved port goes away, make its VRF
 * master drop the slave reference so nothing dangles
 */
static int vrf_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	/* only care about unregister events to drop slave references */
	if (event == NETDEV_UNREGISTER) {
		struct net_vrf_dev *vrf_ptr = rtnl_dereference(dev->vrf_ptr);
		struct net_device *vrf_dev;

		/* VRF devices carry a vrf_ptr of their own; skip them */
		if (!vrf_ptr || netif_is_vrf(dev))
			goto out;

		vrf_dev = __dev_get_by_index(dev_net(dev), vrf_ptr->ifindex);
		if (vrf_dev)
			vrf_del_slave(vrf_dev, dev);
	}
out:
	return NOTIFY_DONE;
}
/* notifier registered in vrf_init_module */
static struct notifier_block vrf_notifier_block __read_mostly = {
	.notifier_call = vrf_device_event,
};
  517. static int __init vrf_init_module(void)
  518. {
  519. int rc;
  520. vrf_dst_ops.kmem_cachep =
  521. kmem_cache_create("vrf_ip_dst_cache",
  522. sizeof(struct rtable), 0,
  523. SLAB_HWCACHE_ALIGN | SLAB_PANIC,
  524. NULL);
  525. if (!vrf_dst_ops.kmem_cachep)
  526. return -ENOMEM;
  527. register_netdevice_notifier(&vrf_notifier_block);
  528. rc = rtnl_link_register(&vrf_link_ops);
  529. if (rc < 0)
  530. goto error;
  531. return 0;
  532. error:
  533. unregister_netdevice_notifier(&vrf_notifier_block);
  534. kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
  535. return rc;
  536. }
/* module unload: tear down in reverse order of vrf_init_module */
static void __exit vrf_cleanup_module(void)
{
	rtnl_link_unregister(&vrf_link_ops);
	unregister_netdevice_notifier(&vrf_notifier_block);
	kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
}
module_init(vrf_init_module);
module_exit(vrf_cleanup_module);
MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
MODULE_LICENSE("GPL");
/* lets "ip link add type vrf" autoload this module */
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
MODULE_VERSION(DRV_VERSION);