vrf.c

/*
 * vrf.c: device driver to encapsulate a VRF space
 *
 * Copyright (c) 2015 Cumulus Networks. All rights reserved.
 * Copyright (c) 2015 Shrijeet Mukherjee <shm@cumulusnetworks.com>
 * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com>
 *
 * Based on dummy, team and ipvlan drivers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/netfilter.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <linux/u64_stats_sync.h>
#include <linux/hashtable.h>

#include <linux/inetdevice.h>
#include <net/arp.h>
#include <net/ip.h>
#include <net/ip_fib.h>
#include <net/ip6_route.h>
#include <net/route.h>
#include <net/addrconf.h>
#include <net/vrf.h>
#define DRV_NAME        "vrf"
#define DRV_VERSION     "1.0"

#define vrf_is_slave(dev)       ((dev)->flags & IFF_SLAVE)

#define vrf_master_get_rcu(dev) \
        ((struct net_device *)rcu_dereference(dev->rx_handler_data))
struct pcpu_dstats {
        u64                     tx_pkts;
        u64                     tx_bytes;
        u64                     tx_drps;
        u64                     rx_pkts;
        u64                     rx_bytes;
        struct u64_stats_sync   syncp;
};
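/* Stub dst_entry operations for the VRF device's preallocated dst:
 * the dst is created once per device and only torn down with it, so
 * check() always reports it valid and destroy() has nothing to free.
 */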
static struct dst_entry *vrf_ip_check(struct dst_entry *dst, u32 cookie)
{
        return dst;
}

static int vrf_ip_local_out(struct sk_buff *skb)
{
        return ip_local_out(skb);
}

static unsigned int vrf_v4_mtu(const struct dst_entry *dst)
{
        /* TO-DO: return max ethernet size? */
        return dst->dev->mtu;
}

static void vrf_dst_destroy(struct dst_entry *dst)
{
        /* our dst lives forever - or until the device is closed */
}

static unsigned int vrf_default_advmss(const struct dst_entry *dst)
{
        return 65535 - 40;
}

static struct dst_ops vrf_dst_ops = {
        .family         = AF_INET,
        .local_out      = vrf_ip_local_out,
        .check          = vrf_ip_check,
        .mtu            = vrf_v4_mtu,
        .destroy        = vrf_dst_destroy,
        .default_advmss = vrf_default_advmss,
};
static bool is_ip_rx_frame(struct sk_buff *skb)
{
        switch (skb->protocol) {
        case htons(ETH_P_IP):
        case htons(ETH_P_IPV6):
                return true;
        }
        return false;
}

static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
{
        vrf_dev->stats.tx_errors++;
        kfree_skb(skb);
}
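/* RX path: this handler runs on every frame received by an enslaved
 * port. IP and IPv6 frames are accounted against the VRF device and
 * re-injected with skb->dev pointed at the VRF master, so the stack
 * reprocesses them in the VRF's context; everything else passes
 * through on the original port.
 */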
/* note: already called with rcu_read_lock */
static rx_handler_result_t vrf_handle_frame(struct sk_buff **pskb)
{
        struct sk_buff *skb = *pskb;

        if (is_ip_rx_frame(skb)) {
                struct net_device *dev = vrf_master_get_rcu(skb->dev);
                struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

                u64_stats_update_begin(&dstats->syncp);
                dstats->rx_pkts++;
                dstats->rx_bytes += skb->len;
                u64_stats_update_end(&dstats->syncp);

                skb->dev = dev;

                return RX_HANDLER_ANOTHER;
        }
        return RX_HANDLER_PASS;
}
static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev,
                                                 struct rtnl_link_stats64 *stats)
{
        int i;

        for_each_possible_cpu(i) {
                const struct pcpu_dstats *dstats;
                u64 tbytes, tpkts, tdrops, rbytes, rpkts;
                unsigned int start;

                dstats = per_cpu_ptr(dev->dstats, i);
                do {
                        start = u64_stats_fetch_begin_irq(&dstats->syncp);
                        tbytes = dstats->tx_bytes;
                        tpkts = dstats->tx_pkts;
                        tdrops = dstats->tx_drps;
                        rbytes = dstats->rx_bytes;
                        rpkts = dstats->rx_pkts;
                } while (u64_stats_fetch_retry_irq(&dstats->syncp, start));
                stats->tx_bytes += tbytes;
                stats->tx_packets += tpkts;
                stats->tx_dropped += tdrops;
                stats->rx_bytes += rbytes;
                stats->rx_packets += rpkts;
        }
        return stats;
}
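/* IPv6 egress through a VRF device is not implemented yet; such frames
 * are counted as TX errors and dropped.
 */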
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
                                           struct net_device *dev)
{
        vrf_tx_error(dev, skb);
        return NET_XMIT_DROP;
}
static int vrf_send_v4_prep(struct sk_buff *skb, struct flowi4 *fl4,
                            struct net_device *vrf_dev)
{
        struct rtable *rt;
        int err = 1;

        rt = ip_route_output_flow(dev_net(vrf_dev), fl4, NULL);
        if (IS_ERR(rt))
                goto out;

        /* TO-DO: what about broadcast ? */
        if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
                ip_rt_put(rt);
                goto out;
        }

        skb_dst_drop(skb);
        skb_dst_set(skb, &rt->dst);
        err = 0;
out:
        return err;
}
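/* IPv4 egress: the flow is keyed with the VRF device as oif so the
 * FIB rule for the VRF matches and the lookup is done in the VRF's
 * table; a missing source address is then filled in from the egress
 * device before the packet is handed to ip_local_out.
 */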
static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
                                           struct net_device *vrf_dev)
{
        struct iphdr *ip4h = ip_hdr(skb);
        int ret = NET_XMIT_DROP;
        struct flowi4 fl4 = {
                /* needed to match OIF rule */
                .flowi4_oif = vrf_dev->ifindex,
                .flowi4_iif = LOOPBACK_IFINDEX,
                .flowi4_tos = RT_TOS(ip4h->tos),
                .flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_VRFSRC |
                                FLOWI_FLAG_SKIP_NH_OIF,
                .daddr = ip4h->daddr,
        };

        if (vrf_send_v4_prep(skb, &fl4, vrf_dev))
                goto err;

        if (!ip4h->saddr) {
                ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0,
                                               RT_SCOPE_LINK);
        }

        ret = ip_local_out(skb);
        if (unlikely(net_xmit_eval(ret)))
                vrf_dev->stats.tx_errors++;
        else
                ret = NET_XMIT_SUCCESS;

out:
        return ret;
err:
        vrf_tx_error(vrf_dev, skb);
        goto out;
}
static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
{
        /* strip the ethernet header added for pass through VRF device */
        __skb_pull(skb, skb_network_offset(skb));

        switch (skb->protocol) {
        case htons(ETH_P_IP):
                return vrf_process_v4_outbound(skb, dev);
        case htons(ETH_P_IPV6):
                return vrf_process_v6_outbound(skb, dev);
        default:
                vrf_tx_error(dev, skb);
                return NET_XMIT_DROP;
        }
}
static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
{
        /* read the length up front: the xmit path consumes the skb, so
         * it must not be dereferenced afterwards for the byte count
         */
        int len = skb->len;
        netdev_tx_t ret = is_ip_tx_frame(skb, dev);

        if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
                struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

                u64_stats_update_begin(&dstats->syncp);
                dstats->tx_pkts++;
                dstats->tx_bytes += len;
                u64_stats_update_end(&dstats->syncp);
        } else {
                this_cpu_inc(dev->dstats->tx_drps);
        }

        return ret;
}
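/* Output path for locally generated packets: vrf_output is installed
 * as the output hook of the per-VRF dst (see vrf_rtable_create below),
 * runs the POST_ROUTING netfilter hook, and vrf_finish_output resolves
 * the next-hop neighbour and transmits on the real egress device.
 */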
/* modelled after ip_finish_output2 */
static int vrf_finish_output(struct sock *sk, struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);
        struct rtable *rt = (struct rtable *)dst;
        struct net_device *dev = dst->dev;
        unsigned int hh_len = LL_RESERVED_SPACE(dev);
        struct neighbour *neigh;
        u32 nexthop;
        int ret = -EINVAL;

        /* Be paranoid, rather than too clever. */
        if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
                struct sk_buff *skb2;

                skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
                if (!skb2) {
                        ret = -ENOMEM;
                        goto err;
                }
                if (skb->sk)
                        skb_set_owner_w(skb2, skb->sk);

                consume_skb(skb);
                skb = skb2;
        }

        rcu_read_lock_bh();

        nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
        neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
        if (unlikely(!neigh))
                neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
        if (!IS_ERR(neigh))
                ret = dst_neigh_output(dst, neigh, skb);

        rcu_read_unlock_bh();
err:
        if (unlikely(ret < 0))
                vrf_tx_error(skb->dev, skb);

        return ret;
}
static int vrf_output(struct sock *sk, struct sk_buff *skb)
{
        struct net_device *dev = skb_dst(skb)->dev;

        IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);

        skb->dev = dev;
        skb->protocol = htons(ETH_P_IP);

        return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, skb,
                            NULL, dev,
                            vrf_finish_output,
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
}
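/* Each VRF device owns one preallocated rtable/dst that steers output
 * through vrf_output above. It is allocated with an initial refcount
 * of 2 so it survives transient users, and is only released when
 * vrf_rtable_destroy drops it at device teardown.
 */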
static void vrf_rtable_destroy(struct net_vrf *vrf)
{
        struct dst_entry *dst = (struct dst_entry *)vrf->rth;

        dst_destroy(dst);
        vrf->rth = NULL;
}

static struct rtable *vrf_rtable_create(struct net_device *dev)
{
        struct rtable *rth;

        rth = dst_alloc(&vrf_dst_ops, dev, 2,
                        DST_OBSOLETE_NONE,
                        (DST_HOST | DST_NOPOLICY | DST_NOXFRM));
        if (rth) {
                rth->dst.output = vrf_output;
                rth->rt_genid = rt_genid_ipv4(dev_net(dev));
                rth->rt_flags = 0;
                rth->rt_type = RTN_UNICAST;
                rth->rt_is_input = 0;
                rth->rt_iif = 0;
                rth->rt_pmtu = 0;
                rth->rt_gateway = 0;
                rth->rt_uses_gateway = 0;
                INIT_LIST_HEAD(&rth->rt_uncached);
                rth->rt_uncached_list = NULL;
        }

        return rth;
}
/**************************** device handling ********************/

/* cycle interface to flush neighbor cache and move routes across tables */
static void cycle_netdev(struct net_device *dev)
{
        unsigned int flags = dev->flags;
        int ret;

        if (!netif_running(dev))
                return;

        ret = dev_change_flags(dev, flags & ~IFF_UP);
        if (ret >= 0)
                ret = dev_change_flags(dev, flags);

        if (ret < 0) {
                netdev_err(dev,
                           "Failed to cycle device %s; route tables might be wrong!\n",
                           dev->name);
        }
}
static struct slave *__vrf_find_slave_dev(struct slave_queue *queue,
                                          struct net_device *dev)
{
        struct list_head *head = &queue->all_slaves;
        struct slave *slave;

        list_for_each_entry(slave, head, list) {
                if (slave->dev == dev)
                        return slave;
        }

        return NULL;
}

/* inverse of __vrf_insert_slave */
static void __vrf_remove_slave(struct slave_queue *queue, struct slave *slave)
{
        list_del(&slave->list);
}

static void __vrf_insert_slave(struct slave_queue *queue, struct slave *slave)
{
        list_add(&slave->list, &queue->all_slaves);
}
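/* Enslaving a port to a VRF: register the VRF rx handler on the port,
 * link it under the VRF as its master upper device, mark it IFF_SLAVE,
 * publish its vrf_ptr (VRF ifindex + table id), and cycle the port so
 * its routes move into the VRF table.
 */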
static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
{
        struct net_vrf_dev *vrf_ptr = kmalloc(sizeof(*vrf_ptr), GFP_KERNEL);
        struct slave *slave = kzalloc(sizeof(*slave), GFP_KERNEL);
        struct net_vrf *vrf = netdev_priv(dev);
        struct slave_queue *queue = &vrf->queue;
        int ret = -ENOMEM;

        if (!slave || !vrf_ptr)
                goto out_fail;

        slave->dev = port_dev;
        vrf_ptr->ifindex = dev->ifindex;
        vrf_ptr->tb_id = vrf->tb_id;

        /* register the packet handler for slave ports */
        ret = netdev_rx_handler_register(port_dev, vrf_handle_frame, dev);
        if (ret) {
                netdev_err(port_dev,
                           "Device %s failed to register rx_handler\n",
                           port_dev->name);
                goto out_fail;
        }

        ret = netdev_master_upper_dev_link(port_dev, dev);
        if (ret < 0)
                goto out_unregister;

        port_dev->flags |= IFF_SLAVE;
        __vrf_insert_slave(queue, slave);
        rcu_assign_pointer(port_dev->vrf_ptr, vrf_ptr);
        cycle_netdev(port_dev);

        return 0;

out_unregister:
        netdev_rx_handler_unregister(port_dev);
out_fail:
        kfree(vrf_ptr);
        kfree(slave);
        return ret;
}

static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
{
        if (netif_is_vrf(port_dev) || vrf_is_slave(port_dev))
                return -EINVAL;

        return do_vrf_add_slave(dev, port_dev);
}
/* inverse of do_vrf_add_slave */
static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
        struct net_vrf_dev *vrf_ptr = rtnl_dereference(port_dev->vrf_ptr);
        struct net_vrf *vrf = netdev_priv(dev);
        struct slave_queue *queue = &vrf->queue;
        struct slave *slave;

        RCU_INIT_POINTER(port_dev->vrf_ptr, NULL);

        netdev_upper_dev_unlink(port_dev, dev);
        port_dev->flags &= ~IFF_SLAVE;

        netdev_rx_handler_unregister(port_dev);

        /* after netdev_rx_handler_unregister for synchronize_rcu */
        kfree(vrf_ptr);

        cycle_netdev(port_dev);

        slave = __vrf_find_slave_dev(queue, port_dev);
        if (slave)
                __vrf_remove_slave(queue, slave);
        kfree(slave);

        return 0;
}

static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
        return do_vrf_del_slave(dev, port_dev);
}
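/* Device teardown: release the per-VRF dst, detach any remaining
 * slaves, and free the per-cpu stats. Initialization below is the
 * mirror image and unwinds in reverse order on failure.
 */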
static void vrf_dev_uninit(struct net_device *dev)
{
        struct net_vrf *vrf = netdev_priv(dev);
        struct slave_queue *queue = &vrf->queue;
        struct list_head *head = &queue->all_slaves;
        struct slave *slave, *next;

        vrf_rtable_destroy(vrf);

        list_for_each_entry_safe(slave, next, head, list)
                vrf_del_slave(dev, slave->dev);

        free_percpu(dev->dstats);
        dev->dstats = NULL;
}

static int vrf_dev_init(struct net_device *dev)
{
        struct net_vrf *vrf = netdev_priv(dev);

        INIT_LIST_HEAD(&vrf->queue.all_slaves);

        dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
        if (!dev->dstats)
                goto out_nomem;

        /* create the default dst which points back to us */
        vrf->rth = vrf_rtable_create(dev);
        if (!vrf->rth)
                goto out_stats;

        dev->flags = IFF_MASTER | IFF_NOARP;

        return 0;

out_stats:
        free_percpu(dev->dstats);
        dev->dstats = NULL;
out_nomem:
        return -ENOMEM;
}
static const struct net_device_ops vrf_netdev_ops = {
        .ndo_init               = vrf_dev_init,
        .ndo_uninit             = vrf_dev_uninit,
        .ndo_start_xmit         = vrf_xmit,
        .ndo_get_stats64        = vrf_get_stats64,
        .ndo_add_slave          = vrf_add_slave,
        .ndo_del_slave          = vrf_del_slave,
};

static void vrf_get_drvinfo(struct net_device *dev,
                            struct ethtool_drvinfo *info)
{
        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}

static const struct ethtool_ops vrf_ethtool_ops = {
        .get_drvinfo    = vrf_get_drvinfo,
};
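/* The VRF device presents itself as an ethernet device with a random
 * MAC: LLTX avoids taking the tx lock on transmit, and NETNS_LOCAL
 * pins the device to the namespace it was created in.
 */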
static void vrf_setup(struct net_device *dev)
{
        ether_setup(dev);

        /* Initialize the device structure. */
        dev->netdev_ops = &vrf_netdev_ops;
        dev->ethtool_ops = &vrf_ethtool_ops;
        dev->destructor = free_netdev;

        /* Fill in device structure with ethernet-generic values. */
        eth_hw_addr_random(dev);

        /* don't acquire vrf device's netif_tx_lock when transmitting */
        dev->features |= NETIF_F_LLTX;

        /* don't allow vrf devices to change network namespaces. */
        dev->features |= NETIF_F_NETNS_LOCAL;
}
static int vrf_validate(struct nlattr *tb[], struct nlattr *data[])
{
        if (tb[IFLA_ADDRESS]) {
                if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
                        return -EINVAL;
                if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
                        return -EADDRNOTAVAIL;
        }
        return 0;
}

static void vrf_dellink(struct net_device *dev, struct list_head *head)
{
        struct net_vrf_dev *vrf_ptr = rtnl_dereference(dev->vrf_ptr);

        RCU_INIT_POINTER(dev->vrf_ptr, NULL);
        kfree_rcu(vrf_ptr, rcu);
        unregister_netdevice_queue(dev, head);
}
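/* newlink: the only required attribute is the table id; the device is
 * flagged as a VRF master, and its vrf_ptr (ifindex + table id) is
 * only published after the netdevice registers successfully.
 */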
static int vrf_newlink(struct net *src_net, struct net_device *dev,
                       struct nlattr *tb[], struct nlattr *data[])
{
        struct net_vrf *vrf = netdev_priv(dev);
        struct net_vrf_dev *vrf_ptr;
        int err;

        if (!data || !data[IFLA_VRF_TABLE])
                return -EINVAL;

        vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]);

        dev->priv_flags |= IFF_VRF_MASTER;

        err = -ENOMEM;
        vrf_ptr = kmalloc(sizeof(*dev->vrf_ptr), GFP_KERNEL);
        if (!vrf_ptr)
                goto out_fail;

        vrf_ptr->ifindex = dev->ifindex;
        vrf_ptr->tb_id = vrf->tb_id;

        err = register_netdevice(dev);
        if (err < 0)
                goto out_fail;

        rcu_assign_pointer(dev->vrf_ptr, vrf_ptr);

        return 0;

out_fail:
        kfree(vrf_ptr);
        free_netdev(dev);
        return err;
}
static size_t vrf_nl_getsize(const struct net_device *dev)
{
        return nla_total_size(sizeof(u32));  /* IFLA_VRF_TABLE */
}

static int vrf_fillinfo(struct sk_buff *skb,
                        const struct net_device *dev)
{
        struct net_vrf *vrf = netdev_priv(dev);

        return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id);
}

static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
        [IFLA_VRF_TABLE] = { .type = NLA_U32 },
};

static struct rtnl_link_ops vrf_link_ops __read_mostly = {
        .kind           = DRV_NAME,
        .priv_size      = sizeof(struct net_vrf),

        .get_size       = vrf_nl_getsize,
        .policy         = vrf_nl_policy,
        .validate       = vrf_validate,
        .fill_info      = vrf_fillinfo,

        .newlink        = vrf_newlink,
        .dellink        = vrf_dellink,
        .setup          = vrf_setup,
        .maxtype        = IFLA_VRF_MAX,
};
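/* With an iproute2 that understands this link type, a VRF is created
 * and a port enslaved along these lines (device names are examples):
 *
 *   ip link add vrf-blue type vrf table 10
 *   ip link set dev vrf-blue up
 *   ip link set dev eth0 master vrf-blue
 */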
static int vrf_device_event(struct notifier_block *unused,
                            unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        /* only care about unregister events to drop slave references */
        if (event == NETDEV_UNREGISTER) {
                struct net_vrf_dev *vrf_ptr = rtnl_dereference(dev->vrf_ptr);
                struct net_device *vrf_dev;

                if (!vrf_ptr || netif_is_vrf(dev))
                        goto out;

                vrf_dev = netdev_master_upper_dev_get(dev);
                vrf_del_slave(vrf_dev, dev);
        }
out:
        return NOTIFY_DONE;
}

static struct notifier_block vrf_notifier_block __read_mostly = {
        .notifier_call = vrf_device_event,
};
static int __init vrf_init_module(void)
{
        int rc;

        vrf_dst_ops.kmem_cachep =
                kmem_cache_create("vrf_ip_dst_cache",
                                  sizeof(struct rtable), 0,
                                  SLAB_HWCACHE_ALIGN,
                                  NULL);

        if (!vrf_dst_ops.kmem_cachep)
                return -ENOMEM;

        register_netdevice_notifier(&vrf_notifier_block);

        rc = rtnl_link_register(&vrf_link_ops);
        if (rc < 0)
                goto error;

        return 0;

error:
        unregister_netdevice_notifier(&vrf_notifier_block);
        kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
        return rc;
}

static void __exit vrf_cleanup_module(void)
{
        rtnl_link_unregister(&vrf_link_ops);
        unregister_netdevice_notifier(&vrf_notifier_block);
        kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
}

module_init(vrf_init_module);
module_exit(vrf_cleanup_module);
MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
MODULE_VERSION(DRV_VERSION);