ip_vti.c

/*
 *	Linux NET3: IP/IP protocol decoder modified to support
 *		    virtual tunnel interface
 *
 *	Authors:
 *		Saurabh Mohan (saurabh.mohan@vyatta.com) 05/07/2012
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */

/*
 *	This version of net/ipv4/ip_vti.c is cloned from net/ipv4/ipip.c.
 *	For comments look at net/ipv4/ip_gre.c --ANK
 */

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/netfilter_ipv4.h>
#include <linux/if_ether.h>
#include <linux/icmpv6.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/ip_tunnels.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

static struct rtnl_link_ops vti_link_ops __read_mostly;

static unsigned int vti_net_id __read_mostly;
static int vti_tunnel_init(struct net_device *dev);
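
/* Look up the vti tunnel that matches the outer IP header and, if one is
 * found, hand the packet to the xfrm input path for decapsulation.
 */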
static int vti_input(struct sk_buff *skb, int nexthdr, __be32 spi,
                     int encap_type)
{
        struct ip_tunnel *tunnel;
        const struct iphdr *iph = ip_hdr(skb);
        struct net *net = dev_net(skb->dev);
        struct ip_tunnel_net *itn = net_generic(net, vti_net_id);

        tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
                                  iph->saddr, iph->daddr, 0);
        if (tunnel) {
                if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
                        goto drop;

                XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel;

                return xfrm_input(skb, nexthdr, spi, encap_type);
        }

        return -EINVAL;
drop:
        kfree_skb(skb);
        return 0;
}
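
/* Receive handler registered for ESP, AH and IPCOMP; the SPI is passed down
 * as 0 and parsed from the packet later by xfrm_input().
 */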
static int vti_rcv(struct sk_buff *skb)
{
        XFRM_SPI_SKB_CB(skb)->family = AF_INET;
        XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);

        return vti_input(skb, ip_hdr(skb)->protocol, 0, 0);
}
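
/* Callback invoked by the xfrm layer once the inner packet has been
 * decapsulated: re-check the inbound policy with the tunnel i_key as the
 * mark, point the skb at the vti device and account it in the per-CPU stats.
 */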
static int vti_rcv_cb(struct sk_buff *skb, int err)
{
        unsigned short family;
        struct net_device *dev;
        struct pcpu_sw_netstats *tstats;
        struct xfrm_state *x;
        struct xfrm_mode *inner_mode;
        struct ip_tunnel *tunnel = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4;
        u32 orig_mark = skb->mark;
        int ret;

        if (!tunnel)
                return 1;

        dev = tunnel->dev;

        if (err) {
                dev->stats.rx_errors++;
                dev->stats.rx_dropped++;

                return 0;
        }

        x = xfrm_input_state(skb);

        inner_mode = x->inner_mode;

        if (x->sel.family == AF_UNSPEC) {
                inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
                if (inner_mode == NULL) {
                        XFRM_INC_STATS(dev_net(skb->dev),
                                       LINUX_MIB_XFRMINSTATEMODEERROR);
                        return -EINVAL;
                }
        }

        family = inner_mode->afinfo->family;

        skb->mark = be32_to_cpu(tunnel->parms.i_key);
        ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family);
        skb->mark = orig_mark;

        if (!ret)
                return -EPERM;

        skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(skb->dev)));
        skb->dev = dev;

        tstats = this_cpu_ptr(dev->tstats);

        u64_stats_update_begin(&tstats->syncp);
        tstats->rx_packets++;
        tstats->rx_bytes += skb->len;
        u64_stats_update_end(&tstats->syncp);

        return 0;
}

static bool vti_state_check(const struct xfrm_state *x, __be32 dst, __be32 src)
{
        xfrm_address_t *daddr = (xfrm_address_t *)&dst;
        xfrm_address_t *saddr = (xfrm_address_t *)&src;

        /* if there is no transform then this tunnel is not functional.
         * Or if the xfrm is not mode tunnel.
         */
        if (!x || x->props.mode != XFRM_MODE_TUNNEL ||
            x->props.family != AF_INET)
                return false;

        if (!dst)
                return xfrm_addr_equal(saddr, &x->props.saddr, AF_INET);

        if (!xfrm_state_addr_check(x, daddr, saddr, AF_INET))
                return false;

        return true;
}
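
/* Transmit helper: route the packet through the xfrm stack, verify that the
 * state bound to the route matches this tunnel, handle PMTU signalling, then
 * hand the packet to dst_output() for encapsulation and transmission.
 */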
static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
                            struct flowi *fl)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct ip_tunnel_parm *parms = &tunnel->parms;
        struct dst_entry *dst = skb_dst(skb);
        struct net_device *tdev;        /* Device to other host */
        int pkt_len = skb->len;
        int err;
        int mtu;

        if (!dst) {
                dev->stats.tx_carrier_errors++;
                goto tx_error_icmp;
        }

        dst_hold(dst);
        dst = xfrm_lookup(tunnel->net, dst, fl, NULL, 0);
        if (IS_ERR(dst)) {
                dev->stats.tx_carrier_errors++;
                goto tx_error_icmp;
        }

        if (!vti_state_check(dst->xfrm, parms->iph.daddr, parms->iph.saddr)) {
                dev->stats.tx_carrier_errors++;
                dst_release(dst);
                goto tx_error_icmp;
        }

        tdev = dst->dev;

        if (tdev == dev) {
                dst_release(dst);
                dev->stats.collisions++;
                goto tx_error;
        }

        mtu = dst_mtu(dst);
        if (skb->len > mtu) {
                skb_dst_update_pmtu(skb, mtu);
                if (skb->protocol == htons(ETH_P_IP)) {
                        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
                                  htonl(mtu));
                } else {
                        if (mtu < IPV6_MIN_MTU)
                                mtu = IPV6_MIN_MTU;

                        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
                }

                dst_release(dst);
                goto tx_error;
        }

        skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev)));
        skb_dst_set(skb, dst);
        skb->dev = skb_dst(skb)->dev;

        err = dst_output(tunnel->net, skb->sk, skb);
        if (net_xmit_eval(err) == 0)
                err = pkt_len;
        iptunnel_xmit_stats(dev, err);
        return NETDEV_TX_OK;

tx_error_icmp:
        dst_link_failure(skb);
tx_error:
        dev->stats.tx_errors++;
        kfree_skb(skb);
        return NETDEV_TX_OK;
}

/* This function assumes it is being called from dev_queue_xmit()
 * and that skb is filled properly by that function.
 */
static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct flowi fl;

        memset(&fl, 0, sizeof(fl));

        switch (skb->protocol) {
        case htons(ETH_P_IP):
                xfrm_decode_session(skb, &fl, AF_INET);
                memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
                break;
        case htons(ETH_P_IPV6):
                xfrm_decode_session(skb, &fl, AF_INET6);
                memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
                break;
        default:
                dev->stats.tx_errors++;
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        /* override mark with tunnel output key */
        fl.flowi_mark = be32_to_cpu(tunnel->parms.o_key);

        return vti_xmit(skb, dev, &fl);
}
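
/* ICMP error handler: map the error back to the xfrm state selected by the
 * tunnel o_key and the SPI carried in the offending packet, then update the
 * PMTU or redirect information for that state.
 */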
static int vti4_err(struct sk_buff *skb, u32 info)
{
        __be32 spi;
        __u32 mark;
        struct xfrm_state *x;
        struct ip_tunnel *tunnel;
        struct ip_esp_hdr *esph;
        struct ip_auth_hdr *ah;
        struct ip_comp_hdr *ipch;
        struct net *net = dev_net(skb->dev);
        const struct iphdr *iph = (const struct iphdr *)skb->data;
        int protocol = iph->protocol;
        struct ip_tunnel_net *itn = net_generic(net, vti_net_id);

        tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
                                  iph->daddr, iph->saddr, 0);
        if (!tunnel)
                return -1;

        mark = be32_to_cpu(tunnel->parms.o_key);

        switch (protocol) {
        case IPPROTO_ESP:
                esph = (struct ip_esp_hdr *)(skb->data + (iph->ihl << 2));
                spi = esph->spi;
                break;
        case IPPROTO_AH:
                ah = (struct ip_auth_hdr *)(skb->data + (iph->ihl << 2));
                spi = ah->spi;
                break;
        case IPPROTO_COMP:
                ipch = (struct ip_comp_hdr *)(skb->data + (iph->ihl << 2));
                spi = htonl(ntohs(ipch->cpi));
                break;
        default:
                return 0;
        }

        switch (icmp_hdr(skb)->type) {
        case ICMP_DEST_UNREACH:
                if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
                        return 0;
                /* fall through */
        case ICMP_REDIRECT:
                break;
        default:
                return 0;
        }

        x = xfrm_state_lookup(net, mark, (const xfrm_address_t *)&iph->daddr,
                              spi, protocol, AF_INET);
        if (!x)
                return 0;

        if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
                ipv4_update_pmtu(skb, net, info, 0, 0, protocol, 0);
        else
                ipv4_redirect(skb, net, 0, 0, protocol, 0);
        xfrm_state_put(x);

        return 0;
}
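
/* Legacy ioctl-based configuration interface (SIOCADDTUNNEL and friends). */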
static int
vti_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        int err = 0;
        struct ip_tunnel_parm p;

        if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
                return -EFAULT;

        if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
                if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPIP ||
                    p.iph.ihl != 5)
                        return -EINVAL;
        }

        if (!(p.i_flags & GRE_KEY))
                p.i_key = 0;
        if (!(p.o_flags & GRE_KEY))
                p.o_key = 0;

        p.i_flags = VTI_ISVTI;

        err = ip_tunnel_ioctl(dev, &p, cmd);
        if (err)
                return err;

        if (cmd != SIOCDELTUNNEL) {
                p.i_flags |= GRE_KEY;
                p.o_flags |= GRE_KEY;
        }

        if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
                return -EFAULT;
        return 0;
}

static const struct net_device_ops vti_netdev_ops = {
        .ndo_init        = vti_tunnel_init,
        .ndo_uninit      = ip_tunnel_uninit,
        .ndo_start_xmit  = vti_tunnel_xmit,
        .ndo_do_ioctl    = vti_tunnel_ioctl,
        .ndo_change_mtu  = ip_tunnel_change_mtu,
        .ndo_get_stats64 = ip_tunnel_get_stats64,
        .ndo_get_iflink  = ip_tunnel_get_iflink,
};

static void vti_tunnel_setup(struct net_device *dev)
{
        dev->netdev_ops = &vti_netdev_ops;
        dev->type       = ARPHRD_TUNNEL;
        ip_tunnel_setup(dev, vti_net_id);
}

static int vti_tunnel_init(struct net_device *dev)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct iphdr *iph = &tunnel->parms.iph;

        memcpy(dev->dev_addr, &iph->saddr, 4);
        memcpy(dev->broadcast, &iph->daddr, 4);

        dev->flags     = IFF_NOARP;
        dev->addr_len  = 4;
        dev->features  |= NETIF_F_LLTX;
        netif_keep_dst(dev);

        return ip_tunnel_init(dev);
}
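
/* Initialise the IP header template of the per-namespace fallback device. */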
static void __net_init vti_fb_tunnel_init(struct net_device *dev)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct iphdr *iph = &tunnel->parms.iph;

        iph->version  = 4;
        iph->protocol = IPPROTO_IPIP;
        iph->ihl      = 5;
}

static struct xfrm4_protocol vti_esp4_protocol __read_mostly = {
        .handler       = vti_rcv,
        .input_handler = vti_input,
        .cb_handler    = vti_rcv_cb,
        .err_handler   = vti4_err,
        .priority      = 100,
};

static struct xfrm4_protocol vti_ah4_protocol __read_mostly = {
        .handler       = vti_rcv,
        .input_handler = vti_input,
        .cb_handler    = vti_rcv_cb,
        .err_handler   = vti4_err,
        .priority      = 100,
};

static struct xfrm4_protocol vti_ipcomp4_protocol __read_mostly = {
        .handler       = vti_rcv,
        .input_handler = vti_input,
        .cb_handler    = vti_rcv_cb,
        .err_handler   = vti4_err,
        .priority      = 100,
};

static int __net_init vti_init_net(struct net *net)
{
        int err;
        struct ip_tunnel_net *itn;

        err = ip_tunnel_init_net(net, vti_net_id, &vti_link_ops, "ip_vti0");
        if (err)
                return err;
        itn = net_generic(net, vti_net_id);
        if (itn->fb_tunnel_dev)
                vti_fb_tunnel_init(itn->fb_tunnel_dev);
        return 0;
}

static void __net_exit vti_exit_batch_net(struct list_head *list_net)
{
        ip_tunnel_delete_nets(list_net, vti_net_id, &vti_link_ops);
}

static struct pernet_operations vti_net_ops = {
        .init       = vti_init_net,
        .exit_batch = vti_exit_batch_net,
        .id         = &vti_net_id,
        .size       = sizeof(struct ip_tunnel_net),
};

static int vti_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
                               struct netlink_ext_ack *extack)
{
        return 0;
}

static void vti_netlink_parms(struct nlattr *data[],
                              struct ip_tunnel_parm *parms,
                              __u32 *fwmark)
{
        memset(parms, 0, sizeof(*parms));

        parms->iph.protocol = IPPROTO_IPIP;

        if (!data)
                return;

        parms->i_flags = VTI_ISVTI;

        if (data[IFLA_VTI_LINK])
                parms->link = nla_get_u32(data[IFLA_VTI_LINK]);

        if (data[IFLA_VTI_IKEY])
                parms->i_key = nla_get_be32(data[IFLA_VTI_IKEY]);

        if (data[IFLA_VTI_OKEY])
                parms->o_key = nla_get_be32(data[IFLA_VTI_OKEY]);

        if (data[IFLA_VTI_LOCAL])
                parms->iph.saddr = nla_get_in_addr(data[IFLA_VTI_LOCAL]);

        if (data[IFLA_VTI_REMOTE])
                parms->iph.daddr = nla_get_in_addr(data[IFLA_VTI_REMOTE]);

        if (data[IFLA_VTI_FWMARK])
                *fwmark = nla_get_u32(data[IFLA_VTI_FWMARK]);
}

static int vti_newlink(struct net *src_net, struct net_device *dev,
                       struct nlattr *tb[], struct nlattr *data[],
                       struct netlink_ext_ack *extack)
{
        struct ip_tunnel_parm parms;
        __u32 fwmark = 0;

        vti_netlink_parms(data, &parms, &fwmark);
        return ip_tunnel_newlink(dev, tb, &parms, fwmark);
}

static int vti_changelink(struct net_device *dev, struct nlattr *tb[],
                          struct nlattr *data[],
                          struct netlink_ext_ack *extack)
{
        struct ip_tunnel *t = netdev_priv(dev);
        __u32 fwmark = t->fwmark;
        struct ip_tunnel_parm p;

        vti_netlink_parms(data, &p, &fwmark);
        return ip_tunnel_changelink(dev, tb, &p, fwmark);
}

static size_t vti_get_size(const struct net_device *dev)
{
        return
                /* IFLA_VTI_LINK */
                nla_total_size(4) +
                /* IFLA_VTI_IKEY */
                nla_total_size(4) +
                /* IFLA_VTI_OKEY */
                nla_total_size(4) +
                /* IFLA_VTI_LOCAL */
                nla_total_size(4) +
                /* IFLA_VTI_REMOTE */
                nla_total_size(4) +
                /* IFLA_VTI_FWMARK */
                nla_total_size(4) +
                0;
}

static int vti_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
        struct ip_tunnel *t = netdev_priv(dev);
        struct ip_tunnel_parm *p = &t->parms;

        if (nla_put_u32(skb, IFLA_VTI_LINK, p->link) ||
            nla_put_be32(skb, IFLA_VTI_IKEY, p->i_key) ||
            nla_put_be32(skb, IFLA_VTI_OKEY, p->o_key) ||
            nla_put_in_addr(skb, IFLA_VTI_LOCAL, p->iph.saddr) ||
            nla_put_in_addr(skb, IFLA_VTI_REMOTE, p->iph.daddr) ||
            nla_put_u32(skb, IFLA_VTI_FWMARK, t->fwmark))
                return -EMSGSIZE;

        return 0;
}

static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
        [IFLA_VTI_LINK]   = { .type = NLA_U32 },
        [IFLA_VTI_IKEY]   = { .type = NLA_U32 },
        [IFLA_VTI_OKEY]   = { .type = NLA_U32 },
        [IFLA_VTI_LOCAL]  = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
        [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
        [IFLA_VTI_FWMARK] = { .type = NLA_U32 },
};

static struct rtnl_link_ops vti_link_ops __read_mostly = {
        .kind         = "vti",
        .maxtype      = IFLA_VTI_MAX,
        .policy       = vti_policy,
        .priv_size    = sizeof(struct ip_tunnel),
        .setup        = vti_tunnel_setup,
        .validate     = vti_tunnel_validate,
        .newlink      = vti_newlink,
        .changelink   = vti_changelink,
        .dellink      = ip_tunnel_dellink,
        .get_size     = vti_get_size,
        .fill_info    = vti_fill_info,
        .get_link_net = ip_tunnel_get_link_net,
};

static int __init vti_init(void)
{
        const char *msg;
        int err;

        pr_info("IPv4 over IPsec tunneling driver\n");

        msg = "tunnel device";
        err = register_pernet_device(&vti_net_ops);
        if (err < 0)
                goto pernet_dev_failed;

        msg = "tunnel protocols";
        err = xfrm4_protocol_register(&vti_esp4_protocol, IPPROTO_ESP);
        if (err < 0)
                goto xfrm_proto_esp_failed;
        err = xfrm4_protocol_register(&vti_ah4_protocol, IPPROTO_AH);
        if (err < 0)
                goto xfrm_proto_ah_failed;
        err = xfrm4_protocol_register(&vti_ipcomp4_protocol, IPPROTO_COMP);
        if (err < 0)
                goto xfrm_proto_comp_failed;

        msg = "netlink interface";
        err = rtnl_link_register(&vti_link_ops);
        if (err < 0)
                goto rtnl_link_failed;

        return err;

rtnl_link_failed:
        xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
xfrm_proto_comp_failed:
        xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
xfrm_proto_ah_failed:
        xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
xfrm_proto_esp_failed:
        unregister_pernet_device(&vti_net_ops);
pernet_dev_failed:
        pr_err("vti init: failed to register %s\n", msg);
        return err;
}

static void __exit vti_fini(void)
{
        rtnl_link_unregister(&vti_link_ops);
        xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
        xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
        xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
        unregister_pernet_device(&vti_net_ops);
}

module_init(vti_init);
module_exit(vti_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("vti");
MODULE_ALIAS_NETDEV("ip_vti0");