xfrm_interface.c

// SPDX-License-Identifier: GPL-2.0
/*
 *	XFRM virtual interface
 *
 *	Copyright (C) 2018 secunet Security Networks AG
 *
 *	Author:
 *	Steffen Klassert <steffen.klassert@secunet.com>
 */

#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sockios.h>
#include <linux/icmp.h>
#include <linux/if.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_link.h>
#include <linux/if_arp.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/route.h>
#include <linux/rtnetlink.h>
#include <linux/netfilter_ipv6.h>
#include <linux/slab.h>
#include <linux/hash.h>

#include <linux/uaccess.h>
#include <linux/atomic.h>

#include <net/icmp.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <linux/etherdevice.h>
static int xfrmi_dev_init(struct net_device *dev);
static void xfrmi_dev_setup(struct net_device *dev);
static struct rtnl_link_ops xfrmi_link_ops __read_mostly;
static unsigned int xfrmi_net_id __read_mostly;

struct xfrmi_net {
	/* lists for storing interfaces in use */
	struct xfrm_if __rcu *xfrmi[1];
};

#define for_each_xfrmi_rcu(start, xi) \
	for (xi = rcu_dereference(start); xi; xi = rcu_dereference(xi->next))
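
/*
 * Walk the per-netns interface list (RCU) and return the interface whose
 * if_id matches the state's if_id, provided the device is up.
 */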
static struct xfrm_if *xfrmi_lookup(struct net *net, struct xfrm_state *x)
{
	struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
	struct xfrm_if *xi;

	for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) {
		if (x->if_id == xi->p.if_id &&
		    (xi->dev->flags & IFF_UP))
			return xi;
	}

	return NULL;
}
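
/*
 * Map an skb back to the xfrm interface it arrived on by matching the
 * device ifindex; registered below as the xfrm_if_cb ->decode_session
 * callback.
 */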
static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb)
{
	struct xfrmi_net *xfrmn;
	int ifindex;
	struct xfrm_if *xi;

	if (!skb->dev)
		return NULL;

	xfrmn = net_generic(dev_net(skb->dev), xfrmi_net_id);
	ifindex = skb->dev->ifindex;

	for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) {
		if (ifindex == xi->dev->ifindex &&
		    (xi->dev->flags & IFF_UP))
			return xi;
	}

	return NULL;
}
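
/*
 * List insertion/removal; both run with RTNL held (rtnl_dereference),
 * publishing via rcu_assign_pointer() for the lockless readers above.
 */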
static void xfrmi_link(struct xfrmi_net *xfrmn, struct xfrm_if *xi)
{
	struct xfrm_if __rcu **xip = &xfrmn->xfrmi[0];

	rcu_assign_pointer(xi->next, rtnl_dereference(*xip));
	rcu_assign_pointer(*xip, xi);
}

static void xfrmi_unlink(struct xfrmi_net *xfrmn, struct xfrm_if *xi)
{
	struct xfrm_if __rcu **xip;
	struct xfrm_if *iter;

	for (xip = &xfrmn->xfrmi[0];
	     (iter = rtnl_dereference(*xip)) != NULL;
	     xip = &iter->next) {
		if (xi == iter) {
			rcu_assign_pointer(*xip, xi->next);
			break;
		}
	}
}
static void xfrmi_dev_free(struct net_device *dev)
{
	free_percpu(dev->tstats);
}

static int xfrmi_create2(struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
	int err;

	dev->rtnl_link_ops = &xfrmi_link_ops;
	err = register_netdevice(dev);
	if (err < 0)
		goto out;

	strcpy(xi->p.name, dev->name);

	dev_hold(dev);
	xfrmi_link(xfrmn, xi);

	return 0;

out:
	return err;
}
static struct xfrm_if *xfrmi_create(struct net *net, struct xfrm_if_parms *p)
{
	struct net_device *dev;
	struct xfrm_if *xi;
	char name[IFNAMSIZ];
	int err;

	if (p->name[0]) {
		strlcpy(name, p->name, IFNAMSIZ);
	} else {
		err = -EINVAL;
		goto failed;
	}

	dev = alloc_netdev(sizeof(*xi), name, NET_NAME_UNKNOWN, xfrmi_dev_setup);
	if (!dev) {
		err = -EAGAIN;
		goto failed;
	}

	dev_net_set(dev, net);

	xi = netdev_priv(dev);
	xi->p = *p;
	xi->net = net;
	xi->dev = dev;
	xi->phydev = dev_get_by_index(net, p->link);
	if (!xi->phydev) {
		err = -ENODEV;
		goto failed_free;
	}

	err = xfrmi_create2(dev);
	if (err < 0)
		goto failed_dev_put;

	return xi;

failed_dev_put:
	dev_put(xi->phydev);
failed_free:
	free_netdev(dev);
failed:
	return ERR_PTR(err);
}
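
/*
 * Look up an interface by if_id under RTNL.  With 'create' set, an
 * existing match returns -EEXIST and a miss creates a new device;
 * without it, a miss returns -ENODEV.
 */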
static struct xfrm_if *xfrmi_locate(struct net *net, struct xfrm_if_parms *p,
				    int create)
{
	struct xfrm_if __rcu **xip;
	struct xfrm_if *xi;
	struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);

	for (xip = &xfrmn->xfrmi[0];
	     (xi = rtnl_dereference(*xip)) != NULL;
	     xip = &xi->next) {
		if (xi->p.if_id == p->if_id) {
			if (create)
				return ERR_PTR(-EEXIST);

			return xi;
		}
	}
	if (!create)
		return ERR_PTR(-ENODEV);
	return xfrmi_create(net, p);
}

static void xfrmi_dev_uninit(struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);
	struct xfrmi_net *xfrmn = net_generic(xi->net, xfrmi_net_id);

	xfrmi_unlink(xfrmn, xi);
	dev_put(xi->phydev);
	dev_put(dev);
}
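
/*
 * Strip per-device and netfilter/conntrack metadata from an skb; when
 * the packet crosses network namespaces (xnet), also drop the mark,
 * secpath and socket ownership.
 */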
static void xfrmi_scrub_packet(struct sk_buff *skb, bool xnet)
{
	skb->tstamp = 0;
	skb->pkt_type = PACKET_HOST;
	skb->skb_iif = 0;
	skb->ignore_df = 0;
	skb_dst_drop(skb);
	nf_reset(skb);
	nf_reset_trace(skb);

	if (!xnet)
		return;

	ipvs_reset(skb);
	secpath_reset(skb);
	skb_orphan(skb);
	skb->mark = 0;
}
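
/*
 * Receive-side callback, registered below as the ->cb_handler of the
 * xfrm4/xfrm6 protocols: re-attach the skb to the matching xfrm device,
 * run the inbound policy check when crossing namespaces and account the
 * packet in the per-cpu stats.
 */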
static int xfrmi_rcv_cb(struct sk_buff *skb, int err)
{
	struct pcpu_sw_netstats *tstats;
	struct xfrm_mode *inner_mode;
	struct net_device *dev;
	struct xfrm_state *x;
	struct xfrm_if *xi;
	bool xnet;

	if (err && !skb->sp)
		return 0;

	x = xfrm_input_state(skb);

	xi = xfrmi_lookup(xs_net(x), x);
	if (!xi)
		return 1;

	dev = xi->dev;
	skb->dev = dev;

	if (err) {
		dev->stats.rx_errors++;
		dev->stats.rx_dropped++;

		return 0;
	}

	xnet = !net_eq(xi->net, dev_net(skb->dev));

	if (xnet) {
		inner_mode = x->inner_mode;

		if (x->sel.family == AF_UNSPEC) {
			inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
			if (inner_mode == NULL) {
				XFRM_INC_STATS(dev_net(skb->dev),
					       LINUX_MIB_XFRMINSTATEMODEERROR);
				return -EINVAL;
			}
		}

		if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb,
				       inner_mode->afinfo->family))
			return -EPERM;
	}

	xfrmi_scrub_packet(skb, xnet);

	tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;
	u64_stats_update_end(&tstats->syncp);

	return 0;
}
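
/*
 * Core transmit path: look up an xfrm dst bound to this interface's
 * if_id, enforce path MTU (sending ICMP/ICMPv6 "too big" errors where
 * needed) and hand the packet to dst_output() on the underlying device.
 */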
static int
xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
{
	struct xfrm_if *xi = netdev_priv(dev);
	struct net_device_stats *stats = &xi->dev->stats;
	struct dst_entry *dst = skb_dst(skb);
	unsigned int length = skb->len;
	struct net_device *tdev;
	struct xfrm_state *x;
	int err = -1;
	int mtu;

	if (!dst)
		goto tx_err_link_failure;

	dst_hold(dst);
	dst = xfrm_lookup_with_ifid(xi->net, dst, fl, NULL, 0, xi->p.if_id);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto tx_err_link_failure;
	}

	x = dst->xfrm;
	if (!x)
		goto tx_err_link_failure;

	if (x->if_id != xi->p.if_id)
		goto tx_err_link_failure;

	tdev = dst->dev;

	if (tdev == dev) {
		stats->collisions++;
		net_warn_ratelimited("%s: Local routing loop detected!\n",
				     xi->p.name);
		goto tx_err_dst_release;
	}

	mtu = dst_mtu(dst);
	if (!skb->ignore_df && skb->len > mtu) {
		skb_dst_update_pmtu(skb, mtu);

		if (skb->protocol == htons(ETH_P_IPV6)) {
			if (mtu < IPV6_MIN_MTU)
				mtu = IPV6_MIN_MTU;

			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		} else {
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
				  htonl(mtu));
		}

		dst_release(dst);
		return -EMSGSIZE;
	}

	xfrmi_scrub_packet(skb, !net_eq(xi->net, dev_net(dev)));
	skb_dst_set(skb, dst);
	skb->dev = tdev;

	err = dst_output(xi->net, skb->sk, skb);
	if (net_xmit_eval(err) == 0) {
		struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&tstats->syncp);
		tstats->tx_bytes += length;
		tstats->tx_packets++;
		u64_stats_update_end(&tstats->syncp);
	} else {
		stats->tx_errors++;
		stats->tx_aborted_errors++;
	}

	return 0;
tx_err_link_failure:
	stats->tx_carrier_errors++;
	dst_link_failure(skb);
tx_err_dst_release:
	dst_release(dst);
	return err;
}
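
/*
 * ndo_start_xmit: decode the flow from the packet headers (IPv4 or IPv6
 * only), pin the output interface to the underlying device and defer to
 * xfrmi_xmit2().
 */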
static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);
	struct net_device_stats *stats = &xi->dev->stats;
	struct flowi fl;
	int ret;

	memset(&fl, 0, sizeof(fl));

	switch (skb->protocol) {
	case htons(ETH_P_IPV6):
		xfrm_decode_session(skb, &fl, AF_INET6);
		memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
		break;
	case htons(ETH_P_IP):
		xfrm_decode_session(skb, &fl, AF_INET);
		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
		break;
	default:
		goto tx_err;
	}

	fl.flowi_oif = xi->phydev->ifindex;

	ret = xfrmi_xmit2(skb, dev, &fl);
	if (ret < 0)
		goto tx_err;

	return NETDEV_TX_OK;

tx_err:
	stats->tx_errors++;
	stats->tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
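
/*
 * ICMP error handler for IPv4: extract the SPI from the embedded
 * ESP/AH/IPCOMP header, find the corresponding state and interface,
 * then update the PMTU or redirect information on the route.
 */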
static int xfrmi4_err(struct sk_buff *skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct net *net = dev_net(skb->dev);
	int protocol = iph->protocol;
	struct ip_comp_hdr *ipch;
	struct ip_esp_hdr *esph;
	struct ip_auth_hdr *ah;
	struct xfrm_state *x;
	struct xfrm_if *xi;
	__be32 spi;

	switch (protocol) {
	case IPPROTO_ESP:
		esph = (struct ip_esp_hdr *)(skb->data + (iph->ihl << 2));
		spi = esph->spi;
		break;
	case IPPROTO_AH:
		ah = (struct ip_auth_hdr *)(skb->data + (iph->ihl << 2));
		spi = ah->spi;
		break;
	case IPPROTO_COMP:
		ipch = (struct ip_comp_hdr *)(skb->data + (iph->ihl << 2));
		spi = htonl(ntohs(ipch->cpi));
		break;
	default:
		return 0;
	}

	switch (icmp_hdr(skb)->type) {
	case ICMP_DEST_UNREACH:
		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
			return 0;
	case ICMP_REDIRECT:
		break;
	default:
		return 0;
	}

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      spi, protocol, AF_INET);
	if (!x)
		return 0;

	xi = xfrmi_lookup(net, x);
	if (!xi) {
		xfrm_state_put(x);
		return -1;
	}

	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
		ipv4_update_pmtu(skb, net, info, 0, 0, protocol, 0);
	else
		ipv4_redirect(skb, net, 0, 0, protocol, 0);
	xfrm_state_put(x);

	return 0;
}
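
/*
 * IPv6 counterpart of xfrmi4_err(): handles ICMPV6_PKT_TOOBIG and
 * NDISC_REDIRECT for ESP/AH/IPCOMP states tied to an xfrm interface.
 */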
static int xfrmi6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		      u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
	struct net *net = dev_net(skb->dev);
	int protocol = iph->nexthdr;
	struct ip_comp_hdr *ipch;
	struct ip_esp_hdr *esph;
	struct ip_auth_hdr *ah;
	struct xfrm_state *x;
	struct xfrm_if *xi;
	__be32 spi;

	switch (protocol) {
	case IPPROTO_ESP:
		esph = (struct ip_esp_hdr *)(skb->data + offset);
		spi = esph->spi;
		break;
	case IPPROTO_AH:
		ah = (struct ip_auth_hdr *)(skb->data + offset);
		spi = ah->spi;
		break;
	case IPPROTO_COMP:
		ipch = (struct ip_comp_hdr *)(skb->data + offset);
		spi = htonl(ntohs(ipch->cpi));
		break;
	default:
		return 0;
	}

	if (type != ICMPV6_PKT_TOOBIG &&
	    type != NDISC_REDIRECT)
		return 0;

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      spi, protocol, AF_INET6);
	if (!x)
		return 0;

	xi = xfrmi_lookup(net, x);
	if (!xi) {
		xfrm_state_put(x);
		return -1;
	}

	if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
	else
		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
	xfrm_state_put(x);

	return 0;
}
static int xfrmi_change(struct xfrm_if *xi, const struct xfrm_if_parms *p)
{
	if (xi->p.link != p->link)
		return -EINVAL;

	xi->p.if_id = p->if_id;

	return 0;
}

static int xfrmi_update(struct xfrm_if *xi, struct xfrm_if_parms *p)
{
	struct net *net = dev_net(xi->dev);
	struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
	int err;

	xfrmi_unlink(xfrmn, xi);
	synchronize_net();
	err = xfrmi_change(xi, p);
	xfrmi_link(xfrmn, xi);
	netdev_state_change(xi->dev);
	return err;
}

static void xfrmi_get_stats64(struct net_device *dev,
			      struct rtnl_link_stats64 *s)
{
	int cpu;

	if (!dev->tstats)
		return;

	for_each_possible_cpu(cpu) {
		struct pcpu_sw_netstats *stats;
		struct pcpu_sw_netstats tmp;
		int start;

		stats = per_cpu_ptr(dev->tstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			tmp.rx_packets = stats->rx_packets;
			tmp.rx_bytes = stats->rx_bytes;
			tmp.tx_packets = stats->tx_packets;
			tmp.tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		s->rx_packets += tmp.rx_packets;
		s->rx_bytes += tmp.rx_bytes;
		s->tx_packets += tmp.tx_packets;
		s->tx_bytes += tmp.tx_bytes;
	}

	s->rx_dropped = dev->stats.rx_dropped;
	s->tx_dropped = dev->stats.tx_dropped;
}

static int xfrmi_get_iflink(const struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);

	return xi->phydev->ifindex;
}

static const struct net_device_ops xfrmi_netdev_ops = {
	.ndo_init = xfrmi_dev_init,
	.ndo_uninit = xfrmi_dev_uninit,
	.ndo_start_xmit = xfrmi_xmit,
	.ndo_get_stats64 = xfrmi_get_stats64,
	.ndo_get_iflink = xfrmi_get_iflink,
};

static void xfrmi_dev_setup(struct net_device *dev)
{
	dev->netdev_ops = &xfrmi_netdev_ops;
	dev->type = ARPHRD_NONE;
	dev->hard_header_len = ETH_HLEN;
	dev->min_header_len = ETH_HLEN;
	dev->mtu = ETH_DATA_LEN;
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = ETH_DATA_LEN;
	dev->addr_len = ETH_ALEN;
	dev->flags = IFF_NOARP;
	dev->needs_free_netdev = true;
	dev->priv_destructor = xfrmi_dev_free;
	netif_keep_dst(dev);
}
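
/*
 * ndo_init: allocate per-cpu stats and GRO cells, and inherit headroom,
 * tailroom and addresses from the underlying physical device.
 */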
static int xfrmi_dev_init(struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);
	struct net_device *phydev = xi->phydev;
	int err;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = gro_cells_init(&xi->gro_cells, dev);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	dev->features |= NETIF_F_LLTX;

	dev->needed_headroom = phydev->needed_headroom;
	dev->needed_tailroom = phydev->needed_tailroom;

	if (is_zero_ether_addr(dev->dev_addr))
		eth_hw_addr_inherit(dev, phydev);
	if (is_zero_ether_addr(dev->broadcast))
		memcpy(dev->broadcast, phydev->broadcast, dev->addr_len);

	return 0;
}

static int xfrmi_validate(struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	return 0;
}

static void xfrmi_netlink_parms(struct nlattr *data[],
				struct xfrm_if_parms *parms)
{
	memset(parms, 0, sizeof(*parms));

	if (!data)
		return;

	if (data[IFLA_XFRM_LINK])
		parms->link = nla_get_u32(data[IFLA_XFRM_LINK]);

	if (data[IFLA_XFRM_IF_ID])
		parms->if_id = nla_get_u32(data[IFLA_XFRM_IF_ID]);
}
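
/*
 * rtnl_link_ops ->newlink: parse IFLA_XFRM_LINK/IFLA_XFRM_IF_ID and
 * create the device, refusing a duplicate if_id (see xfrmi_locate()).
 */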
static int xfrmi_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct net *net = dev_net(dev);
	struct xfrm_if_parms *p;
	struct xfrm_if *xi;

	xi = netdev_priv(dev);
	p = &xi->p;

	xfrmi_netlink_parms(data, p);

	if (!tb[IFLA_IFNAME])
		return -EINVAL;

	nla_strlcpy(p->name, tb[IFLA_IFNAME], IFNAMSIZ);

	xi = xfrmi_locate(net, p, 1);
	return PTR_ERR_OR_ZERO(xi);
}

static void xfrmi_dellink(struct net_device *dev, struct list_head *head)
{
	unregister_netdevice_queue(dev, head);
}

static int xfrmi_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	struct xfrm_if *xi = netdev_priv(dev);
	struct net *net = dev_net(dev);

	xfrmi_netlink_parms(data, &xi->p);

	xi = xfrmi_locate(net, &xi->p, 0);

	if (IS_ERR_OR_NULL(xi)) {
		xi = netdev_priv(dev);
	} else {
		if (xi->dev != dev)
			return -EEXIST;
	}

	return xfrmi_update(xi, &xi->p);
}

static size_t xfrmi_get_size(const struct net_device *dev)
{
	return
		/* IFLA_XFRM_LINK */
		nla_total_size(4) +
		/* IFLA_XFRM_IF_ID */
		nla_total_size(4) +
		0;
}

static int xfrmi_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);
	struct xfrm_if_parms *parm = &xi->p;

	if (nla_put_u32(skb, IFLA_XFRM_LINK, parm->link) ||
	    nla_put_u32(skb, IFLA_XFRM_IF_ID, parm->if_id))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

struct net *xfrmi_get_link_net(const struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);

	return dev_net(xi->phydev);
}

static const struct nla_policy xfrmi_policy[IFLA_XFRM_MAX + 1] = {
	[IFLA_XFRM_LINK] = { .type = NLA_U32 },
	[IFLA_XFRM_IF_ID] = { .type = NLA_U32 },
};

static struct rtnl_link_ops xfrmi_link_ops __read_mostly = {
	.kind = "xfrm",
	.maxtype = IFLA_XFRM_MAX,
	.policy = xfrmi_policy,
	.priv_size = sizeof(struct xfrm_if),
	.setup = xfrmi_dev_setup,
	.validate = xfrmi_validate,
	.newlink = xfrmi_newlink,
	.dellink = xfrmi_dellink,
	.changelink = xfrmi_changelink,
	.get_size = xfrmi_get_size,
	.fill_info = xfrmi_fill_info,
	.get_link_net = xfrmi_get_link_net,
};
static void __net_exit xfrmi_destroy_interfaces(struct xfrmi_net *xfrmn)
{
	struct xfrm_if *xi;
	LIST_HEAD(list);

	xi = rtnl_dereference(xfrmn->xfrmi[0]);
	if (!xi)
		return;

	unregister_netdevice_queue(xi->dev, &list);
	unregister_netdevice_many(&list);
}

static int __net_init xfrmi_init_net(struct net *net)
{
	return 0;
}

static void __net_exit xfrmi_exit_net(struct net *net)
{
	struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);

	rtnl_lock();
	xfrmi_destroy_interfaces(xfrmn);
	rtnl_unlock();
}

static struct pernet_operations xfrmi_net_ops = {
	.init = xfrmi_init_net,
	.exit = xfrmi_exit_net,
	.id = &xfrmi_net_id,
	.size = sizeof(struct xfrmi_net),
};

static struct xfrm6_protocol xfrmi_esp6_protocol __read_mostly = {
	.handler = xfrm6_rcv,
	.cb_handler = xfrmi_rcv_cb,
	.err_handler = xfrmi6_err,
	.priority = 10,
};

static struct xfrm6_protocol xfrmi_ah6_protocol __read_mostly = {
	.handler = xfrm6_rcv,
	.cb_handler = xfrmi_rcv_cb,
	.err_handler = xfrmi6_err,
	.priority = 10,
};

static struct xfrm6_protocol xfrmi_ipcomp6_protocol __read_mostly = {
	.handler = xfrm6_rcv,
	.cb_handler = xfrmi_rcv_cb,
	.err_handler = xfrmi6_err,
	.priority = 10,
};

static struct xfrm4_protocol xfrmi_esp4_protocol __read_mostly = {
	.handler = xfrm4_rcv,
	.input_handler = xfrm_input,
	.cb_handler = xfrmi_rcv_cb,
	.err_handler = xfrmi4_err,
	.priority = 10,
};

static struct xfrm4_protocol xfrmi_ah4_protocol __read_mostly = {
	.handler = xfrm4_rcv,
	.input_handler = xfrm_input,
	.cb_handler = xfrmi_rcv_cb,
	.err_handler = xfrmi4_err,
	.priority = 10,
};

static struct xfrm4_protocol xfrmi_ipcomp4_protocol __read_mostly = {
	.handler = xfrm4_rcv,
	.input_handler = xfrm_input,
	.cb_handler = xfrmi_rcv_cb,
	.err_handler = xfrmi4_err,
	.priority = 10,
};
static int __init xfrmi4_init(void)
{
	int err;

	err = xfrm4_protocol_register(&xfrmi_esp4_protocol, IPPROTO_ESP);
	if (err < 0)
		goto xfrm_proto_esp_failed;
	err = xfrm4_protocol_register(&xfrmi_ah4_protocol, IPPROTO_AH);
	if (err < 0)
		goto xfrm_proto_ah_failed;
	err = xfrm4_protocol_register(&xfrmi_ipcomp4_protocol, IPPROTO_COMP);
	if (err < 0)
		goto xfrm_proto_comp_failed;

	return 0;

xfrm_proto_comp_failed:
	xfrm4_protocol_deregister(&xfrmi_ah4_protocol, IPPROTO_AH);
xfrm_proto_ah_failed:
	xfrm4_protocol_deregister(&xfrmi_esp4_protocol, IPPROTO_ESP);
xfrm_proto_esp_failed:
	return err;
}

static void xfrmi4_fini(void)
{
	xfrm4_protocol_deregister(&xfrmi_ipcomp4_protocol, IPPROTO_COMP);
	xfrm4_protocol_deregister(&xfrmi_ah4_protocol, IPPROTO_AH);
	xfrm4_protocol_deregister(&xfrmi_esp4_protocol, IPPROTO_ESP);
}

static int __init xfrmi6_init(void)
{
	int err;

	err = xfrm6_protocol_register(&xfrmi_esp6_protocol, IPPROTO_ESP);
	if (err < 0)
		goto xfrm_proto_esp_failed;
	err = xfrm6_protocol_register(&xfrmi_ah6_protocol, IPPROTO_AH);
	if (err < 0)
		goto xfrm_proto_ah_failed;
	err = xfrm6_protocol_register(&xfrmi_ipcomp6_protocol, IPPROTO_COMP);
	if (err < 0)
		goto xfrm_proto_comp_failed;

	return 0;

xfrm_proto_comp_failed:
	xfrm6_protocol_deregister(&xfrmi_ah6_protocol, IPPROTO_AH);
xfrm_proto_ah_failed:
	xfrm6_protocol_deregister(&xfrmi_esp6_protocol, IPPROTO_ESP);
xfrm_proto_esp_failed:
	return err;
}

static void xfrmi6_fini(void)
{
	xfrm6_protocol_deregister(&xfrmi_ipcomp6_protocol, IPPROTO_COMP);
	xfrm6_protocol_deregister(&xfrmi_ah6_protocol, IPPROTO_AH);
	xfrm6_protocol_deregister(&xfrmi_esp6_protocol, IPPROTO_ESP);
}
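
/*
 * Module init/exit: register the pernet ops, the xfrm4/xfrm6 protocol
 * handlers, the "xfrm" rtnl link type and the decode_session callback,
 * unwinding in reverse order on failure.
 */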
static const struct xfrm_if_cb xfrm_if_cb = {
	.decode_session = xfrmi_decode_session,
};

static int __init xfrmi_init(void)
{
	const char *msg;
	int err;

	pr_info("IPsec XFRM device driver\n");

	msg = "tunnel device";
	err = register_pernet_device(&xfrmi_net_ops);
	if (err < 0)
		goto pernet_dev_failed;

	msg = "xfrm4 protocols";
	err = xfrmi4_init();
	if (err < 0)
		goto xfrmi4_failed;

	msg = "xfrm6 protocols";
	err = xfrmi6_init();
	if (err < 0)
		goto xfrmi6_failed;

	msg = "netlink interface";
	err = rtnl_link_register(&xfrmi_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	xfrm_if_register_cb(&xfrm_if_cb);

	return err;

rtnl_link_failed:
	xfrmi6_fini();
xfrmi6_failed:
	xfrmi4_fini();
xfrmi4_failed:
	unregister_pernet_device(&xfrmi_net_ops);
pernet_dev_failed:
	pr_err("xfrmi init: failed to register %s\n", msg);
	return err;
}

static void __exit xfrmi_fini(void)
{
	xfrm_if_unregister_cb();
	rtnl_link_unregister(&xfrmi_link_ops);
	xfrmi4_fini();
	xfrmi6_fini();
	unregister_pernet_device(&xfrmi_net_ops);
}

module_init(xfrmi_init);
module_exit(xfrmi_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("xfrm");
MODULE_ALIAS_NETDEV("xfrm0");
MODULE_AUTHOR("Steffen Klassert");
MODULE_DESCRIPTION("XFRM virtual interface");