ipvlan_core.c 14 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609
  1. /* Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com>
  2. *
  3. * This program is free software; you can redistribute it and/or
  4. * modify it under the terms of the GNU General Public License as
  5. * published by the Free Software Foundation; either version 2 of
  6. * the License, or (at your option) any later version.
  7. *
  8. */
  9. #include "ipvlan.h"
/* Per-boot random seed mixed into every jhash-based lookup in this file
 * (address hash table and MAC multicast filter).
 */
static u32 ipvlan_jhash_secret __read_mostly;

/* Seed ipvlan_jhash_secret exactly once; later calls are no-ops. */
void ipvlan_init_secret(void)
{
	net_get_random_once(&ipvlan_jhash_secret, sizeof(ipvlan_jhash_secret));
}
  15. static void ipvlan_count_rx(const struct ipvl_dev *ipvlan,
  16. unsigned int len, bool success, bool mcast)
  17. {
  18. if (!ipvlan)
  19. return;
  20. if (likely(success)) {
  21. struct ipvl_pcpu_stats *pcptr;
  22. pcptr = this_cpu_ptr(ipvlan->pcpu_stats);
  23. u64_stats_update_begin(&pcptr->syncp);
  24. pcptr->rx_pkts++;
  25. pcptr->rx_bytes += len;
  26. if (mcast)
  27. pcptr->rx_mcast++;
  28. u64_stats_update_end(&pcptr->syncp);
  29. } else {
  30. this_cpu_inc(ipvlan->pcpu_stats->rx_errs);
  31. }
  32. }
  33. static u8 ipvlan_get_v6_hash(const void *iaddr)
  34. {
  35. const struct in6_addr *ip6_addr = iaddr;
  36. return __ipv6_addr_jhash(ip6_addr, ipvlan_jhash_secret) &
  37. IPVLAN_HASH_MASK;
  38. }
  39. static u8 ipvlan_get_v4_hash(const void *iaddr)
  40. {
  41. const struct in_addr *ip4_addr = iaddr;
  42. return jhash_1word(ip4_addr->s_addr, ipvlan_jhash_secret) &
  43. IPVLAN_HASH_MASK;
  44. }
  45. struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
  46. const void *iaddr, bool is_v6)
  47. {
  48. struct ipvl_addr *addr;
  49. u8 hash;
  50. hash = is_v6 ? ipvlan_get_v6_hash(iaddr) :
  51. ipvlan_get_v4_hash(iaddr);
  52. hlist_for_each_entry_rcu(addr, &port->hlhead[hash], hlnode) {
  53. if (is_v6 && addr->atype == IPVL_IPV6 &&
  54. ipv6_addr_equal(&addr->ip6addr, iaddr))
  55. return addr;
  56. else if (!is_v6 && addr->atype == IPVL_IPV4 &&
  57. addr->ip4addr.s_addr ==
  58. ((struct in_addr *)iaddr)->s_addr)
  59. return addr;
  60. }
  61. return NULL;
  62. }
  63. void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr)
  64. {
  65. struct ipvl_port *port = ipvlan->port;
  66. u8 hash;
  67. hash = (addr->atype == IPVL_IPV6) ?
  68. ipvlan_get_v6_hash(&addr->ip6addr) :
  69. ipvlan_get_v4_hash(&addr->ip4addr);
  70. hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
  71. }
/* Unlink @addr from the port address hash.  RCU readers may still hold
 * a reference; pass @sync = true to block until they have all drained
 * (needed before the entry's memory is reused or freed).
 */
void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync)
{
	hlist_del_rcu(&addr->hlnode);
	if (sync)
		synchronize_rcu();
}
  78. bool ipvlan_addr_busy(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6)
  79. {
  80. struct ipvl_port *port = ipvlan->port;
  81. struct ipvl_addr *addr;
  82. list_for_each_entry(addr, &ipvlan->addrs, anode) {
  83. if ((is_v6 && addr->atype == IPVL_IPV6 &&
  84. ipv6_addr_equal(&addr->ip6addr, iaddr)) ||
  85. (!is_v6 && addr->atype == IPVL_IPV4 &&
  86. addr->ip4addr.s_addr == ((struct in_addr *)iaddr)->s_addr))
  87. return true;
  88. }
  89. if (ipvlan_ht_addr_lookup(port, iaddr, is_v6))
  90. return true;
  91. return false;
  92. }
  93. static void *ipvlan_get_L3_hdr(struct sk_buff *skb, int *type)
  94. {
  95. void *lyr3h = NULL;
  96. switch (skb->protocol) {
  97. case htons(ETH_P_ARP): {
  98. struct arphdr *arph;
  99. if (unlikely(!pskb_may_pull(skb, sizeof(*arph))))
  100. return NULL;
  101. arph = arp_hdr(skb);
  102. *type = IPVL_ARP;
  103. lyr3h = arph;
  104. break;
  105. }
  106. case htons(ETH_P_IP): {
  107. u32 pktlen;
  108. struct iphdr *ip4h;
  109. if (unlikely(!pskb_may_pull(skb, sizeof(*ip4h))))
  110. return NULL;
  111. ip4h = ip_hdr(skb);
  112. pktlen = ntohs(ip4h->tot_len);
  113. if (ip4h->ihl < 5 || ip4h->version != 4)
  114. return NULL;
  115. if (skb->len < pktlen || pktlen < (ip4h->ihl * 4))
  116. return NULL;
  117. *type = IPVL_IPV4;
  118. lyr3h = ip4h;
  119. break;
  120. }
  121. case htons(ETH_P_IPV6): {
  122. struct ipv6hdr *ip6h;
  123. if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h))))
  124. return NULL;
  125. ip6h = ipv6_hdr(skb);
  126. if (ip6h->version != 6)
  127. return NULL;
  128. *type = IPVL_IPV6;
  129. lyr3h = ip6h;
  130. /* Only Neighbour Solicitation pkts need different treatment */
  131. if (ipv6_addr_any(&ip6h->saddr) &&
  132. ip6h->nexthdr == NEXTHDR_ICMP) {
  133. *type = IPVL_ICMPV6;
  134. lyr3h = ip6h + 1;
  135. }
  136. break;
  137. }
  138. default:
  139. return NULL;
  140. }
  141. return lyr3h;
  142. }
/* Hash a MAC address into the per-slave multicast filter bitmap index.
 * Only the last four bytes are hashed (addr+2): the leading OUI bytes
 * carry little entropy.  The unaligned-safe accessor is required since
 * Ethernet addresses are not 4-byte aligned.
 */
unsigned int ipvlan_mac_hash(const unsigned char *addr)
{
	u32 hash = jhash_1word(__get_unaligned_cpu32(addr+2),
			       ipvlan_jhash_secret);
	return hash & IPVLAN_MAC_FILTER_MASK;
}
  149. static void ipvlan_multicast_frame(struct ipvl_port *port, struct sk_buff *skb,
  150. const struct ipvl_dev *in_dev, bool local)
  151. {
  152. struct ethhdr *eth = eth_hdr(skb);
  153. struct ipvl_dev *ipvlan;
  154. struct sk_buff *nskb;
  155. unsigned int len;
  156. unsigned int mac_hash;
  157. int ret;
  158. if (skb->protocol == htons(ETH_P_PAUSE))
  159. return;
  160. list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
  161. if (local && (ipvlan == in_dev))
  162. continue;
  163. mac_hash = ipvlan_mac_hash(eth->h_dest);
  164. if (!test_bit(mac_hash, ipvlan->mac_filters))
  165. continue;
  166. ret = NET_RX_DROP;
  167. len = skb->len + ETH_HLEN;
  168. nskb = skb_clone(skb, GFP_ATOMIC);
  169. if (!nskb)
  170. goto mcast_acct;
  171. if (ether_addr_equal(eth->h_dest, ipvlan->phy_dev->broadcast))
  172. nskb->pkt_type = PACKET_BROADCAST;
  173. else
  174. nskb->pkt_type = PACKET_MULTICAST;
  175. nskb->dev = ipvlan->dev;
  176. if (local)
  177. ret = dev_forward_skb(ipvlan->dev, nskb);
  178. else
  179. ret = netif_rx(nskb);
  180. mcast_acct:
  181. ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
  182. }
  183. /* Locally generated? ...Forward a copy to the main-device as
  184. * well. On the RX side we'll ignore it (wont give it to any
  185. * of the virtual devices.
  186. */
  187. if (local) {
  188. nskb = skb_clone(skb, GFP_ATOMIC);
  189. if (nskb) {
  190. if (ether_addr_equal(eth->h_dest, port->dev->broadcast))
  191. nskb->pkt_type = PACKET_BROADCAST;
  192. else
  193. nskb->pkt_type = PACKET_MULTICAST;
  194. dev_forward_skb(port->dev, nskb);
  195. }
  196. }
  197. }
  198. static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff *skb,
  199. bool local)
  200. {
  201. struct ipvl_dev *ipvlan = addr->master;
  202. struct net_device *dev = ipvlan->dev;
  203. unsigned int len;
  204. rx_handler_result_t ret = RX_HANDLER_CONSUMED;
  205. bool success = false;
  206. len = skb->len + ETH_HLEN;
  207. if (unlikely(!(dev->flags & IFF_UP))) {
  208. kfree_skb(skb);
  209. goto out;
  210. }
  211. skb = skb_share_check(skb, GFP_ATOMIC);
  212. if (!skb)
  213. goto out;
  214. skb->dev = dev;
  215. skb->pkt_type = PACKET_HOST;
  216. if (local) {
  217. if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS)
  218. success = true;
  219. } else {
  220. ret = RX_HANDLER_ANOTHER;
  221. success = true;
  222. }
  223. out:
  224. ipvlan_count_rx(ipvlan, len, success, false);
  225. return ret;
  226. }
  227. static struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port,
  228. void *lyr3h, int addr_type,
  229. bool use_dest)
  230. {
  231. struct ipvl_addr *addr = NULL;
  232. if (addr_type == IPVL_IPV6) {
  233. struct ipv6hdr *ip6h;
  234. struct in6_addr *i6addr;
  235. ip6h = (struct ipv6hdr *)lyr3h;
  236. i6addr = use_dest ? &ip6h->daddr : &ip6h->saddr;
  237. addr = ipvlan_ht_addr_lookup(port, i6addr, true);
  238. } else if (addr_type == IPVL_ICMPV6) {
  239. struct nd_msg *ndmh;
  240. struct in6_addr *i6addr;
  241. /* Make sure that the NeighborSolicitation ICMPv6 packets
  242. * are handled to avoid DAD issue.
  243. */
  244. ndmh = (struct nd_msg *)lyr3h;
  245. if (ndmh->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
  246. i6addr = &ndmh->target;
  247. addr = ipvlan_ht_addr_lookup(port, i6addr, true);
  248. }
  249. } else if (addr_type == IPVL_IPV4) {
  250. struct iphdr *ip4h;
  251. __be32 *i4addr;
  252. ip4h = (struct iphdr *)lyr3h;
  253. i4addr = use_dest ? &ip4h->daddr : &ip4h->saddr;
  254. addr = ipvlan_ht_addr_lookup(port, i4addr, false);
  255. } else if (addr_type == IPVL_ARP) {
  256. struct arphdr *arph;
  257. unsigned char *arp_ptr;
  258. __be32 dip;
  259. arph = (struct arphdr *)lyr3h;
  260. arp_ptr = (unsigned char *)(arph + 1);
  261. if (use_dest)
  262. arp_ptr += (2 * port->dev->addr_len) + 4;
  263. else
  264. arp_ptr += port->dev->addr_len;
  265. memcpy(&dip, arp_ptr, 4);
  266. addr = ipvlan_ht_addr_lookup(port, &dip, false);
  267. }
  268. return addr;
  269. }
/* Route and transmit an IPv4 packet on behalf of a slave (L3 mode TX).
 * A fresh route is looked up in the master's namespace (oif = the
 * device our iflink points at), attached to the skb, and the packet is
 * sent via ip_local_out().  Returns NET_XMIT_SUCCESS or NET_XMIT_DROP;
 * the skb is always consumed.
 */
static int ipvlan_process_v4_outbound(struct sk_buff *skb)
{
	const struct iphdr *ip4h = ip_hdr(skb);
	struct net_device *dev = skb->dev;
	struct rtable *rt;
	int err, ret = NET_XMIT_DROP;
	struct flowi4 fl4 = {
		.flowi4_oif = dev->iflink,
		.flowi4_tos = RT_TOS(ip4h->tos),
		.flowi4_flags = FLOWI_FLAG_ANYSRC,
		.daddr = ip4h->daddr,
		.saddr = ip4h->saddr,
	};

	rt = ip_route_output_flow(dev_net(dev), &fl4, NULL);
	if (IS_ERR(rt))
		goto err;

	/* Only unicast/local routes make sense here; anything else
	 * (broadcast, blackhole, ...) is dropped.
	 */
	if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
		ip_rt_put(rt);
		goto err;
	}

	/* Replace any stale dst with the freshly resolved route. */
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);

	err = ip_local_out(skb);
	if (unlikely(net_xmit_eval(err)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;
	goto out;
err:
	dev->stats.tx_errors++;
	kfree_skb(skb);
out:
	return ret;
}
/* Route and transmit an IPv6 packet on behalf of a slave (L3 mode TX).
 * Mirrors ipvlan_process_v4_outbound(): resolve a route in the master's
 * namespace, attach it, send via ip6_local_out().  The skb is always
 * consumed.
 * NOTE(review): on a route lookup error this returns dst->error (a
 * negative errno) rather than NET_XMIT_DROP like the v4 path — callers
 * appear to treat any non-NET_XMIT_SUCCESS value as failure, but the
 * asymmetry is worth confirming.
 */
static int ipvlan_process_v6_outbound(struct sk_buff *skb)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct net_device *dev = skb->dev;
	struct dst_entry *dst;
	int err, ret = NET_XMIT_DROP;
	struct flowi6 fl6 = {
		.flowi6_iif = skb->dev->ifindex,
		.daddr = ip6h->daddr,
		.saddr = ip6h->saddr,
		.flowi6_flags = FLOWI_FLAG_ANYSRC,
		.flowlabel = ip6_flowinfo(ip6h),
		.flowi6_mark = skb->mark,
		.flowi6_proto = ip6h->nexthdr,
	};

	dst = ip6_route_output(dev_net(dev), NULL, &fl6);
	if (dst->error) {
		ret = dst->error;
		/* ip6_route_output() never returns NULL; release the
		 * error dst before bailing out.
		 */
		dst_release(dst);
		goto err;
	}

	/* Replace any stale dst with the freshly resolved route. */
	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	err = ip6_local_out(skb);
	if (unlikely(net_xmit_eval(err)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;
	goto out;
err:
	dev->stats.tx_errors++;
	kfree_skb(skb);
out:
	return ret;
}
  339. static int ipvlan_process_outbound(struct sk_buff *skb,
  340. const struct ipvl_dev *ipvlan)
  341. {
  342. struct ethhdr *ethh = eth_hdr(skb);
  343. int ret = NET_XMIT_DROP;
  344. /* In this mode we dont care about multicast and broadcast traffic */
  345. if (is_multicast_ether_addr(ethh->h_dest)) {
  346. pr_warn_ratelimited("Dropped {multi|broad}cast of type= [%x]\n",
  347. ntohs(skb->protocol));
  348. kfree_skb(skb);
  349. goto out;
  350. }
  351. /* The ipvlan is a pseudo-L2 device, so the packets that we receive
  352. * will have L2; which need to discarded and processed further
  353. * in the net-ns of the main-device.
  354. */
  355. if (skb_mac_header_was_set(skb)) {
  356. skb_pull(skb, sizeof(*ethh));
  357. skb->mac_header = (typeof(skb->mac_header))~0U;
  358. skb_reset_network_header(skb);
  359. }
  360. if (skb->protocol == htons(ETH_P_IPV6))
  361. ret = ipvlan_process_v6_outbound(skb);
  362. else if (skb->protocol == htons(ETH_P_IP))
  363. ret = ipvlan_process_v4_outbound(skb);
  364. else {
  365. pr_warn_ratelimited("Dropped outbound packet type=%x\n",
  366. ntohs(skb->protocol));
  367. kfree_skb(skb);
  368. }
  369. out:
  370. return ret;
  371. }
  372. static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
  373. {
  374. const struct ipvl_dev *ipvlan = netdev_priv(dev);
  375. void *lyr3h;
  376. struct ipvl_addr *addr;
  377. int addr_type;
  378. lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
  379. if (!lyr3h)
  380. goto out;
  381. addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
  382. if (addr)
  383. return ipvlan_rcv_frame(addr, skb, true);
  384. out:
  385. skb->dev = ipvlan->phy_dev;
  386. return ipvlan_process_outbound(skb, ipvlan);
  387. }
/* L2-mode transmit.  All slaves share the master's MAC, so a frame
 * whose destination equals its source is addressed to this port: it is
 * either delivered to the owning sibling slave or forwarded up through
 * the master.  Multicast is replicated to the slaves and then still
 * transmitted on the wire via the master.  Consumes @skb.
 */
static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
{
	const struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct ethhdr *eth = eth_hdr(skb);
	struct ipvl_addr *addr;
	void *lyr3h;
	int addr_type;

	if (ether_addr_equal(eth->h_dest, eth->h_source)) {
		lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
		if (lyr3h) {
			addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
			if (addr)
				return ipvlan_rcv_frame(addr, skb, true);
		}
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (!skb)
			return NET_XMIT_DROP;
		/* Packet definitely does not belong to any of the
		 * virtual devices, but the dest is local. So forward
		 * the skb for the main-dev. At the RX side we just return
		 * RX_PASS for it to be processed further on the stack.
		 */
		return dev_forward_skb(ipvlan->phy_dev, skb);
	} else if (is_multicast_ether_addr(eth->h_dest)) {
		/* Locally generated payload is trusted: mark the checksum
		 * as verified for the local copies, then restore the
		 * original state before the frame hits the wire below.
		 */
		u8 ip_summed = skb->ip_summed;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		ipvlan_multicast_frame(ipvlan->port, skb, ipvlan, true);
		skb->ip_summed = ip_summed;
	}
	/* Not for a sibling: transmit on the physical device. */
	skb->dev = ipvlan->phy_dev;
	return dev_queue_xmit(skb);
}
/* Common ndo_start_xmit entry for ipvlan slaves: dispatch to the
 * per-mode transmit routine.  Drops the frame when the port is gone
 * (master being dismantled), when the frame is shorter than an
 * Ethernet header, or when the mode is unrecognised.
 */
int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct ipvl_port *port = ipvlan_port_get_rcu(ipvlan->phy_dev);

	if (!port)
		goto out;

	/* Ensure the Ethernet header is linear before the modes touch it. */
	if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
		goto out;

	switch(port->mode) {
	case IPVLAN_MODE_L2:
		return ipvlan_xmit_mode_l2(skb, dev);
	case IPVLAN_MODE_L3:
		return ipvlan_xmit_mode_l3(skb, dev);
	}

	/* Should not reach here */
	WARN_ONCE(true, "ipvlan_queue_xmit() called for mode = [%hx]\n",
			  port->mode);
out:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
  441. static bool ipvlan_external_frame(struct sk_buff *skb, struct ipvl_port *port)
  442. {
  443. struct ethhdr *eth = eth_hdr(skb);
  444. struct ipvl_addr *addr;
  445. void *lyr3h;
  446. int addr_type;
  447. if (ether_addr_equal(eth->h_source, skb->dev->dev_addr)) {
  448. lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
  449. if (!lyr3h)
  450. return true;
  451. addr = ipvlan_addr_lookup(port, lyr3h, addr_type, false);
  452. if (addr)
  453. return false;
  454. }
  455. return true;
  456. }
  457. static rx_handler_result_t ipvlan_handle_mode_l3(struct sk_buff **pskb,
  458. struct ipvl_port *port)
  459. {
  460. void *lyr3h;
  461. int addr_type;
  462. struct ipvl_addr *addr;
  463. struct sk_buff *skb = *pskb;
  464. rx_handler_result_t ret = RX_HANDLER_PASS;
  465. lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
  466. if (!lyr3h)
  467. goto out;
  468. addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
  469. if (addr)
  470. ret = ipvlan_rcv_frame(addr, skb, false);
  471. out:
  472. return ret;
  473. }
  474. static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
  475. struct ipvl_port *port)
  476. {
  477. struct sk_buff *skb = *pskb;
  478. struct ethhdr *eth = eth_hdr(skb);
  479. rx_handler_result_t ret = RX_HANDLER_PASS;
  480. void *lyr3h;
  481. int addr_type;
  482. if (is_multicast_ether_addr(eth->h_dest)) {
  483. if (ipvlan_external_frame(skb, port))
  484. ipvlan_multicast_frame(port, skb, NULL, false);
  485. } else {
  486. struct ipvl_addr *addr;
  487. lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
  488. if (!lyr3h)
  489. return ret;
  490. addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
  491. if (addr)
  492. ret = ipvlan_rcv_frame(addr, skb, false);
  493. }
  494. return ret;
  495. }
  496. rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb)
  497. {
  498. struct sk_buff *skb = *pskb;
  499. struct ipvl_port *port = ipvlan_port_get_rcu(skb->dev);
  500. if (!port)
  501. return RX_HANDLER_PASS;
  502. switch (port->mode) {
  503. case IPVLAN_MODE_L2:
  504. return ipvlan_handle_mode_l2(pskb, port);
  505. case IPVLAN_MODE_L3:
  506. return ipvlan_handle_mode_l3(pskb, port);
  507. }
  508. /* Should not reach here */
  509. WARN_ONCE(true, "ipvlan_handle_frame() called for mode = [%hx]\n",
  510. port->mode);
  511. kfree_skb(skb);
  512. return NET_RX_DROP;
  513. }