
/* Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 */

#include "ipvlan.h"

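/* Per-boot random seed shared by the address/MAC hashing helpers below;
 * net_get_random_once() fills it on first use.
 */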
static u32 ipvlan_jhash_secret __read_mostly;

void ipvlan_init_secret(void)
{
	net_get_random_once(&ipvlan_jhash_secret, sizeof(ipvlan_jhash_secret));
}

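/* Update a slave's per-CPU RX counters: on success, packet and byte
 * counts (plus the mcast count when @mcast is set) are bumped under the
 * u64_stats syncp; failures only increment rx_errs.
 */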
void ipvlan_count_rx(const struct ipvl_dev *ipvlan,
		     unsigned int len, bool success, bool mcast)
{
	if (likely(success)) {
		struct ipvl_pcpu_stats *pcptr;

		pcptr = this_cpu_ptr(ipvlan->pcpu_stats);
		u64_stats_update_begin(&pcptr->syncp);
		pcptr->rx_pkts++;
		pcptr->rx_bytes += len;
		if (mcast)
			pcptr->rx_mcast++;
		u64_stats_update_end(&pcptr->syncp);
	} else {
		this_cpu_inc(ipvlan->pcpu_stats->rx_errs);
	}
}
EXPORT_SYMBOL_GPL(ipvlan_count_rx);

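/* Hash an IPv6/IPv4 address into one of the port's hash buckets;
 * IPVLAN_HASH_MASK keeps the result within the hlhead[] table.
 */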
static u8 ipvlan_get_v6_hash(const void *iaddr)
{
	const struct in6_addr *ip6_addr = iaddr;

	return __ipv6_addr_jhash(ip6_addr, ipvlan_jhash_secret) &
	       IPVLAN_HASH_MASK;
}

static u8 ipvlan_get_v4_hash(const void *iaddr)
{
	const struct in_addr *ip4_addr = iaddr;

	return jhash_1word(ip4_addr->s_addr, ipvlan_jhash_secret) &
	       IPVLAN_HASH_MASK;
}

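/* RCU lookup of @iaddr in the port-wide address hash table; returns the
 * matching ipvl_addr or NULL. Callers must hold rcu_read_lock().
 */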
static struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
					       const void *iaddr, bool is_v6)
{
	struct ipvl_addr *addr;
	u8 hash;

	hash = is_v6 ? ipvlan_get_v6_hash(iaddr) :
	       ipvlan_get_v4_hash(iaddr);
	hlist_for_each_entry_rcu(addr, &port->hlhead[hash], hlnode) {
		if (is_v6 && addr->atype == IPVL_IPV6 &&
		    ipv6_addr_equal(&addr->ip6addr, iaddr))
			return addr;
		else if (!is_v6 && addr->atype == IPVL_IPV4 &&
			 addr->ip4addr.s_addr ==
				((struct in_addr *)iaddr)->s_addr)
			return addr;
	}
	return NULL;
}

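/* Add/remove an address in the port's hash table. The hlist_unhashed()
 * check makes the add a no-op if the address is already hashed.
 */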
void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr)
{
	struct ipvl_port *port = ipvlan->port;
	u8 hash;

	hash = (addr->atype == IPVL_IPV6) ?
	       ipvlan_get_v6_hash(&addr->ip6addr) :
	       ipvlan_get_v4_hash(&addr->ip4addr);
	if (hlist_unhashed(&addr->hlnode))
		hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
}

void ipvlan_ht_addr_del(struct ipvl_addr *addr)
{
	hlist_del_init_rcu(&addr->hlnode);
}

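/* Linear search of a single slave's address list, as opposed to the
 * port-wide hash lookup above.
 */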
struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
				   const void *iaddr, bool is_v6)
{
	struct ipvl_addr *addr;

	list_for_each_entry(addr, &ipvlan->addrs, anode) {
		if ((is_v6 && addr->atype == IPVL_IPV6 &&
		     ipv6_addr_equal(&addr->ip6addr, iaddr)) ||
		    (!is_v6 && addr->atype == IPVL_IPV4 &&
		     addr->ip4addr.s_addr == ((struct in_addr *)iaddr)->s_addr))
			return addr;
	}
	return NULL;
}

bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6)
{
	struct ipvl_dev *ipvlan;

	ASSERT_RTNL();

	list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
		if (ipvlan_find_addr(ipvlan, iaddr, is_v6))
			return true;
	}
	return false;
}

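/* Validate and return a pointer to the L3 header of @skb, setting *type
 * to IPVL_ARP/IPVL_IPV4/IPVL_IPV6/IPVL_ICMPV6. Returns NULL when the
 * header cannot be pulled or fails the basic sanity checks.
 */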
static void *ipvlan_get_L3_hdr(struct sk_buff *skb, int *type)
{
	void *lyr3h = NULL;

	switch (skb->protocol) {
	case htons(ETH_P_ARP): {
		struct arphdr *arph;

		if (unlikely(!pskb_may_pull(skb, sizeof(*arph))))
			return NULL;

		arph = arp_hdr(skb);
		*type = IPVL_ARP;
		lyr3h = arph;
		break;
	}
	case htons(ETH_P_IP): {
		u32 pktlen;
		struct iphdr *ip4h;

		if (unlikely(!pskb_may_pull(skb, sizeof(*ip4h))))
			return NULL;

		ip4h = ip_hdr(skb);
		pktlen = ntohs(ip4h->tot_len);
		if (ip4h->ihl < 5 || ip4h->version != 4)
			return NULL;

		if (skb->len < pktlen || pktlen < (ip4h->ihl * 4))
			return NULL;

		*type = IPVL_IPV4;
		lyr3h = ip4h;
		break;
	}
	case htons(ETH_P_IPV6): {
		struct ipv6hdr *ip6h;

		if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h))))
			return NULL;

		ip6h = ipv6_hdr(skb);
		if (ip6h->version != 6)
			return NULL;

		*type = IPVL_IPV6;
		lyr3h = ip6h;
		/* Only Neighbour Solicitation pkts need different treatment */
		if (ipv6_addr_any(&ip6h->saddr) &&
		    ip6h->nexthdr == NEXTHDR_ICMP) {
			*type = IPVL_ICMPV6;
			lyr3h = ip6h + 1;
		}
		break;
	}
	default:
		return NULL;
	}

	return lyr3h;
}

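/* Hash the last four bytes of a MAC address into the per-slave
 * multicast filter bitmap (IPVLAN_MAC_FILTER_MASK wide).
 */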
unsigned int ipvlan_mac_hash(const unsigned char *addr)
{
	u32 hash = jhash_1word(__get_unaligned_cpu32(addr+2),
			       ipvlan_jhash_secret);

	return hash & IPVLAN_MAC_FILTER_MASK;
}

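/* Work-queue handler that drains the port's multicast backlog. Each
 * queued skb is cloned to every up slave whose mac_filters bitmap
 * matches; TX-originated packets are additionally sent out through the
 * master device, while RX packets are freed once distributed.
 */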
void ipvlan_process_multicast(struct work_struct *work)
{
	struct ipvl_port *port = container_of(work, struct ipvl_port, wq);
	struct ethhdr *ethh;
	struct ipvl_dev *ipvlan;
	struct sk_buff *skb, *nskb;
	struct sk_buff_head list;
	unsigned int len;
	unsigned int mac_hash;
	int ret;
	u8 pkt_type;
	bool tx_pkt;

	__skb_queue_head_init(&list);

	spin_lock_bh(&port->backlog.lock);
	skb_queue_splice_tail_init(&port->backlog, &list);
	spin_unlock_bh(&port->backlog.lock);

	while ((skb = __skb_dequeue(&list)) != NULL) {
		struct net_device *dev = skb->dev;
		bool consumed = false;

		ethh = eth_hdr(skb);
		tx_pkt = IPVL_SKB_CB(skb)->tx_pkt;
		mac_hash = ipvlan_mac_hash(ethh->h_dest);

		if (ether_addr_equal(ethh->h_dest, port->dev->broadcast))
			pkt_type = PACKET_BROADCAST;
		else
			pkt_type = PACKET_MULTICAST;

		rcu_read_lock();
		list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
			if (tx_pkt && (ipvlan->dev == skb->dev))
				continue;
			if (!test_bit(mac_hash, ipvlan->mac_filters))
				continue;
			if (!(ipvlan->dev->flags & IFF_UP))
				continue;
			ret = NET_RX_DROP;
			len = skb->len + ETH_HLEN;
			nskb = skb_clone(skb, GFP_ATOMIC);
			local_bh_disable();
			if (nskb) {
				consumed = true;
				nskb->pkt_type = pkt_type;
				nskb->dev = ipvlan->dev;
				if (tx_pkt)
					ret = dev_forward_skb(ipvlan->dev, nskb);
				else
					ret = netif_rx(nskb);
			}
			ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
			local_bh_enable();
		}
		rcu_read_unlock();

		if (tx_pkt) {
			/* If the packet originated here, send it out. */
			skb->dev = port->dev;
			skb->pkt_type = pkt_type;
			dev_queue_xmit(skb);
		} else {
			if (consumed)
				consume_skb(skb);
			else
				kfree_skb(skb);
		}
		if (dev)
			dev_put(dev);
	}
}

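/* Scrub the skb when it crosses network namespaces (master and slave
 * may live in different netns) and retarget it to @dev.
 */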
static void ipvlan_skb_crossing_ns(struct sk_buff *skb, struct net_device *dev)
{
	bool xnet = true;

	if (dev)
		xnet = !net_eq(dev_net(skb->dev), dev_net(dev));

	skb_scrub_packet(skb, xnet);
	if (dev)
		skb->dev = dev;
}

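/* Deliver an skb to the slave owning @addr. For slave-to-slave traffic
 * (@local) the skb is forwarded via dev_forward_skb(); for packets
 * arriving on the master, returning RX_HANDLER_ANOTHER tells the caller
 * to reinject the skb now that skb->dev points at the slave.
 */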
static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb,
			    bool local)
{
	struct ipvl_dev *ipvlan = addr->master;
	struct net_device *dev = ipvlan->dev;
	unsigned int len;
	rx_handler_result_t ret = RX_HANDLER_CONSUMED;
	bool success = false;
	struct sk_buff *skb = *pskb;

	len = skb->len + ETH_HLEN;
	/* Only packets exchanged between two local slaves need to have
	 * device-up check as well as skb-share check.
	 */
	if (local) {
		if (unlikely(!(dev->flags & IFF_UP))) {
			kfree_skb(skb);
			goto out;
		}

		skb = skb_share_check(skb, GFP_ATOMIC);
		if (!skb)
			goto out;

		*pskb = skb;
	}
	ipvlan_skb_crossing_ns(skb, dev);

	if (local) {
		skb->pkt_type = PACKET_HOST;
		if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS)
			success = true;
	} else {
		ret = RX_HANDLER_ANOTHER;
		success = true;
	}

out:
	ipvlan_count_rx(ipvlan, len, success, false);
	return ret;
}

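/* Extract the relevant address from the parsed L3 header (destination
 * or source per @use_dest, the ND target for ICMPv6, or the address
 * fields in the ARP payload) and look it up in the port's hash table.
 */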
static struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port,
					    void *lyr3h, int addr_type,
					    bool use_dest)
{
	struct ipvl_addr *addr = NULL;

	if (addr_type == IPVL_IPV6) {
		struct ipv6hdr *ip6h;
		struct in6_addr *i6addr;

		ip6h = (struct ipv6hdr *)lyr3h;
		i6addr = use_dest ? &ip6h->daddr : &ip6h->saddr;
		addr = ipvlan_ht_addr_lookup(port, i6addr, true);
	} else if (addr_type == IPVL_ICMPV6) {
		struct nd_msg *ndmh;
		struct in6_addr *i6addr;

		/* Make sure that Neighbor Solicitation ICMPv6 packets
		 * are handled to avoid a DAD issue.
		 */
		ndmh = (struct nd_msg *)lyr3h;
		if (ndmh->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
			i6addr = &ndmh->target;
			addr = ipvlan_ht_addr_lookup(port, i6addr, true);
		}
	} else if (addr_type == IPVL_IPV4) {
		struct iphdr *ip4h;
		__be32 *i4addr;

		ip4h = (struct iphdr *)lyr3h;
		i4addr = use_dest ? &ip4h->daddr : &ip4h->saddr;
		addr = ipvlan_ht_addr_lookup(port, i4addr, false);
	} else if (addr_type == IPVL_ARP) {
		struct arphdr *arph;
		unsigned char *arp_ptr;
		__be32 dip;

		arph = (struct arphdr *)lyr3h;
		arp_ptr = (unsigned char *)(arph + 1);
		if (use_dest)
			arp_ptr += (2 * port->dev->addr_len) + 4;
		else
			arp_ptr += port->dev->addr_len;

		memcpy(&dip, arp_ptr, 4);
		addr = ipvlan_ht_addr_lookup(port, &dip, false);
	}

	return addr;
}

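/* Route a locally generated IPv4 packet in the master's namespace and
 * hand it to ip_local_out(); drops count against dev->stats.tx_errors.
 */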
static int ipvlan_process_v4_outbound(struct sk_buff *skb)
{
	const struct iphdr *ip4h = ip_hdr(skb);
	struct net_device *dev = skb->dev;
	struct net *net = dev_net(dev);
	struct rtable *rt;
	int err, ret = NET_XMIT_DROP;
	struct flowi4 fl4 = {
		.flowi4_oif = dev->ifindex,
		.flowi4_tos = RT_TOS(ip4h->tos),
		.flowi4_flags = FLOWI_FLAG_ANYSRC,
		.daddr = ip4h->daddr,
		.saddr = ip4h->saddr,
	};

	rt = ip_route_output_flow(net, &fl4, NULL);
	if (IS_ERR(rt))
		goto err;

	if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
		ip_rt_put(rt);
		goto err;
	}
	skb_dst_set(skb, &rt->dst);
	err = ip_local_out(net, skb->sk, skb);
	if (unlikely(net_xmit_eval(err)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;
	goto out;
err:
	dev->stats.tx_errors++;
	kfree_skb(skb);
out:
	return ret;
}

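/* IPv6 counterpart of the above: resolve the route with
 * ip6_route_output() and transmit via ip6_local_out().
 */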
static int ipvlan_process_v6_outbound(struct sk_buff *skb)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct net_device *dev = skb->dev;
	struct net *net = dev_net(dev);
	struct dst_entry *dst;
	int err, ret = NET_XMIT_DROP;
	struct flowi6 fl6 = {
		.flowi6_iif = dev->ifindex,
		.daddr = ip6h->daddr,
		.saddr = ip6h->saddr,
		.flowi6_flags = FLOWI_FLAG_ANYSRC,
		.flowlabel = ip6_flowinfo(ip6h),
		.flowi6_mark = skb->mark,
		.flowi6_proto = ip6h->nexthdr,
	};

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst->error) {
		ret = dst->error;
		dst_release(dst);
		goto err;
	}
	skb_dst_set(skb, dst);
	err = ip6_local_out(net, skb->sk, skb);
	if (unlikely(net_xmit_eval(err)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;
	goto out;
err:
	dev->stats.tx_errors++;
	kfree_skb(skb);
out:
	return ret;
}

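/* L3/L3S TX path: drop multicast/broadcast (unsupported in these
 * modes), strip the L2 header the stack built, and route the packet in
 * the master's namespace.
 */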
static int ipvlan_process_outbound(struct sk_buff *skb)
{
	struct ethhdr *ethh = eth_hdr(skb);
	int ret = NET_XMIT_DROP;

	/* In this mode we don't care about multicast and broadcast traffic */
	if (is_multicast_ether_addr(ethh->h_dest)) {
		pr_warn_ratelimited("Dropped {multi|broad}cast of type=[%x]\n",
				    ntohs(skb->protocol));
		kfree_skb(skb);
		goto out;
	}

	/* The ipvlan is a pseudo-L2 device, so the packets that we receive
	 * will have an L2 header, which needs to be discarded before the
	 * packet is processed further in the net-ns of the main device.
	 */
	if (skb_mac_header_was_set(skb)) {
		skb_pull(skb, sizeof(*ethh));
		skb->mac_header = (typeof(skb->mac_header))~0U;
		skb_reset_network_header(skb);
	}

	if (skb->protocol == htons(ETH_P_IPV6))
		ret = ipvlan_process_v6_outbound(skb);
	else if (skb->protocol == htons(ETH_P_IP))
		ret = ipvlan_process_v4_outbound(skb);
	else {
		pr_warn_ratelimited("Dropped outbound packet type=%x\n",
				    ntohs(skb->protocol));
		kfree_skb(skb);
	}
out:
	return ret;
}

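/* Queue a multicast/broadcast skb on the port backlog for deferred
 * distribution by ipvlan_process_multicast(), dropping it once the
 * backlog holds IPVLAN_QBACKLOG_LIMIT packets.
 */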
static void ipvlan_multicast_enqueue(struct ipvl_port *port,
				     struct sk_buff *skb, bool tx_pkt)
{
	if (skb->protocol == htons(ETH_P_PAUSE)) {
		kfree_skb(skb);
		return;
	}

	/* Record whether the deferred packet came from the TX or the RX
	 * path; inferring this from the packet's mac addresses would lead
	 * to erroneous decisions. (That would be the case for a master
	 * device in loopback mode or a switch in hair-pin mode.)
	 */
	IPVL_SKB_CB(skb)->tx_pkt = tx_pkt;

	spin_lock(&port->backlog.lock);
	if (skb_queue_len(&port->backlog) < IPVLAN_QBACKLOG_LIMIT) {
		if (skb->dev)
			dev_hold(skb->dev);
		__skb_queue_tail(&port->backlog, skb);
		spin_unlock(&port->backlog.lock);
		schedule_work(&port->wq);
	} else {
		spin_unlock(&port->backlog.lock);
		atomic_long_inc(&skb->dev->rx_dropped);
		kfree_skb(skb);
	}
}

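/* L3/L3S TX: if the destination IP belongs to another slave on the same
 * port, short-circuit the packet to it; otherwise route it out through
 * the master device.
 */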
static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
{
	const struct ipvl_dev *ipvlan = netdev_priv(dev);
	void *lyr3h;
	struct ipvl_addr *addr;
	int addr_type;

	lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
	if (!lyr3h)
		goto out;

	addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
	if (addr)
		return ipvlan_rcv_frame(addr, &skb, true);

out:
	ipvlan_skb_crossing_ns(skb, ipvlan->phy_dev);
	return ipvlan_process_outbound(skb);
}

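/* L2 TX: a destination MAC equal to the source MAC means the frame is
 * addressed to the shared master MAC, so it is delivered locally (to a
 * slave or the master itself); multicast is queued for deferred
 * distribution, and everything else goes out via the master device.
 */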
static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
{
	const struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct ethhdr *eth = eth_hdr(skb);
	struct ipvl_addr *addr;
	void *lyr3h;
	int addr_type;

	if (ether_addr_equal(eth->h_dest, eth->h_source)) {
		lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
		if (lyr3h) {
			addr = ipvlan_addr_lookup(ipvlan->port, lyr3h,
						  addr_type, true);
			if (addr)
				return ipvlan_rcv_frame(addr, &skb, true);
		}
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (!skb)
			return NET_XMIT_DROP;

		/* Packet definitely does not belong to any of the
		 * virtual devices, but the dest is local. So forward
		 * the skb for the main-dev. At the RX side we just return
		 * RX_HANDLER_PASS so it is processed further on the stack.
		 */
		return dev_forward_skb(ipvlan->phy_dev, skb);

	} else if (is_multicast_ether_addr(eth->h_dest)) {
		ipvlan_skb_crossing_ns(skb, NULL);
		ipvlan_multicast_enqueue(ipvlan->port, skb, true);
		return NET_XMIT_SUCCESS;
	}

	ipvlan_skb_crossing_ns(skb, ipvlan->phy_dev);
	return dev_queue_xmit(skb);
}

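/* ndo_start_xmit entry point: dispatch to the mode-specific TX handler,
 * or drop (with a one-time warning) for an unknown port mode.
 */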
int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct ipvl_port *port = ipvlan_port_get_rcu_bh(ipvlan->phy_dev);

	if (!port)
		goto out;

	if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
		goto out;

	switch (port->mode) {
	case IPVLAN_MODE_L2:
		return ipvlan_xmit_mode_l2(skb, dev);
	case IPVLAN_MODE_L3:
	case IPVLAN_MODE_L3S:
		return ipvlan_xmit_mode_l3(skb, dev);
	}

	/* Should not reach here */
	WARN_ONCE(true, "ipvlan_queue_xmit() called for mode = [%hx]\n",
		  port->mode);
out:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

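/* A frame is "external" unless its source MAC is the master's address
 * and its source IP belongs to one of the port's own slaves.
 */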
static bool ipvlan_external_frame(struct sk_buff *skb, struct ipvl_port *port)
{
	struct ethhdr *eth = eth_hdr(skb);
	struct ipvl_addr *addr;
	void *lyr3h;
	int addr_type;

	if (ether_addr_equal(eth->h_source, skb->dev->dev_addr)) {
		lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
		if (!lyr3h)
			return true;

		addr = ipvlan_addr_lookup(port, lyr3h, addr_type, false);
		if (addr)
			return false;
	}

	return true;
}

static rx_handler_result_t ipvlan_handle_mode_l3(struct sk_buff **pskb,
						 struct ipvl_port *port)
{
	void *lyr3h;
	int addr_type;
	struct ipvl_addr *addr;
	struct sk_buff *skb = *pskb;
	rx_handler_result_t ret = RX_HANDLER_PASS;

	lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
	if (!lyr3h)
		goto out;

	addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
	if (addr)
		ret = ipvlan_rcv_frame(addr, pskb, false);

out:
	return ret;
}

static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
						 struct ipvl_port *port)
{
	struct sk_buff *skb = *pskb;
	struct ethhdr *eth = eth_hdr(skb);
	rx_handler_result_t ret = RX_HANDLER_PASS;
	void *lyr3h;
	int addr_type;

	if (is_multicast_ether_addr(eth->h_dest)) {
		if (ipvlan_external_frame(skb, port)) {
			struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

			/* External frames are queued for device local
			 * distribution, but a copy is given to the master
			 * straight away to avoid sending duplicates later
			 * when the work-queue processes this frame. This is
			 * achieved by returning RX_HANDLER_PASS.
			 */
			if (nskb) {
				ipvlan_skb_crossing_ns(nskb, NULL);
				ipvlan_multicast_enqueue(port, nskb, false);
			}
		}
	} else {
		struct ipvl_addr *addr;

		lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
		if (!lyr3h)
			return ret;

		addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
		if (addr)
			ret = ipvlan_rcv_frame(addr, pskb, false);
	}

	return ret;
}

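/* rx_handler installed on the master device: dispatch incoming frames
 * to the mode-specific RX handler. In L3S mode delivery happens later,
 * from the netfilter input hook below, so frames pass through here.
 */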
rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct ipvl_port *port = ipvlan_port_get_rcu(skb->dev);

	if (!port)
		return RX_HANDLER_PASS;

	switch (port->mode) {
	case IPVLAN_MODE_L2:
		return ipvlan_handle_mode_l2(pskb, port);
	case IPVLAN_MODE_L3:
		return ipvlan_handle_mode_l3(pskb, port);
	case IPVLAN_MODE_L3S:
		return RX_HANDLER_PASS;
	}

	/* Should not reach here */
	WARN_ONCE(true, "ipvlan_handle_frame() called for mode = [%hx]\n",
		  port->mode);
	kfree_skb(skb);
	return RX_HANDLER_CONSUMED;
}

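/* Map an skb received on an L3S-mode master to the slave address that
 * owns its destination, or NULL when the lookup does not apply.
 */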
static struct ipvl_addr *ipvlan_skb_to_addr(struct sk_buff *skb,
					    struct net_device *dev)
{
	struct ipvl_addr *addr = NULL;
	struct ipvl_port *port;
	void *lyr3h;
	int addr_type;

	if (!dev || !netif_is_ipvlan_port(dev))
		goto out;

	port = ipvlan_port_get_rcu(dev);
	if (!port || port->mode != IPVLAN_MODE_L3S)
		goto out;

	lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
	if (!lyr3h)
		goto out;

	addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
out:
	return addr;
}

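/* L3S receive path: re-run the input route lookup against the owning
 * slave device so that the packet is delivered in the slave's context.
 */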
struct sk_buff *ipvlan_l3_rcv(struct net_device *dev, struct sk_buff *skb,
			      u16 proto)
{
	struct ipvl_addr *addr;
	struct net_device *sdev;

	addr = ipvlan_skb_to_addr(skb, dev);
	if (!addr)
		goto out;

	sdev = addr->master->dev;
	switch (proto) {
	case AF_INET:
	{
		int err;
		struct iphdr *ip4h = ip_hdr(skb);

		err = ip_route_input_noref(skb, ip4h->daddr, ip4h->saddr,
					   ip4h->tos, sdev);
		if (unlikely(err))
			goto out;
		break;
	}
	case AF_INET6:
	{
		struct dst_entry *dst;
		struct ipv6hdr *ip6h = ipv6_hdr(skb);
		int flags = RT6_LOOKUP_F_HAS_SADDR;
		struct flowi6 fl6 = {
			.flowi6_iif   = sdev->ifindex,
			.daddr        = ip6h->daddr,
			.saddr        = ip6h->saddr,
			.flowlabel    = ip6_flowinfo(ip6h),
			.flowi6_mark  = skb->mark,
			.flowi6_proto = ip6h->nexthdr,
		};

		skb_dst_drop(skb);
		dst = ip6_route_input_lookup(dev_net(sdev), sdev, &fl6, flags);
		skb_dst_set(skb, dst);
		break;
	}
	default:
		break;
	}
out:
	return skb;
}

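/* Netfilter input hook used in L3S mode: retarget the skb to the owning
 * slave and account it as received there. Always accepts.
 */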
unsigned int ipvlan_nf_input(void *priv, struct sk_buff *skb,
			     const struct nf_hook_state *state)
{
	struct ipvl_addr *addr;
	unsigned int len;

	addr = ipvlan_skb_to_addr(skb, skb->dev);
	if (!addr)
		goto out;

	skb->dev = addr->master->dev;
	len = skb->len + ETH_HLEN;
	ipvlan_count_rx(addr->master, len, true, false);
out:
	return NF_ACCEPT;
}