/* Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 */

#include "ipvlan.h"

static u32 ipvlan_jhash_secret __read_mostly;

void ipvlan_init_secret(void)
{
	net_get_random_once(&ipvlan_jhash_secret, sizeof(ipvlan_jhash_secret));
}

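/* Update the slave's per-cpu RX counters: successful packets bump
 * rx_pkts/rx_bytes (and rx_mcast for multicast) inside a u64_stats
 * sync section; failures are accumulated in rx_errs.
 */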
static void ipvlan_count_rx(const struct ipvl_dev *ipvlan,
			    unsigned int len, bool success, bool mcast)
{
	if (!ipvlan)
		return;

	if (likely(success)) {
		struct ipvl_pcpu_stats *pcptr;

		pcptr = this_cpu_ptr(ipvlan->pcpu_stats);
		u64_stats_update_begin(&pcptr->syncp);
		pcptr->rx_pkts++;
		pcptr->rx_bytes += len;
		if (mcast)
			pcptr->rx_mcast++;
		u64_stats_update_end(&pcptr->syncp);
	} else {
		this_cpu_inc(ipvlan->pcpu_stats->rx_errs);
	}
}

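/* Address-hash helpers: jhash the IPv6/IPv4 address with the boot-time
 * random ipvlan_jhash_secret, masked to IPVLAN_HASH_MASK buckets.
 */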
static u8 ipvlan_get_v6_hash(const void *iaddr)
{
	const struct in6_addr *ip6_addr = iaddr;

	return __ipv6_addr_jhash(ip6_addr, ipvlan_jhash_secret) &
	       IPVLAN_HASH_MASK;
}

static u8 ipvlan_get_v4_hash(const void *iaddr)
{
	const struct in_addr *ip4_addr = iaddr;

	return jhash_1word(ip4_addr->s_addr, ipvlan_jhash_secret) &
	       IPVLAN_HASH_MASK;
}

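/* Look an address up in the port-wide hash table and return the matching
 * ipvl_addr, or NULL. Uses the RCU list primitives, so callers are
 * expected to be in an RCU read-side section.
 */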
static struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
					       const void *iaddr, bool is_v6)
{
	struct ipvl_addr *addr;
	u8 hash;

	hash = is_v6 ? ipvlan_get_v6_hash(iaddr) :
	       ipvlan_get_v4_hash(iaddr);
	hlist_for_each_entry_rcu(addr, &port->hlhead[hash], hlnode) {
		if (is_v6 && addr->atype == IPVL_IPV6 &&
		    ipv6_addr_equal(&addr->ip6addr, iaddr))
			return addr;
		else if (!is_v6 && addr->atype == IPVL_IPV4 &&
			 addr->ip4addr.s_addr ==
				((struct in_addr *)iaddr)->s_addr)
			return addr;
	}
	return NULL;
}

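/* Add/remove an address in the port-wide hash table. Addition is a no-op
 * if the node is already hashed; both paths use the RCU-safe hlist ops.
 */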
void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr)
{
	struct ipvl_port *port = ipvlan->port;
	u8 hash;

	hash = (addr->atype == IPVL_IPV6) ?
	       ipvlan_get_v6_hash(&addr->ip6addr) :
	       ipvlan_get_v4_hash(&addr->ip4addr);
	if (hlist_unhashed(&addr->hlnode))
		hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
}

void ipvlan_ht_addr_del(struct ipvl_addr *addr)
{
	hlist_del_init_rcu(&addr->hlnode);
}

struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
				   const void *iaddr, bool is_v6)
{
	struct ipvl_addr *addr;

	list_for_each_entry(addr, &ipvlan->addrs, anode) {
		if ((is_v6 && addr->atype == IPVL_IPV6 &&
		     ipv6_addr_equal(&addr->ip6addr, iaddr)) ||
		    (!is_v6 && addr->atype == IPVL_IPV4 &&
		     addr->ip4addr.s_addr == ((struct in_addr *)iaddr)->s_addr))
			return addr;
	}
	return NULL;
}

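/* Return true if any slave on the port already owns the address; used
 * from the control path (hence the RTNL assertion) to reject duplicates.
 */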
bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6)
{
	struct ipvl_dev *ipvlan;

	ASSERT_RTNL();

	list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
		if (ipvlan_find_addr(ipvlan, iaddr, is_v6))
			return true;
	}
	return false;
}

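/* Locate and validate the L3 header (ARP/IPv4/IPv6), returning a pointer
 * to it with the address type in *type, or NULL if the packet is
 * malformed or of an unhandled protocol.
 */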
static void *ipvlan_get_L3_hdr(struct sk_buff *skb, int *type)
{
	void *lyr3h = NULL;

	switch (skb->protocol) {
	case htons(ETH_P_ARP): {
		struct arphdr *arph;

		if (unlikely(!pskb_may_pull(skb, sizeof(*arph))))
			return NULL;

		arph = arp_hdr(skb);
		*type = IPVL_ARP;
		lyr3h = arph;
		break;
	}
	case htons(ETH_P_IP): {
		u32 pktlen;
		struct iphdr *ip4h;

		if (unlikely(!pskb_may_pull(skb, sizeof(*ip4h))))
			return NULL;

		ip4h = ip_hdr(skb);
		pktlen = ntohs(ip4h->tot_len);
		if (ip4h->ihl < 5 || ip4h->version != 4)
			return NULL;
		if (skb->len < pktlen || pktlen < (ip4h->ihl * 4))
			return NULL;

		*type = IPVL_IPV4;
		lyr3h = ip4h;
		break;
	}
	case htons(ETH_P_IPV6): {
		struct ipv6hdr *ip6h;

		if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h))))
			return NULL;

		ip6h = ipv6_hdr(skb);
		if (ip6h->version != 6)
			return NULL;

		*type = IPVL_IPV6;
		lyr3h = ip6h;
		/* Only Neighbour Solicitation pkts need different treatment */
		if (ipv6_addr_any(&ip6h->saddr) &&
		    ip6h->nexthdr == NEXTHDR_ICMP) {
			struct icmp6hdr *icmph;

			/* The ICMPv6 header (and the NS target address the
			 * lookup code reads right after it) must be in the
			 * linear data before a pointer past the IPv6 header
			 * is handed out; pskb_may_pull() may reallocate the
			 * header, so re-read ip6h afterwards.
			 */
			if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) +
						    sizeof(*icmph) +
						    sizeof(struct in6_addr))))
				return NULL;

			ip6h = ipv6_hdr(skb);
			icmph = (struct icmp6hdr *)(ip6h + 1);
			*type = IPVL_ICMPV6;
			lyr3h = icmph;
		}
		break;
	}
	default:
		return NULL;
	}

	return lyr3h;
}

unsigned int ipvlan_mac_hash(const unsigned char *addr)
{
	u32 hash = jhash_1word(__get_unaligned_cpu32(addr+2),
			       ipvlan_jhash_secret);

	return hash & IPVLAN_MAC_FILTER_MASK;
}

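/* Work-queue handler that drains the port's multicast/broadcast backlog.
 * Each deferred skb is cloned to every interested, up slave
 * (dev_forward_skb() for TX-originated packets, netif_rx() for RX), and
 * the original is either transmitted out of the master (TX) or freed (RX).
 */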
void ipvlan_process_multicast(struct work_struct *work)
{
	struct ipvl_port *port = container_of(work, struct ipvl_port, wq);
	struct ethhdr *ethh;
	struct ipvl_dev *ipvlan;
	struct sk_buff *skb, *nskb;
	struct sk_buff_head list;
	unsigned int len;
	unsigned int mac_hash;
	int ret;
	u8 pkt_type;
	bool tx_pkt;

	__skb_queue_head_init(&list);

	spin_lock_bh(&port->backlog.lock);
	skb_queue_splice_tail_init(&port->backlog, &list);
	spin_unlock_bh(&port->backlog.lock);

	while ((skb = __skb_dequeue(&list)) != NULL) {
		struct net_device *dev = skb->dev;
		bool consumed = false;

		ethh = eth_hdr(skb);
		tx_pkt = IPVL_SKB_CB(skb)->tx_pkt;
		mac_hash = ipvlan_mac_hash(ethh->h_dest);

		if (ether_addr_equal(ethh->h_dest, port->dev->broadcast))
			pkt_type = PACKET_BROADCAST;
		else
			pkt_type = PACKET_MULTICAST;

		rcu_read_lock();
		list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
			if (tx_pkt && (ipvlan->dev == skb->dev))
				continue;
			if (!test_bit(mac_hash, ipvlan->mac_filters))
				continue;
			if (!(ipvlan->dev->flags & IFF_UP))
				continue;
			ret = NET_RX_DROP;
			len = skb->len + ETH_HLEN;
			nskb = skb_clone(skb, GFP_ATOMIC);
			local_bh_disable();
			if (nskb) {
				consumed = true;
				nskb->pkt_type = pkt_type;
				nskb->dev = ipvlan->dev;
				if (tx_pkt)
					ret = dev_forward_skb(ipvlan->dev, nskb);
				else
					ret = netif_rx(nskb);
			}
			ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
			local_bh_enable();
		}
		rcu_read_unlock();

		if (tx_pkt) {
			/* If the packet originated here, send it out. */
			skb->dev = port->dev;
			skb->pkt_type = pkt_type;
			dev_queue_xmit(skb);
		} else {
			if (consumed)
				consume_skb(skb);
			else
				kfree_skb(skb);
		}
		if (dev)
			dev_put(dev);
	}
}

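/* Prepare an skb that may be moving across net namespaces: scrub its
 * state when the source and destination netns differ, then retarget
 * skb->dev.
 */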
static void ipvlan_skb_crossing_ns(struct sk_buff *skb, struct net_device *dev)
{
	bool xnet = true;

	if (dev)
		xnet = !net_eq(dev_net(skb->dev), dev_net(dev));

	skb_scrub_packet(skb, xnet);
	if (dev)
		skb->dev = dev;
}

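/* Deliver an skb to the slave that owns the destination address. For
 * slave-to-slave traffic (local == true) the skb is forwarded directly
 * with dev_forward_skb(); otherwise RX_HANDLER_ANOTHER tells the caller
 * to re-run RX processing with skb->dev retargeted at the slave.
 */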
static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb,
			    bool local)
{
	struct ipvl_dev *ipvlan = addr->master;
	struct net_device *dev = ipvlan->dev;
	unsigned int len;
	rx_handler_result_t ret = RX_HANDLER_CONSUMED;
	bool success = false;
	struct sk_buff *skb = *pskb;

	len = skb->len + ETH_HLEN;
	/* Only packets exchanged between two local slaves need to have
	 * device-up check as well as skb-share check.
	 */
	if (local) {
		if (unlikely(!(dev->flags & IFF_UP))) {
			kfree_skb(skb);
			goto out;
		}

		skb = skb_share_check(skb, GFP_ATOMIC);
		if (!skb)
			goto out;

		*pskb = skb;
	}
	ipvlan_skb_crossing_ns(skb, dev);

	if (local) {
		skb->pkt_type = PACKET_HOST;
		if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS)
			success = true;
	} else {
		ret = RX_HANDLER_ANOTHER;
		success = true;
	}

out:
	ipvlan_count_rx(ipvlan, len, success, false);
	return ret;
}

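/* Pull the relevant address out of the L3 header (IPv4/IPv6 src or dst,
 * the NS target for ICMPv6, SPA/TPA for ARP) and look it up in the
 * port's hash table.
 */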
static struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port,
					    void *lyr3h, int addr_type,
					    bool use_dest)
{
	struct ipvl_addr *addr = NULL;

	if (addr_type == IPVL_IPV6) {
		struct ipv6hdr *ip6h;
		struct in6_addr *i6addr;

		ip6h = (struct ipv6hdr *)lyr3h;
		i6addr = use_dest ? &ip6h->daddr : &ip6h->saddr;
		addr = ipvlan_ht_addr_lookup(port, i6addr, true);
	} else if (addr_type == IPVL_ICMPV6) {
		struct nd_msg *ndmh;
		struct in6_addr *i6addr;

		/* Make sure that the Neighbor Solicitation ICMPv6 packets
		 * are handled to avoid DAD issues.
		 */
		ndmh = (struct nd_msg *)lyr3h;
		if (ndmh->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
			i6addr = &ndmh->target;
			addr = ipvlan_ht_addr_lookup(port, i6addr, true);
		}
	} else if (addr_type == IPVL_IPV4) {
		struct iphdr *ip4h;
		__be32 *i4addr;

		ip4h = (struct iphdr *)lyr3h;
		i4addr = use_dest ? &ip4h->daddr : &ip4h->saddr;
		addr = ipvlan_ht_addr_lookup(port, i4addr, false);
	} else if (addr_type == IPVL_ARP) {
		struct arphdr *arph;
		unsigned char *arp_ptr;
		__be32 dip;

		arph = (struct arphdr *)lyr3h;
		arp_ptr = (unsigned char *)(arph + 1);
		if (use_dest)
			arp_ptr += (2 * port->dev->addr_len) + 4;
		else
			arp_ptr += port->dev->addr_len;

		memcpy(&dip, arp_ptr, 4);
		addr = ipvlan_ht_addr_lookup(port, &dip, false);
	}

	return addr;
}

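/* L3-mode TX for IPv4: do a fresh output route lookup in the master's
 * netns and hand the packet to ip_local_out(); any failure is counted
 * in dev->stats.tx_errors.
 */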
static int ipvlan_process_v4_outbound(struct sk_buff *skb)
{
	const struct iphdr *ip4h = ip_hdr(skb);
	struct net_device *dev = skb->dev;
	struct net *net = dev_net(dev);
	struct rtable *rt;
	int err, ret = NET_XMIT_DROP;
	struct flowi4 fl4 = {
		.flowi4_oif = dev->ifindex,
		.flowi4_tos = RT_TOS(ip4h->tos),
		.flowi4_flags = FLOWI_FLAG_ANYSRC,
		.daddr = ip4h->daddr,
		.saddr = ip4h->saddr,
	};

	rt = ip_route_output_flow(net, &fl4, NULL);
	if (IS_ERR(rt))
		goto err;

	if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
		ip_rt_put(rt);
		goto err;
	}
	skb_dst_set(skb, &rt->dst);
	err = ip_local_out(net, skb->sk, skb);
	if (unlikely(net_xmit_eval(err)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;
	goto out;
err:
	dev->stats.tx_errors++;
	kfree_skb(skb);
out:
	return ret;
}

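/* IPv6 counterpart of the above. Note that the route lookup keys on
 * flowi6_oif: this is an *output* lookup on the master device, so using
 * flowi6_iif here (as this code originally did) picks the wrong route.
 */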
static int ipvlan_process_v6_outbound(struct sk_buff *skb)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct net_device *dev = skb->dev;
	struct net *net = dev_net(dev);
	struct dst_entry *dst;
	int err, ret = NET_XMIT_DROP;
	struct flowi6 fl6 = {
		.flowi6_oif = dev->ifindex,
		.daddr = ip6h->daddr,
		.saddr = ip6h->saddr,
		.flowi6_flags = FLOWI_FLAG_ANYSRC,
		.flowlabel = ip6_flowinfo(ip6h),
		.flowi6_mark = skb->mark,
		.flowi6_proto = ip6h->nexthdr,
	};

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst->error) {
		ret = dst->error;
		dst_release(dst);
		goto err;
	}
	skb_dst_set(skb, dst);
	err = ip6_local_out(net, skb->sk, skb);
	if (unlikely(net_xmit_eval(err)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;
	goto out;
err:
	dev->stats.tx_errors++;
	kfree_skb(skb);
out:
	return ret;
}

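/* Dispatch an L3-mode TX packet to the v4/v6 handler after stripping any
 * L2 header; multicast/broadcast and non-IP traffic is dropped.
 */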
static int ipvlan_process_outbound(struct sk_buff *skb)
{
	struct ethhdr *ethh = eth_hdr(skb);
	int ret = NET_XMIT_DROP;

	/* In this mode we don't care about multicast and broadcast traffic */
	if (is_multicast_ether_addr(ethh->h_dest)) {
		pr_warn_ratelimited("Dropped {multi|broad}cast of type= [%x]\n",
				    ntohs(skb->protocol));
		kfree_skb(skb);
		goto out;
	}

	/* The ipvlan is a pseudo-L2 device, so the packets that we receive
	 * will have an L2 header; it needs to be discarded and the packet
	 * processed further in the net-ns of the main device.
	 */
	if (skb_mac_header_was_set(skb)) {
		skb_pull(skb, sizeof(*ethh));
		skb->mac_header = (typeof(skb->mac_header))~0U;
		skb_reset_network_header(skb);
	}

	if (skb->protocol == htons(ETH_P_IPV6))
		ret = ipvlan_process_v6_outbound(skb);
	else if (skb->protocol == htons(ETH_P_IP))
		ret = ipvlan_process_v4_outbound(skb);
	else {
		pr_warn_ratelimited("Dropped outbound packet type=%x\n",
				    ntohs(skb->protocol));
		kfree_skb(skb);
	}
out:
	return ret;
}

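/* Defer a multicast/broadcast skb to the port's backlog for the
 * work-queue to distribute. The queue is bounded by IPVLAN_QBACKLOG_LIMIT;
 * overflow packets are dropped and accounted against the originating
 * device.
 */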
static void ipvlan_multicast_enqueue(struct ipvl_port *port,
				     struct sk_buff *skb, bool tx_pkt)
{
	if (skb->protocol == htons(ETH_P_PAUSE)) {
		kfree_skb(skb);
		return;
	}

	/* Record whether the deferred packet came from the TX or RX path.
	 * Deciding this later by looking at the mac-addresses on the packet
	 * would lead to erroneous decisions. (This would be true for a
	 * loopback-mode on the master device or a hair-pin mode of the
	 * switch.)
	 */
	IPVL_SKB_CB(skb)->tx_pkt = tx_pkt;

	spin_lock(&port->backlog.lock);
	if (skb_queue_len(&port->backlog) < IPVLAN_QBACKLOG_LIMIT) {
		if (skb->dev)
			dev_hold(skb->dev);
		__skb_queue_tail(&port->backlog, skb);
		spin_unlock(&port->backlog.lock);
		schedule_work(&port->wq);
	} else {
		spin_unlock(&port->backlog.lock);
		atomic_long_inc(&skb->dev->rx_dropped);
		kfree_skb(skb);
	}
}

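/* L3/L3S-mode TX: if the destination address belongs to another slave on
 * the same port, short-circuit the packet to it; otherwise route it out
 * through the master device.
 */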
static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
{
	const struct ipvl_dev *ipvlan = netdev_priv(dev);
	void *lyr3h;
	struct ipvl_addr *addr;
	int addr_type;

	lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
	if (!lyr3h)
		goto out;

	addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
	if (addr)
		return ipvlan_rcv_frame(addr, &skb, true);

out:
	ipvlan_skb_crossing_ns(skb, ipvlan->phy_dev);
	return ipvlan_process_outbound(skb);
}

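/* L2-mode TX: all slaves share the master's MAC, so dest == source MAC
 * means the packet stays on this port and is delivered locally;
 * multicast/broadcast is queued for the work-queue, and everything else
 * goes straight out via the master.
 */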
static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
{
	const struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct ethhdr *eth = eth_hdr(skb);
	struct ipvl_addr *addr;
	void *lyr3h;
	int addr_type;

	if (ether_addr_equal(eth->h_dest, eth->h_source)) {
		lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
		if (lyr3h) {
			addr = ipvlan_addr_lookup(ipvlan->port, lyr3h,
						  addr_type, true);
			if (addr)
				return ipvlan_rcv_frame(addr, &skb, true);
		}
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (!skb)
			return NET_XMIT_DROP;

		/* Packet definitely does not belong to any of the
		 * virtual devices, but the dest is local. So forward
		 * the skb for the main-dev. At the RX side we just return
		 * RX_PASS for it to be processed further on the stack.
		 */
		return dev_forward_skb(ipvlan->phy_dev, skb);

	} else if (is_multicast_ether_addr(eth->h_dest)) {
		ipvlan_skb_crossing_ns(skb, NULL);
		ipvlan_multicast_enqueue(ipvlan->port, skb, true);
		return NET_XMIT_SUCCESS;
	}

	ipvlan_skb_crossing_ns(skb, ipvlan->phy_dev);
	return dev_queue_xmit(skb);
}

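/* TX entry point invoked from the slave's transmit path: validate that
 * the Ethernet header is pullable and dispatch to the handler for the
 * port's current mode.
 */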
int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct ipvl_port *port = ipvlan_port_get_rcu_bh(ipvlan->phy_dev);

	if (!port)
		goto out;

	if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
		goto out;

	switch (port->mode) {
	case IPVLAN_MODE_L2:
		return ipvlan_xmit_mode_l2(skb, dev);
	case IPVLAN_MODE_L3:
	case IPVLAN_MODE_L3S:
		return ipvlan_xmit_mode_l3(skb, dev);
	}

	/* Should not reach here */
	WARN_ONCE(true, "ipvlan_queue_xmit() called for mode = [%hx]\n",
		  port->mode);
out:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

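/* Decide whether a multicast frame came from outside the port: frames
 * sourced from the master's own MAC whose source IP belongs to one of
 * the slaves are locally generated; everything else counts as external.
 */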
static bool ipvlan_external_frame(struct sk_buff *skb, struct ipvl_port *port)
{
	struct ethhdr *eth = eth_hdr(skb);
	struct ipvl_addr *addr;
	void *lyr3h;
	int addr_type;

	if (ether_addr_equal(eth->h_source, skb->dev->dev_addr)) {
		lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
		if (!lyr3h)
			return true;

		addr = ipvlan_addr_lookup(port, lyr3h, addr_type, false);
		if (addr)
			return false;
	}

	return true;
}

static rx_handler_result_t ipvlan_handle_mode_l3(struct sk_buff **pskb,
						 struct ipvl_port *port)
{
	void *lyr3h;
	int addr_type;
	struct ipvl_addr *addr;
	struct sk_buff *skb = *pskb;
	rx_handler_result_t ret = RX_HANDLER_PASS;

	lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
	if (!lyr3h)
		goto out;

	addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
	if (addr)
		ret = ipvlan_rcv_frame(addr, pskb, false);

out:
	return ret;
}

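/* L2-mode RX: external multicast is cloned onto the backlog (the
 * original passes up to the master via RX_HANDLER_PASS), while unicast
 * is delivered to the slave owning the destination address.
 */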
static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
						 struct ipvl_port *port)
{
	struct sk_buff *skb = *pskb;
	struct ethhdr *eth = eth_hdr(skb);
	rx_handler_result_t ret = RX_HANDLER_PASS;
	void *lyr3h;
	int addr_type;

	if (is_multicast_ether_addr(eth->h_dest)) {
		if (ipvlan_external_frame(skb, port)) {
			struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

			/* External frames are queued for device local
			 * distribution, but a copy is given to master
			 * straight away to avoid sending duplicates later
			 * when work-queue processes this frame. This is
			 * achieved by returning RX_HANDLER_PASS.
			 */
			if (nskb) {
				ipvlan_skb_crossing_ns(nskb, NULL);
				ipvlan_multicast_enqueue(port, nskb, false);
			}
		}
	} else {
		struct ipvl_addr *addr;

		lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
		if (!lyr3h)
			return ret;

		addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
		if (addr)
			ret = ipvlan_rcv_frame(addr, pskb, false);
	}

	return ret;
}

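/* rx_handler registered on the master device: dispatch on port mode.
 * L3S returns RX_HANDLER_PASS because slave demux happens later, in the
 * netfilter input hook (ipvlan_nf_input) after routing.
 */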
rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct ipvl_port *port = ipvlan_port_get_rcu(skb->dev);

	if (!port)
		return RX_HANDLER_PASS;

	switch (port->mode) {
	case IPVLAN_MODE_L2:
		return ipvlan_handle_mode_l2(pskb, port);
	case IPVLAN_MODE_L3:
		return ipvlan_handle_mode_l3(pskb, port);
	case IPVLAN_MODE_L3S:
		return RX_HANDLER_PASS;
	}

	/* Should not reach here */
	WARN_ONCE(true, "ipvlan_handle_frame() called for mode = [%hx]\n",
		  port->mode);
	kfree_skb(skb);
	return RX_HANDLER_CONSUMED;
}

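/* Map an skb arriving on an L3S-mode master to the slave address that
 * owns its destination, or NULL if this port/mode does not apply.
 */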
static struct ipvl_addr *ipvlan_skb_to_addr(struct sk_buff *skb,
					    struct net_device *dev)
{
	struct ipvl_addr *addr = NULL;
	struct ipvl_port *port;
	void *lyr3h;
	int addr_type;

	if (!dev || !netif_is_ipvlan_port(dev))
		goto out;

	port = ipvlan_port_get_rcu(dev);
	if (!port || port->mode != IPVLAN_MODE_L3S)
		goto out;

	lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
	if (!lyr3h)
		goto out;

	addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
out:
	return addr;
}

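/* L3S-mode receive hook: re-do the input route lookup with the owning
 * slave as the input device so the packet is steered into the slave's
 * netns/stack.
 */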
struct sk_buff *ipvlan_l3_rcv(struct net_device *dev, struct sk_buff *skb,
			      u16 proto)
{
	struct ipvl_addr *addr;
	struct net_device *sdev;

	addr = ipvlan_skb_to_addr(skb, dev);
	if (!addr)
		goto out;

	sdev = addr->master->dev;
	switch (proto) {
	case AF_INET:
	{
		int err;
		struct iphdr *ip4h = ip_hdr(skb);

		err = ip_route_input_noref(skb, ip4h->daddr, ip4h->saddr,
					   ip4h->tos, sdev);
		if (unlikely(err))
			goto out;
		break;
	}
	case AF_INET6:
	{
		struct dst_entry *dst;
		struct ipv6hdr *ip6h = ipv6_hdr(skb);
		int flags = RT6_LOOKUP_F_HAS_SADDR;
		struct flowi6 fl6 = {
			.flowi6_iif = sdev->ifindex,
			.daddr = ip6h->daddr,
			.saddr = ip6h->saddr,
			.flowlabel = ip6_flowinfo(ip6h),
			.flowi6_mark = skb->mark,
			.flowi6_proto = ip6h->nexthdr,
		};

		skb_dst_drop(skb);
		dst = ip6_route_input_lookup(dev_net(sdev), sdev, &fl6, flags);
		skb_dst_set(skb, dst);
		break;
	}
	default:
		break;
	}
out:
	return skb;
}

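/* Netfilter input hook used in L3S mode: after routing, switch skb->dev
 * to the owning slave and account the packet as received on it. Always
 * returns NF_ACCEPT; demux failures simply leave the skb untouched.
 */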
unsigned int ipvlan_nf_input(void *priv, struct sk_buff *skb,
			     const struct nf_hook_state *state)
{
	struct ipvl_addr *addr;
	unsigned int len;

	addr = ipvlan_skb_to_addr(skb, skb->dev);
	if (!addr)
		goto out;

	skb->dev = addr->master->dev;
	len = skb->len + ETH_HLEN;
	ipvlan_count_rx(addr->master, len, true, false);
out:
	return NF_ACCEPT;
}