br_forward.c

/*
 *	Forwarding decision
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/netfilter_bridge.h>
#include "br_private.h"

static int deliver_clone(const struct net_bridge_port *prev,
			 struct sk_buff *skb,
			 void (*__packet_hook)(const struct net_bridge_port *p,
					       struct sk_buff *skb));

/*
 * Don't deliver to the originating port (unless it is in hairpin mode),
 * to ports that fail VLAN egress filtering, or to ports that are not in
 * the STP forwarding state.
 */
static inline int should_deliver(const struct net_bridge_port *p,
				 const struct sk_buff *skb)
{
	return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
		br_allowed_egress(p->br, nbp_get_vlan_info(p), skb) &&
		p->state == BR_STATE_FORWARDING;
}

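/*
 * Final transmit step: restore the Ethernet header if br_netfilter saved
 * it (ip_fragment does not copy the MAC header), check that the frame
 * fits the outgoing device, and queue it for transmission.
 */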
int br_dev_queue_push_xmit(struct sk_buff *skb)
{
	/* ip_fragment doesn't copy the MAC header */
	if (nf_bridge_maybe_copy_header(skb) ||
	    !is_skb_forwardable(skb->dev, skb)) {
		kfree_skb(skb);
	} else {
		skb_push(skb, ETH_HLEN);
		br_drop_fake_rtable(skb);
		dev_queue_xmit(skb);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit);

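/*
 * Run the NF_BR_POST_ROUTING netfilter hook (ebtables POSTROUTING) as the
 * last step before the frame is handed to the outgoing device.
 */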
int br_forward_finish(struct sk_buff *skb)
{
	return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev,
		       br_dev_queue_push_xmit);
}
EXPORT_SYMBOL_GPL(br_forward_finish);

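/*
 * Deliver a locally originated frame to a single port.  br_handle_vlan()
 * may retag or drop the skb; if netpoll is active on the bridge device
 * the frame bypasses the normal path, otherwise it traverses the
 * NF_BR_LOCAL_OUT hook on its way to br_forward_finish().
 */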
static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
	skb = br_handle_vlan(to->br, nbp_get_vlan_info(to), skb);
	if (!skb)
		return;

	skb->dev = to->dev;

	if (unlikely(netpoll_tx_running(to->br->dev))) {
		if (!is_skb_forwardable(skb->dev, skb))
			kfree_skb(skb);
		else {
			skb_push(skb, ETH_HLEN);
			br_netpoll_send_skb(to, skb);
		}
		return;
	}

	NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
		br_forward_finish);
}

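/*
 * Forward a frame received on one port out through another port.
 * LRO-aggregated skbs must not be forwarded (skb_warn_if_lro), the
 * checksum state is adjusted for the new device, and the frame traverses
 * the NF_BR_FORWARD hook.
 */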
static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
{
	struct net_device *indev;

	if (skb_warn_if_lro(skb)) {
		kfree_skb(skb);
		return;
	}

	skb = br_handle_vlan(to->br, nbp_get_vlan_info(to), skb);
	if (!skb)
		return;

	indev = skb->dev;
	skb->dev = to->dev;
	skb_forward_csum(skb);

	NF_HOOK(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev,
		br_forward_finish);
}

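/*
 * br_deliver() is for locally originated traffic, br_forward() for
 * traffic received on another port.  In br_forward(), a non-NULL skb0
 * means the caller keeps the original skb (it is also delivered locally),
 * so only a clone may be transmitted.
 */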
/* called with rcu_read_lock */
void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
	if (to && should_deliver(to, skb)) {
		__br_deliver(to, skb);
		return;
	}

	kfree_skb(skb);
}

/* called with rcu_read_lock */
void br_forward(const struct net_bridge_port *to, struct sk_buff *skb,
		struct sk_buff *skb0)
{
	if (should_deliver(to, skb)) {
		if (skb0)
			deliver_clone(to, skb, __br_forward);
		else
			__br_forward(to, skb);
		return;
	}

	if (!skb0)
		kfree_skb(skb);
}

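/*
 * Transmit a clone through __packet_hook while the caller keeps the
 * original skb.  A failed clone is counted as a tx drop on the bridge
 * device.
 */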
static int deliver_clone(const struct net_bridge_port *prev,
			 struct sk_buff *skb,
			 void (*__packet_hook)(const struct net_bridge_port *p,
					       struct sk_buff *skb))
{
	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		dev->stats.tx_dropped++;
		return -ENOMEM;
	}

	__packet_hook(prev, skb);
	return 0;
}

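/*
 * Delivery is deferred by one port: the previously chosen port is served
 * with a clone here, and the current port becomes the new "prev".  The
 * last chosen port can then consume the original skb without an extra
 * copy (see the tails of br_flood() and br_multicast_flood()).
 */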
static struct net_bridge_port *maybe_deliver(
	struct net_bridge_port *prev, struct net_bridge_port *p,
	struct sk_buff *skb,
	void (*__packet_hook)(const struct net_bridge_port *p,
			      struct sk_buff *skb))
{
	int err;

	if (!should_deliver(p, skb))
		return prev;

	if (!prev)
		goto out;

	err = deliver_clone(prev, skb, __packet_hook);
	if (err)
		return ERR_PTR(err);

out:
	return p;
}

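/*
 * Flood a frame to all eligible ports.  A non-NULL skb0 means the caller
 * keeps the original skb and only clones are transmitted; otherwise the
 * final delivery consumes skb itself.
 */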
/* called under bridge lock */
static void br_flood(struct net_bridge *br, struct sk_buff *skb,
		     struct sk_buff *skb0,
		     void (*__packet_hook)(const struct net_bridge_port *p,
					   struct sk_buff *skb),
		     bool unicast)
{
	struct net_bridge_port *p;
	struct net_bridge_port *prev;

	prev = NULL;

	list_for_each_entry_rcu(p, &br->port_list, list) {
		/* Do not flood unicast traffic to ports that turn it off */
		if (unicast && !(p->flags & BR_FLOOD))
			continue;
		prev = maybe_deliver(prev, p, skb, __packet_hook);
		if (IS_ERR(prev))
			goto out;
	}

	if (!prev)
		goto out;

	if (skb0)
		deliver_clone(prev, skb, __packet_hook);
	else
		__packet_hook(prev, skb);
	return;

out:
	if (!skb0)
		kfree_skb(skb);
}

/* called with rcu_read_lock */
void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb, bool unicast)
{
	br_flood(br, skb, NULL, __br_deliver, unicast);
}

/* called under bridge lock */
void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
		      struct sk_buff *skb2, bool unicast)
{
	br_flood(br, skb, skb2, __br_forward, unicast);
}

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
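/*
 * Deliver to the union of the group's member ports (mdst->ports) and the
 * detected router ports (br->router_list).  Both lists are kept sorted
 * by port pointer value, so the loop below is a sorted-list merge that
 * visits each port at most once.
 */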
/* called with rcu_read_lock */
static void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
			       struct sk_buff *skb, struct sk_buff *skb0,
			       void (*__packet_hook)(
					const struct net_bridge_port *p,
					struct sk_buff *skb))
{
	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *prev = NULL;
	struct net_bridge_port_group *p;
	struct hlist_node *rp;

	rp = rcu_dereference(hlist_first_rcu(&br->router_list));
	p = mdst ? rcu_dereference(mdst->ports) : NULL;
	while (p || rp) {
		struct net_bridge_port *port, *lport, *rport;

		lport = p ? p->port : NULL;
		rport = rp ? hlist_entry(rp, struct net_bridge_port, rlist) :
			     NULL;

		port = (unsigned long)lport > (unsigned long)rport ?
		       lport : rport;

		prev = maybe_deliver(prev, port, skb, __packet_hook);
		if (IS_ERR(prev))
			goto out;

		if ((unsigned long)lport >= (unsigned long)port)
			p = rcu_dereference(p->next);
		if ((unsigned long)rport >= (unsigned long)port)
			rp = rcu_dereference(hlist_next_rcu(rp));
	}

	if (!prev)
		goto out;

	if (skb0)
		deliver_clone(prev, skb, __packet_hook);
	else
		__packet_hook(prev, skb);
	return;

out:
	if (!skb0)
		kfree_skb(skb);
}

/* called with rcu_read_lock */
void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
			  struct sk_buff *skb)
{
	br_multicast_flood(mdst, skb, NULL, __br_deliver);
}

/* called with rcu_read_lock */
void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
			  struct sk_buff *skb, struct sk_buff *skb2)
{
	br_multicast_flood(mdst, skb, skb2, __br_forward);
}
#endif
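
/*
 * Usage sketch (illustrative only, not part of this file): the ingress
 * path in br_input.c chooses between these entry points roughly like so,
 * where "dst" is the FDB lookup result and "skb2" is a copy headed for
 * the local bridge device:
 *
 *	if (dst)
 *		br_forward(dst->dst, skb, skb2);	   // known unicast
 *	else
 *		br_flood_forward(br, skb, skb2, unicast);  // flood unknown
 *
 * Locally originated frames sent through the bridge device take the
 * br_deliver()/br_flood_deliver() variants from br_device.c instead.
 */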