br_vlan.c

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <net/switchdev.h>

#include "br_private.h"
#include "br_private_tunnel.h"
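
/* Every bridge and every bridge port carries a net_bridge_vlan_group: an
 * rhashtable keyed by VLAN id for fast lookups, plus a vid-sorted,
 * RCU-protected list for ordered traversal.
 */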
static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
                              const void *ptr)
{
        const struct net_bridge_vlan *vle = ptr;
        u16 vid = *(u16 *)arg->key;

        return vle->vid != vid;
}

static const struct rhashtable_params br_vlan_rht_params = {
        .head_offset = offsetof(struct net_bridge_vlan, vnode),
        .key_offset = offsetof(struct net_bridge_vlan, vid),
        .key_len = sizeof(u16),
        .nelem_hint = 3,
        .locks_mul = 1,
        .max_size = VLAN_N_VID,
        .obj_cmpfn = br_vlan_cmp,
        .automatic_shrinking = true,
};

static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
{
        return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
}
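
/* vg->pvid is read locklessly on the data path via br_get_pvid(); the
 * smp_wmb() in the helpers below is meant to pair with the read side there,
 * so a reader should never observe a pvid before the VLAN entry backing it
 * is visible.
 */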
static void __vlan_add_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
        if (vg->pvid == vid)
                return;

        smp_wmb();
        vg->pvid = vid;
}

static void __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
        if (vg->pvid != vid)
                return;

        smp_wmb();
        vg->pvid = 0;
}

static void __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
{
        struct net_bridge_vlan_group *vg;

        if (br_vlan_is_master(v))
                vg = br_vlan_group(v->br);
        else
                vg = nbp_vlan_group(v->port);

        if (flags & BRIDGE_VLAN_INFO_PVID)
                __vlan_add_pvid(vg, v->vid);
        else
                __vlan_delete_pvid(vg, v->vid);

        if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
                v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
        else
                v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
}
static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
                          u16 vid, u16 flags)
{
        struct switchdev_obj_port_vlan v = {
                .obj.orig_dev = dev,
                .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
                .flags = flags,
                .vid_begin = vid,
                .vid_end = vid,
        };
        int err;

        /* Try the switchdev op first. If it is not supported, fall back
         * to the 8021q add.
         */
        err = switchdev_port_obj_add(dev, &v.obj);
        if (err == -EOPNOTSUPP)
                return vlan_vid_add(dev, br->vlan_proto, vid);
        return err;
}
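
/* Insert the new entry into the per-group list, which is kept sorted by
 * ascending vid. The backwards walk makes the common append-at-the-tail
 * case cheap.
 */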
static void __vlan_add_list(struct net_bridge_vlan *v)
{
        struct net_bridge_vlan_group *vg;
        struct list_head *headp, *hpos;
        struct net_bridge_vlan *vent;

        if (br_vlan_is_master(v))
                vg = br_vlan_group(v->br);
        else
                vg = nbp_vlan_group(v->port);

        headp = &vg->vlan_list;
        list_for_each_prev(hpos, headp) {
                vent = list_entry(hpos, struct net_bridge_vlan, vlist);
                if (v->vid < vent->vid)
                        continue;
                else
                        break;
        }
        list_add_rcu(&v->vlist, hpos);
}

static void __vlan_del_list(struct net_bridge_vlan *v)
{
        list_del_rcu(&v->vlist);
}
static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
                          u16 vid)
{
        struct switchdev_obj_port_vlan v = {
                .obj.orig_dev = dev,
                .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
                .vid_begin = vid,
                .vid_end = vid,
        };
        int err;

        /* Try the switchdev op first. If it is not supported, fall back
         * to the 8021q del.
         */
        err = switchdev_port_obj_del(dev, &v.obj);
        if (err == -EOPNOTSUPP) {
                vlan_vid_del(dev, br->vlan_proto, vid);
                return 0;
        }
        return err;
}
/* Returns the master vlan; if it doesn't exist it gets created. In all cases
 * a reference is taken to the master vlan before returning.
 */
static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid)
{
        struct net_bridge_vlan_group *vg;
        struct net_bridge_vlan *masterv;

        vg = br_vlan_group(br);
        masterv = br_vlan_find(vg, vid);
        if (!masterv) {
                /* missing global ctx, create it now */
                if (br_vlan_add(br, vid, 0))
                        return NULL;
                masterv = br_vlan_find(vg, vid);
                if (WARN_ON(!masterv))
                        return NULL;
        }
        atomic_inc(&masterv->refcnt);

        return masterv;
}
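
/* A master (bridge-level) vlan is refcounted: each port vlan pointing at it
 * via ->brvlan holds a reference, and so does the bridge entry itself. Once
 * the last reference is dropped, the entry is unlinked and freed after an
 * RCU grace period.
 */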
static void br_master_vlan_rcu_free(struct rcu_head *rcu)
{
        struct net_bridge_vlan *v;

        v = container_of(rcu, struct net_bridge_vlan, rcu);
        WARN_ON(!br_vlan_is_master(v));
        free_percpu(v->stats);
        v->stats = NULL;
        kfree(v);
}

static void br_vlan_put_master(struct net_bridge_vlan *masterv)
{
        struct net_bridge_vlan_group *vg;

        if (!br_vlan_is_master(masterv))
                return;

        vg = br_vlan_group(masterv->br);
        if (atomic_dec_and_test(&masterv->refcnt)) {
                rhashtable_remove_fast(&vg->vlan_hash,
                                       &masterv->vnode, br_vlan_rht_params);
                __vlan_del_list(masterv);
                call_rcu(&masterv->rcu, br_master_vlan_rcu_free);
        }
}
/* This is the shared VLAN add function which works for both ports and bridge
 * devices. There are four possible calls to this function in terms of the
 * vlan entry type:
 * 1. vlan is being added on a port (no master flags, global entry exists)
 * 2. vlan is being added on a bridge (both master and brentry flags)
 * 3. vlan is being added on a port, but a global entry didn't exist which
 *    is being created right now (master flag set, brentry flag unset), the
 *    global entry is used for global per-vlan features, but not for filtering
 * 4. same as 3 but with both master and brentry flags set so the entry
 *    will be used for filtering in both the port and the bridge
 */
static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
{
        struct net_bridge_vlan *masterv = NULL;
        struct net_bridge_port *p = NULL;
        struct net_bridge_vlan_group *vg;
        struct net_device *dev;
        struct net_bridge *br;
        int err;

        if (br_vlan_is_master(v)) {
                br = v->br;
                dev = br->dev;
                vg = br_vlan_group(br);
        } else {
                p = v->port;
                br = p->br;
                dev = p->dev;
                vg = nbp_vlan_group(p);
        }

        if (p) {
                /* Add VLAN to the device filter if it is supported.
                 * This ensures tagged traffic enters the bridge when
                 * promiscuous mode is disabled by br_manage_promisc().
                 */
                err = __vlan_vid_add(dev, br, v->vid, flags);
                if (err)
                        goto out;

                /* need to work on the master vlan too */
                if (flags & BRIDGE_VLAN_INFO_MASTER) {
                        err = br_vlan_add(br, v->vid, flags |
                                                      BRIDGE_VLAN_INFO_BRENTRY);
                        if (err)
                                goto out_filt;
                }

                masterv = br_vlan_get_master(br, v->vid);
                if (!masterv) {
                        err = -ENOMEM;
                        goto out_filt;
                }
                v->brvlan = masterv;
                v->stats = masterv->stats;
        }

        /* Add the dev mac and count the vlan only if it's usable */
        if (br_vlan_should_use(v)) {
                err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
                if (err) {
                        br_err(br, "failed to insert local address into bridge forwarding table\n");
                        goto out_filt;
                }
                vg->num_vlans++;
        }

        err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
                                            br_vlan_rht_params);
        if (err)
                goto out_fdb_insert;

        __vlan_add_list(v);
        __vlan_add_flags(v, flags);
out:
        return err;

out_fdb_insert:
        if (br_vlan_should_use(v)) {
                br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
                vg->num_vlans--;
        }

out_filt:
        if (p) {
                __vlan_vid_del(dev, br, v->vid);
                if (masterv) {
                        br_vlan_put_master(masterv);
                        v->brvlan = NULL;
                }
        }

        goto out;
}
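
/* Teardown mirrors __vlan_add: drop the pvid if it points at this entry,
 * remove the device filter for port vlans, then unlink the entry and free
 * it via RCU. A port vlan also drops its reference on the master entry,
 * which may free that as well.
 */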
static int __vlan_del(struct net_bridge_vlan *v)
{
        struct net_bridge_vlan *masterv = v;
        struct net_bridge_vlan_group *vg;
        struct net_bridge_port *p = NULL;
        int err = 0;

        if (br_vlan_is_master(v)) {
                vg = br_vlan_group(v->br);
        } else {
                p = v->port;
                vg = nbp_vlan_group(v->port);
                masterv = v->brvlan;
        }

        __vlan_delete_pvid(vg, v->vid);
        if (p) {
                err = __vlan_vid_del(p->dev, p->br, v->vid);
                if (err)
                        goto out;
        }

        if (br_vlan_should_use(v)) {
                v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
                vg->num_vlans--;
        }

        if (masterv != v) {
                vlan_tunnel_info_del(vg, v);
                rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
                                       br_vlan_rht_params);
                __vlan_del_list(v);
                kfree_rcu(v, rcu);
        }

        br_vlan_put_master(masterv);
out:
        return err;
}

static void __vlan_group_free(struct net_bridge_vlan_group *vg)
{
        WARN_ON(!list_empty(&vg->vlan_list));
        rhashtable_destroy(&vg->vlan_hash);
        vlan_tunnel_deinit(vg);
        kfree(vg);
}

static void __vlan_flush(struct net_bridge_vlan_group *vg)
{
        struct net_bridge_vlan *vlan, *tmp;

        __vlan_delete_pvid(vg, vg->pvid);
        list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist)
                __vlan_del(vlan);
}
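
/* Egress handling: called before a frame leaves through a port or is passed
 * up to the bridge device itself. Decides whether the frame goes out tagged
 * or untagged based on the vlan entry's UNTAGGED flag, and accounts TX stats
 * when they are enabled.
 */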
struct sk_buff *br_handle_vlan(struct net_bridge *br,
                               const struct net_bridge_port *p,
                               struct net_bridge_vlan_group *vg,
                               struct sk_buff *skb)
{
        struct br_vlan_stats *stats;
        struct net_bridge_vlan *v;
        u16 vid;

        /* If this packet was not filtered at input, let it pass */
        if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
                goto out;

        /* At this point, we know that the frame was filtered and contains
         * a valid vlan id. If the vlan id has the untagged flag set,
         * send untagged; otherwise, send tagged.
         */
        br_vlan_get_tag(skb, &vid);
        v = br_vlan_find(vg, vid);
        /* Vlan entry must be configured at this point. The only exception
         * is when the bridge is in promiscuous mode and the packet is
         * destined for the bridge device. In this case pass the packet as is.
         */
        if (!v || !br_vlan_should_use(v)) {
                if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
                        goto out;
                } else {
                        kfree_skb(skb);
                        return NULL;
                }
        }

        if (br->vlan_stats_enabled) {
                stats = this_cpu_ptr(v->stats);
                u64_stats_update_begin(&stats->syncp);
                stats->tx_bytes += skb->len;
                stats->tx_packets++;
                u64_stats_update_end(&stats->syncp);
        }

        if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
                skb->vlan_tci = 0;

        if (p && (p->flags & BR_VLAN_TUNNEL) &&
            br_handle_egress_vlan_tunnel(skb, v)) {
                kfree_skb(skb);
                return NULL;
        }
out:
        return skb;
}
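
/* Ingress classification: frames arrive tagged, untagged, or
 * priority-tagged (a tag with VID 0). The latter two are mapped onto the
 * port's pvid, if one is configured, before the membership lookup.
 */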
/* Called under RCU */
static bool __allowed_ingress(const struct net_bridge *br,
                              struct net_bridge_vlan_group *vg,
                              struct sk_buff *skb, u16 *vid)
{
        struct br_vlan_stats *stats;
        struct net_bridge_vlan *v;
        bool tagged;

        BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
        /* If vlan tx offload is disabled on the bridge device and the frame
         * was sent from a vlan device on the bridge device, it does not have
         * a HW accelerated vlan tag.
         */
        if (unlikely(!skb_vlan_tag_present(skb) &&
                     skb->protocol == br->vlan_proto)) {
                skb = skb_vlan_untag(skb);
                if (unlikely(!skb))
                        return false;
        }

        if (!br_vlan_get_tag(skb, vid)) {
                /* Tagged frame */
                if (skb->vlan_proto != br->vlan_proto) {
                        /* Protocol-mismatch, empty out vlan_tci for new tag */
                        skb_push(skb, ETH_HLEN);
                        skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
                                                        skb_vlan_tag_get(skb));
                        if (unlikely(!skb))
                                return false;

                        skb_pull(skb, ETH_HLEN);
                        skb_reset_mac_len(skb);
                        *vid = 0;
                        tagged = false;
                } else {
                        tagged = true;
                }
        } else {
                /* Untagged frame */
                tagged = false;
        }

        if (!*vid) {
                u16 pvid = br_get_pvid(vg);

                /* Frame had a tag with VID 0 or did not have a tag.
                 * See if pvid is set on this port. That tells us which
                 * vlan untagged or priority-tagged traffic belongs to.
                 */
                if (!pvid)
                        goto drop;

                /* PVID is set on this port. Any untagged or priority-tagged
                 * ingress frame is considered to belong to this vlan.
                 */
                *vid = pvid;
                if (likely(!tagged))
                        /* Untagged Frame. */
                        __vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
                else
                        /* Priority-tagged Frame.
                         * At this point, we know that skb->vlan_tci had the
                         * VLAN_TAG_PRESENT bit set and its VID field was
                         * 0x000. We update only the VID field and preserve
                         * the PCP field.
                         */
                        skb->vlan_tci |= pvid;

                /* if stats are disabled we can avoid the lookup */
                if (!br->vlan_stats_enabled)
                        return true;
        }
        v = br_vlan_find(vg, *vid);
        if (!v || !br_vlan_should_use(v))
                goto drop;

        if (br->vlan_stats_enabled) {
                stats = this_cpu_ptr(v->stats);
                u64_stats_update_begin(&stats->syncp);
                stats->rx_bytes += skb->len;
                stats->rx_packets++;
                u64_stats_update_end(&stats->syncp);
        }

        return true;

drop:
        kfree_skb(skb);
        return false;
}
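
/* Entry point for ingress filtering. When vlan filtering is disabled on the
 * bridge, the frame is only marked as unfiltered, which tells the egress
 * side to skip its checks as well.
 */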
bool br_allowed_ingress(const struct net_bridge *br,
                        struct net_bridge_vlan_group *vg, struct sk_buff *skb,
                        u16 *vid)
{
        /* If VLAN filtering is disabled on the bridge, all packets are
         * permitted.
         */
        if (!br->vlan_enabled) {
                BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
                return true;
        }

        return __allowed_ingress(br, vg, skb, vid);
}
/* Called under RCU. */
bool br_allowed_egress(struct net_bridge_vlan_group *vg,
                       const struct sk_buff *skb)
{
        const struct net_bridge_vlan *v;
        u16 vid;

        /* If this packet was not filtered at input, let it pass */
        if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
                return true;

        br_vlan_get_tag(skb, &vid);
        v = br_vlan_find(vg, vid);
        if (v && br_vlan_should_use(v))
                return true;

        return false;
}
/* Called under RCU */
bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
{
        struct net_bridge_vlan_group *vg;
        struct net_bridge *br = p->br;

        /* If filtering was disabled at input, let it pass. */
        if (!br->vlan_enabled)
                return true;

        vg = nbp_vlan_group_rcu(p);
        if (!vg || !vg->num_vlans)
                return false;

        if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
                *vid = 0;

        if (!*vid) {
                *vid = br_get_pvid(vg);
                if (!*vid)
                        return false;

                return true;
        }

        if (br_vlan_find(vg, *vid))
                return true;

        return false;
}
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
{
        struct net_bridge_vlan_group *vg;
        struct net_bridge_vlan *vlan;
        int ret;

        ASSERT_RTNL();

        vg = br_vlan_group(br);
        vlan = br_vlan_find(vg, vid);
        if (vlan) {
                if (!br_vlan_is_brentry(vlan)) {
                        /* Trying to change flags of non-existent bridge vlan */
                        if (!(flags & BRIDGE_VLAN_INFO_BRENTRY))
                                return -EINVAL;
                        /* It was only kept for port vlans, now make it real */
                        ret = br_fdb_insert(br, NULL, br->dev->dev_addr,
                                            vlan->vid);
                        if (ret) {
                                br_err(br, "failed to insert local address into bridge forwarding table\n");
                                return ret;
                        }
                        atomic_inc(&vlan->refcnt);
                        vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
                        vg->num_vlans++;
                }
                __vlan_add_flags(vlan, flags);
                return 0;
        }

        vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
        if (!vlan)
                return -ENOMEM;

        vlan->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
        if (!vlan->stats) {
                kfree(vlan);
                return -ENOMEM;
        }
        vlan->vid = vid;
        vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
        vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
        vlan->br = br;
        if (flags & BRIDGE_VLAN_INFO_BRENTRY)
                atomic_set(&vlan->refcnt, 1);
        ret = __vlan_add(vlan, flags);
        if (ret) {
                free_percpu(vlan->stats);
                kfree(vlan);
        }

        return ret;
}
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int br_vlan_delete(struct net_bridge *br, u16 vid)
{
        struct net_bridge_vlan_group *vg;
        struct net_bridge_vlan *v;

        ASSERT_RTNL();

        vg = br_vlan_group(br);
        v = br_vlan_find(vg, vid);
        if (!v || !br_vlan_is_brentry(v))
                return -ENOENT;

        br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
        br_fdb_delete_by_port(br, NULL, vid, 0);

        vlan_tunnel_info_del(vg, v);

        return __vlan_del(v);
}
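
/* Tear down all bridge-level vlans. The group pointer is unpublished first
 * and only freed after a grace period, so RCU readers still walking it see
 * a consistent (if empty) structure.
 */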
void br_vlan_flush(struct net_bridge *br)
{
        struct net_bridge_vlan_group *vg;

        ASSERT_RTNL();

        vg = br_vlan_group(br);
        __vlan_flush(vg);
        RCU_INIT_POINTER(br->vlgrp, NULL);
        synchronize_rcu();
        __vlan_group_free(vg);
}
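
/* NULL-tolerant lookup helper: callers on the RCU fast path may hold a
 * group pointer that has already been cleared.
 */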
struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
{
        if (!vg)
                return NULL;

        return br_vlan_lookup(&vg->vlan_hash, vid);
}
/* Must be protected by RTNL. */
static void recalculate_group_addr(struct net_bridge *br)
{
        if (br->group_addr_set)
                return;

        spin_lock_bh(&br->lock);
        if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q)) {
                /* Bridge Group Address */
                br->group_addr[5] = 0x00;
        } else { /* vlan_enabled && ETH_P_8021AD */
                /* Provider Bridge Group Address */
                br->group_addr[5] = 0x08;
        }
        spin_unlock_bh(&br->lock);
}

/* Must be protected by RTNL. */
void br_recalculate_fwd_mask(struct net_bridge *br)
{
        if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q))
                br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
        else /* vlan_enabled && ETH_P_8021AD */
                br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
                                              ~(1u << br->group_addr[5]);
}
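
/* Toggling vlan filtering is offered to the switchdev driver first; any
 * error other than -EOPNOTSUPP aborts the change. Promiscuity, the group
 * address and the group forward mask are then recomputed, since all of
 * them depend on the filtering state and the configured protocol.
 */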
int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
        struct switchdev_attr attr = {
                .orig_dev = br->dev,
                .id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
                .flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
                .u.vlan_filtering = val,
        };
        int err;

        if (br->vlan_enabled == val)
                return 0;

        err = switchdev_port_attr_set(br->dev, &attr);
        if (err && err != -EOPNOTSUPP)
                return err;

        br->vlan_enabled = val;
        br_manage_promisc(br);
        recalculate_group_addr(br);
        br_recalculate_fwd_mask(br);

        return 0;
}

int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
        return __br_vlan_filter_toggle(br, val);
}
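
/* Switching between 802.1Q and 802.1ad is done make-before-break: every
 * port's device filter is first populated with its VIDs under the new
 * protocol, and only after the switch are the old-protocol entries removed.
 * On failure, everything added so far is unwound.
 */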
int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
{
        int err = 0;
        struct net_bridge_port *p;
        struct net_bridge_vlan *vlan;
        struct net_bridge_vlan_group *vg;
        __be16 oldproto;

        if (br->vlan_proto == proto)
                return 0;

        /* Add VLANs for the new proto to the device filter. */
        list_for_each_entry(p, &br->port_list, list) {
                vg = nbp_vlan_group(p);
                list_for_each_entry(vlan, &vg->vlan_list, vlist) {
                        err = vlan_vid_add(p->dev, proto, vlan->vid);
                        if (err)
                                goto err_filt;
                }
        }

        oldproto = br->vlan_proto;
        br->vlan_proto = proto;

        recalculate_group_addr(br);
        br_recalculate_fwd_mask(br);

        /* Delete VLANs for the old proto from the device filter. */
        list_for_each_entry(p, &br->port_list, list) {
                vg = nbp_vlan_group(p);
                list_for_each_entry(vlan, &vg->vlan_list, vlist)
                        vlan_vid_del(p->dev, oldproto, vlan->vid);
        }

        return 0;

err_filt:
        list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
                vlan_vid_del(p->dev, proto, vlan->vid);

        list_for_each_entry_continue_reverse(p, &br->port_list, list) {
                vg = nbp_vlan_group(p);
                list_for_each_entry(vlan, &vg->vlan_list, vlist)
                        vlan_vid_del(p->dev, proto, vlan->vid);
        }

        return err;
}
int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
{
        if (val != ETH_P_8021Q && val != ETH_P_8021AD)
                return -EPROTONOSUPPORT;

        return __br_vlan_set_proto(br, htons(val));
}

int br_vlan_set_stats(struct net_bridge *br, unsigned long val)
{
        switch (val) {
        case 0:
        case 1:
                br->vlan_stats_enabled = val;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}
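
/* Returns true only if vid is still the group's pvid and the entry still
 * looks like the auto-installed default (usable and untagged), i.e. the
 * user has not repurposed it.
 */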
static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
        struct net_bridge_vlan *v;

        if (vid != vg->pvid)
                return false;

        v = br_vlan_lookup(&vg->vlan_hash, vid);
        if (v && br_vlan_should_use(v) &&
            (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
                return true;

        return false;
}

static void br_vlan_disable_default_pvid(struct net_bridge *br)
{
        struct net_bridge_port *p;
        u16 pvid = br->default_pvid;

        /* Disable default_pvid on all ports where it is still
         * configured.
         */
        if (vlan_default_pvid(br_vlan_group(br), pvid))
                br_vlan_delete(br, pvid);

        list_for_each_entry(p, &br->port_list, list) {
                if (vlan_default_pvid(nbp_vlan_group(p), pvid))
                        nbp_vlan_delete(p, pvid);
        }

        br->default_pvid = 0;
}
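
/* The 'changed' bitmap records which ports (bit 0 standing for the bridge
 * itself) actually had their pvid moved, so the error path can restore the
 * old pvid on exactly those ports and leave user-configured ones alone.
 */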
int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
{
        const struct net_bridge_vlan *pvent;
        struct net_bridge_vlan_group *vg;
        struct net_bridge_port *p;
        unsigned long *changed;
        u16 old_pvid;
        int err = 0;

        if (!pvid) {
                br_vlan_disable_default_pvid(br);
                return 0;
        }

        changed = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long),
                          GFP_KERNEL);
        if (!changed)
                return -ENOMEM;

        old_pvid = br->default_pvid;

        /* Update default_pvid config only if we do not conflict with
         * user configuration.
         */
        vg = br_vlan_group(br);
        pvent = br_vlan_find(vg, pvid);
        if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
            (!pvent || !br_vlan_should_use(pvent))) {
                err = br_vlan_add(br, pvid,
                                  BRIDGE_VLAN_INFO_PVID |
                                  BRIDGE_VLAN_INFO_UNTAGGED |
                                  BRIDGE_VLAN_INFO_BRENTRY);
                if (err)
                        goto out;
                br_vlan_delete(br, old_pvid);
                set_bit(0, changed);
        }

        list_for_each_entry(p, &br->port_list, list) {
                /* Update default_pvid config only if we do not conflict with
                 * user configuration.
                 */
                vg = nbp_vlan_group(p);
                if ((old_pvid &&
                     !vlan_default_pvid(vg, old_pvid)) ||
                    br_vlan_find(vg, pvid))
                        continue;

                err = nbp_vlan_add(p, pvid,
                                   BRIDGE_VLAN_INFO_PVID |
                                   BRIDGE_VLAN_INFO_UNTAGGED);
                if (err)
                        goto err_port;
                nbp_vlan_delete(p, old_pvid);
                set_bit(p->port_no, changed);
        }

        br->default_pvid = pvid;

out:
        kfree(changed);
        return err;

err_port:
        list_for_each_entry_continue_reverse(p, &br->port_list, list) {
                if (!test_bit(p->port_no, changed))
                        continue;

                if (old_pvid)
                        nbp_vlan_add(p, old_pvid,
                                     BRIDGE_VLAN_INFO_PVID |
                                     BRIDGE_VLAN_INFO_UNTAGGED);
                nbp_vlan_delete(p, pvid);
        }

        if (test_bit(0, changed)) {
                if (old_pvid)
                        br_vlan_add(br, old_pvid,
                                    BRIDGE_VLAN_INFO_PVID |
                                    BRIDGE_VLAN_INFO_UNTAGGED |
                                    BRIDGE_VLAN_INFO_BRENTRY);
                br_vlan_delete(br, pvid);
        }
        goto out;
}
int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val)
{
        u16 pvid = val;
        int err = 0;

        if (val >= VLAN_VID_MASK)
                return -EINVAL;

        if (pvid == br->default_pvid)
                goto out;

        /* Only allow default pvid change when filtering is disabled */
        if (br->vlan_enabled) {
                pr_info_once("Please disable vlan filtering to change default_pvid\n");
                err = -EPERM;
                goto out;
        }
        err = __br_vlan_set_default_pvid(br, pvid);
out:
        return err;
}
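
/* Bridge-side init: set up the vlan group, default to 802.1Q with
 * default_pvid 1, and install VLAN 1 as an untagged pvid bridge entry.
 */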
int br_vlan_init(struct net_bridge *br)
{
        struct net_bridge_vlan_group *vg;
        int ret = -ENOMEM;

        vg = kzalloc(sizeof(*vg), GFP_KERNEL);
        if (!vg)
                goto out;
        ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
        if (ret)
                goto err_rhtbl;
        ret = vlan_tunnel_init(vg);
        if (ret)
                goto err_tunnel_init;
        INIT_LIST_HEAD(&vg->vlan_list);
        br->vlan_proto = htons(ETH_P_8021Q);
        br->default_pvid = 1;
        rcu_assign_pointer(br->vlgrp, vg);
        ret = br_vlan_add(br, 1,
                          BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED |
                          BRIDGE_VLAN_INFO_BRENTRY);
        if (ret)
                goto err_vlan_add;

out:
        return ret;

err_vlan_add:
        vlan_tunnel_deinit(vg);
err_tunnel_init:
        rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
        kfree(vg);

        goto out;
}
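
/* Port-side init mirrors br_vlan_init and additionally pushes the bridge's
 * current vlan_filtering state down to the port via switchdev; the bridge's
 * default_pvid, if set, is installed as the port's untagged pvid.
 */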
int nbp_vlan_init(struct net_bridge_port *p)
{
        struct switchdev_attr attr = {
                .orig_dev = p->br->dev,
                .id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
                .flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
                .u.vlan_filtering = p->br->vlan_enabled,
        };
        struct net_bridge_vlan_group *vg;
        int ret = -ENOMEM;

        vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
        if (!vg)
                goto out;

        ret = switchdev_port_attr_set(p->dev, &attr);
        if (ret && ret != -EOPNOTSUPP)
                goto err_vlan_enabled;

        ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
        if (ret)
                goto err_rhtbl;
        ret = vlan_tunnel_init(vg);
        if (ret)
                goto err_tunnel_init;
        INIT_LIST_HEAD(&vg->vlan_list);
        rcu_assign_pointer(p->vlgrp, vg);
        if (p->br->default_pvid) {
                ret = nbp_vlan_add(p, p->br->default_pvid,
                                   BRIDGE_VLAN_INFO_PVID |
                                   BRIDGE_VLAN_INFO_UNTAGGED);
                if (ret)
                        goto err_vlan_add;
        }
out:
        return ret;

err_vlan_add:
        RCU_INIT_POINTER(p->vlgrp, NULL);
        synchronize_rcu();
        vlan_tunnel_deinit(vg);
err_tunnel_init:
        rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
err_vlan_enabled:
        kfree(vg);

        goto out;
}
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
{
        struct switchdev_obj_port_vlan v = {
                .obj.orig_dev = port->dev,
                .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
                .flags = flags,
                .vid_begin = vid,
                .vid_end = vid,
        };
        struct net_bridge_vlan *vlan;
        int ret;

        ASSERT_RTNL();

        vlan = br_vlan_find(nbp_vlan_group(port), vid);
        if (vlan) {
                /* Pass the flags to the hardware bridge */
                ret = switchdev_port_obj_add(port->dev, &v.obj);
                if (ret && ret != -EOPNOTSUPP)
                        return ret;
                __vlan_add_flags(vlan, flags);
                return 0;
        }

        vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
        if (!vlan)
                return -ENOMEM;

        vlan->vid = vid;
        vlan->port = port;
        ret = __vlan_add(vlan, flags);
        if (ret)
                kfree(vlan);

        return ret;
}
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
{
        struct net_bridge_vlan *v;

        ASSERT_RTNL();

        v = br_vlan_find(nbp_vlan_group(port), vid);
        if (!v)
                return -ENOENT;
        br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
        br_fdb_delete_by_port(port->br, port, vid, 0);

        return __vlan_del(v);
}
void nbp_vlan_flush(struct net_bridge_port *port)
{
        struct net_bridge_vlan_group *vg;

        ASSERT_RTNL();

        vg = nbp_vlan_group(port);
        __vlan_flush(vg);
        RCU_INIT_POINTER(port->vlgrp, NULL);
        synchronize_rcu();
        __vlan_group_free(vg);
}
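
/* Fold the per-cpu counters into a single struct. The u64_stats seqcount
 * retry loop makes the 64-bit reads safe even on 32-bit hosts.
 */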
void br_vlan_get_stats(const struct net_bridge_vlan *v,
                       struct br_vlan_stats *stats)
{
        int i;

        memset(stats, 0, sizeof(*stats));
        for_each_possible_cpu(i) {
                u64 rxpackets, rxbytes, txpackets, txbytes;
                struct br_vlan_stats *cpu_stats;
                unsigned int start;

                cpu_stats = per_cpu_ptr(v->stats, i);
                do {
                        start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
                        rxpackets = cpu_stats->rx_packets;
                        rxbytes = cpu_stats->rx_bytes;
                        txbytes = cpu_stats->tx_bytes;
                        txpackets = cpu_stats->tx_packets;
                } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

                stats->rx_packets += rxpackets;
                stats->rx_bytes += rxbytes;
                stats->tx_bytes += txbytes;
                stats->tx_packets += txpackets;
        }
}