br_vlan.c 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113
  1. #include <linux/kernel.h>
  2. #include <linux/netdevice.h>
  3. #include <linux/rtnetlink.h>
  4. #include <linux/slab.h>
  5. #include <net/switchdev.h>
  6. #include "br_private.h"
  7. #include "br_private_tunnel.h"
  8. static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
  9. const void *ptr)
  10. {
  11. const struct net_bridge_vlan *vle = ptr;
  12. u16 vid = *(u16 *)arg->key;
  13. return vle->vid != vid;
  14. }
/* rhashtable layout for bridge/port VLAN lookup, keyed by the 16-bit vid */
static const struct rhashtable_params br_vlan_rht_params = {
	.head_offset = offsetof(struct net_bridge_vlan, vnode),
	.key_offset = offsetof(struct net_bridge_vlan, vid),
	.key_len = sizeof(u16),
	.nelem_hint = 3,
	.locks_mul = 1,
	.max_size = VLAN_N_VID,		/* at most 4096 vlan ids exist */
	.obj_cmpfn = br_vlan_cmp,
	.automatic_shrinking = true,
};
  25. static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
  26. {
  27. return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
  28. }
/* Set @vid as the group's PVID.  The write barrier orders any prior
 * vlan entry setup before lockless readers can observe the new pvid.
 */
static void __vlan_add_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (vg->pvid == vid)
		return;

	smp_wmb();
	vg->pvid = vid;
}
/* Clear the group's PVID, but only if it currently is @vid.  The write
 * barrier mirrors the one in __vlan_add_pvid() for lockless readers.
 */
static void __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (vg->pvid != vid)
		return;

	smp_wmb();
	vg->pvid = 0;
}
  43. static void __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
  44. {
  45. struct net_bridge_vlan_group *vg;
  46. if (br_vlan_is_master(v))
  47. vg = br_vlan_group(v->br);
  48. else
  49. vg = nbp_vlan_group(v->port);
  50. if (flags & BRIDGE_VLAN_INFO_PVID)
  51. __vlan_add_pvid(vg, v->vid);
  52. else
  53. __vlan_delete_pvid(vg, v->vid);
  54. if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
  55. v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
  56. else
  57. v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
  58. }
  59. static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
  60. u16 vid, u16 flags)
  61. {
  62. struct switchdev_obj_port_vlan v = {
  63. .obj.orig_dev = dev,
  64. .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
  65. .flags = flags,
  66. .vid_begin = vid,
  67. .vid_end = vid,
  68. };
  69. int err;
  70. /* Try switchdev op first. In case it is not supported, fallback to
  71. * 8021q add.
  72. */
  73. err = switchdev_port_obj_add(dev, &v.obj);
  74. if (err == -EOPNOTSUPP)
  75. return vlan_vid_add(dev, br->vlan_proto, vid);
  76. return err;
  77. }
  78. static void __vlan_add_list(struct net_bridge_vlan *v)
  79. {
  80. struct net_bridge_vlan_group *vg;
  81. struct list_head *headp, *hpos;
  82. struct net_bridge_vlan *vent;
  83. if (br_vlan_is_master(v))
  84. vg = br_vlan_group(v->br);
  85. else
  86. vg = nbp_vlan_group(v->port);
  87. headp = &vg->vlan_list;
  88. list_for_each_prev(hpos, headp) {
  89. vent = list_entry(hpos, struct net_bridge_vlan, vlist);
  90. if (v->vid < vent->vid)
  91. continue;
  92. else
  93. break;
  94. }
  95. list_add_rcu(&v->vlist, hpos);
  96. }
/* RCU-safe removal of a vlan entry from its group's ordered list. */
static void __vlan_del_list(struct net_bridge_vlan *v)
{
	list_del_rcu(&v->vlist);
}
  101. static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
  102. u16 vid)
  103. {
  104. struct switchdev_obj_port_vlan v = {
  105. .obj.orig_dev = dev,
  106. .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
  107. .vid_begin = vid,
  108. .vid_end = vid,
  109. };
  110. int err;
  111. /* Try switchdev op first. In case it is not supported, fallback to
  112. * 8021q del.
  113. */
  114. err = switchdev_port_obj_del(dev, &v.obj);
  115. if (err == -EOPNOTSUPP) {
  116. vlan_vid_del(dev, br->vlan_proto, vid);
  117. return 0;
  118. }
  119. return err;
  120. }
/* Returns the master (bridge-global) vlan for @vid; if it didn't exist
 * it gets created.  In all success cases a reference is taken on the
 * master vlan before returning; callers release it with
 * br_vlan_put_master().
 */
static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *masterv;

	vg = br_vlan_group(br);
	masterv = br_vlan_find(vg, vid);
	if (!masterv) {
		/* missing global ctx, create it now */
		if (br_vlan_add(br, vid, 0))
			return NULL;
		/* the add above must have made the entry findable */
		masterv = br_vlan_find(vg, vid);
		if (WARN_ON(!masterv))
			return NULL;
	}
	refcount_inc(&masterv->refcnt);

	return masterv;
}
  141. static void br_master_vlan_rcu_free(struct rcu_head *rcu)
  142. {
  143. struct net_bridge_vlan *v;
  144. v = container_of(rcu, struct net_bridge_vlan, rcu);
  145. WARN_ON(!br_vlan_is_master(v));
  146. free_percpu(v->stats);
  147. v->stats = NULL;
  148. kfree(v);
  149. }
/* Drop a reference on a master vlan; on the final put the entry is
 * unlinked from the hash and list and freed after an RCU grace period.
 */
static void br_vlan_put_master(struct net_bridge_vlan *masterv)
{
	struct net_bridge_vlan_group *vg;

	/* only master entries are refcounted */
	if (!br_vlan_is_master(masterv))
		return;

	vg = br_vlan_group(masterv->br);
	if (refcount_dec_and_test(&masterv->refcnt)) {
		rhashtable_remove_fast(&vg->vlan_hash,
				       &masterv->vnode, br_vlan_rht_params);
		__vlan_del_list(masterv);
		call_rcu(&masterv->rcu, br_master_vlan_rcu_free);
	}
}
  163. /* This is the shared VLAN add function which works for both ports and bridge
  164. * devices. There are four possible calls to this function in terms of the
  165. * vlan entry type:
  166. * 1. vlan is being added on a port (no master flags, global entry exists)
  167. * 2. vlan is being added on a bridge (both master and brentry flags)
  168. * 3. vlan is being added on a port, but a global entry didn't exist which
  169. * is being created right now (master flag set, brentry flag unset), the
  170. * global entry is used for global per-vlan features, but not for filtering
  171. * 4. same as 3 but with both master and brentry flags set so the entry
  172. * will be used for filtering in both the port and the bridge
  173. */
  174. static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
  175. {
  176. struct net_bridge_vlan *masterv = NULL;
  177. struct net_bridge_port *p = NULL;
  178. struct net_bridge_vlan_group *vg;
  179. struct net_device *dev;
  180. struct net_bridge *br;
  181. int err;
  182. if (br_vlan_is_master(v)) {
  183. br = v->br;
  184. dev = br->dev;
  185. vg = br_vlan_group(br);
  186. } else {
  187. p = v->port;
  188. br = p->br;
  189. dev = p->dev;
  190. vg = nbp_vlan_group(p);
  191. }
  192. if (p) {
  193. /* Add VLAN to the device filter if it is supported.
  194. * This ensures tagged traffic enters the bridge when
  195. * promiscuous mode is disabled by br_manage_promisc().
  196. */
  197. err = __vlan_vid_add(dev, br, v->vid, flags);
  198. if (err)
  199. goto out;
  200. /* need to work on the master vlan too */
  201. if (flags & BRIDGE_VLAN_INFO_MASTER) {
  202. err = br_vlan_add(br, v->vid, flags |
  203. BRIDGE_VLAN_INFO_BRENTRY);
  204. if (err)
  205. goto out_filt;
  206. }
  207. masterv = br_vlan_get_master(br, v->vid);
  208. if (!masterv)
  209. goto out_filt;
  210. v->brvlan = masterv;
  211. v->stats = masterv->stats;
  212. }
  213. /* Add the dev mac and count the vlan only if it's usable */
  214. if (br_vlan_should_use(v)) {
  215. err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
  216. if (err) {
  217. br_err(br, "failed insert local address into bridge forwarding table\n");
  218. goto out_filt;
  219. }
  220. vg->num_vlans++;
  221. }
  222. err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
  223. br_vlan_rht_params);
  224. if (err)
  225. goto out_fdb_insert;
  226. __vlan_add_list(v);
  227. __vlan_add_flags(v, flags);
  228. out:
  229. return err;
  230. out_fdb_insert:
  231. if (br_vlan_should_use(v)) {
  232. br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
  233. vg->num_vlans--;
  234. }
  235. out_filt:
  236. if (p) {
  237. __vlan_vid_del(dev, br, v->vid);
  238. if (masterv) {
  239. br_vlan_put_master(masterv);
  240. v->brvlan = NULL;
  241. }
  242. }
  243. goto out;
  244. }
/* Shared delete path for both port and bridge ("master") vlan entries.
 * Port entries are freed here; the master entry is freed only when its
 * last reference is dropped via br_vlan_put_master().
 */
static int __vlan_del(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan *masterv = v;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	int err = 0;

	if (br_vlan_is_master(v)) {
		vg = br_vlan_group(v->br);
	} else {
		p = v->port;
		vg = nbp_vlan_group(v->port);
		masterv = v->brvlan;
	}

	__vlan_delete_pvid(vg, v->vid);
	if (p) {
		/* drop the vid from the underlying device's filter */
		err = __vlan_vid_del(p->dev, p->br, v->vid);
		if (err)
			goto out;
	}

	if (br_vlan_should_use(v)) {
		v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
		vg->num_vlans--;
	}

	/* unlink and free a port entry; masterv == v means this is the
	 * master itself and its teardown happens in the put below
	 */
	if (masterv != v) {
		vlan_tunnel_info_del(vg, v);
		rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
				       br_vlan_rht_params);
		__vlan_del_list(v);
		kfree_rcu(v, rcu);
	}

	br_vlan_put_master(masterv);
out:
	return err;
}
/* Final teardown of a vlan group; callers must have unpublished the
 * group and waited out an RCU grace period before calling this.
 */
static void __vlan_group_free(struct net_bridge_vlan_group *vg)
{
	/* all vlans must have been deleted by now */
	WARN_ON(!list_empty(&vg->vlan_list));
	rhashtable_destroy(&vg->vlan_hash);
	vlan_tunnel_deinit(vg);
	kfree(vg);
}
/* Delete every vlan in the group.  The _safe iterator is required
 * because __vlan_del() unlinks entries from the list as it goes.
 */
static void __vlan_flush(struct net_bridge_vlan_group *vg)
{
	struct net_bridge_vlan *vlan, *tmp;

	__vlan_delete_pvid(vg, vg->pvid);
	list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist)
		__vlan_del(vlan);
}
/* Egress vlan handling for a filtered packet: strip the tag if the
 * vlan is configured UNTAGGED, account tx stats and apply the egress
 * tunnel policy.  Returns the skb to transmit, or NULL if it was
 * consumed (dropped).
 */
struct sk_buff *br_handle_vlan(struct net_bridge *br,
			       const struct net_bridge_port *p,
			       struct net_bridge_vlan_group *vg,
			       struct sk_buff *skb)
{
	struct br_vlan_stats *stats;
	struct net_bridge_vlan *v;
	u16 vid;

	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
		goto out;

	/* At this point, we know that the frame was filtered and contains
	 * a valid vlan id.  If the vlan id has untagged flag set,
	 * send untagged; otherwise, send tagged.
	 */
	br_vlan_get_tag(skb, &vid);
	v = br_vlan_find(vg, vid);
	/* Vlan entry must be configured at this point.  The
	 * only exception is the bridge is set in promisc mode and the
	 * packet is destined for the bridge device.  In this case
	 * pass the packet as is.
	 */
	if (!v || !br_vlan_should_use(v)) {
		if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
			goto out;
		} else {
			kfree_skb(skb);
			return NULL;
		}
	}
	if (br->vlan_stats_enabled) {
		stats = this_cpu_ptr(v->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->tx_bytes += skb->len;
		stats->tx_packets++;
		u64_stats_update_end(&stats->syncp);
	}

	/* clear the hwaccel tag so the frame leaves untagged */
	if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
		skb->vlan_tci = 0;

	if (p && (p->flags & BR_VLAN_TUNNEL) &&
	    br_handle_egress_vlan_tunnel(skb, v)) {
		kfree_skb(skb);
		return NULL;
	}
out:
	return skb;
}
/* Called under RCU */
/* Ingress vlan filtering: classify the frame into a vlan (tag, or the
 * port's pvid for untagged/priority-tagged frames), account rx stats,
 * and accept or drop it.  On acceptance *vid holds the resolved vlan.
 * Returns true to accept; on false the skb has been freed.
 */
static bool __allowed_ingress(const struct net_bridge *br,
			      struct net_bridge_vlan_group *vg,
			      struct sk_buff *skb, u16 *vid)
{
	struct br_vlan_stats *stats;
	struct net_bridge_vlan *v;
	bool tagged;

	BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
	/* If vlan tx offload is disabled on bridge device and frame was
	 * sent from vlan device on the bridge device, it does not have
	 * HW accelerated vlan tag.
	 */
	if (unlikely(!skb_vlan_tag_present(skb) &&
		     skb->protocol == br->vlan_proto)) {
		skb = skb_vlan_untag(skb);
		if (unlikely(!skb))
			return false;
	}

	if (!br_vlan_get_tag(skb, vid)) {
		/* Tagged frame */
		if (skb->vlan_proto != br->vlan_proto) {
			/* Protocol-mismatch, empty out vlan_tci for new tag */
			skb_push(skb, ETH_HLEN);
			skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
							skb_vlan_tag_get(skb));
			if (unlikely(!skb))
				return false;

			skb_pull(skb, ETH_HLEN);
			skb_reset_mac_len(skb);
			*vid = 0;
			tagged = false;
		} else {
			tagged = true;
		}
	} else {
		/* Untagged frame */
		tagged = false;
	}

	if (!*vid) {
		u16 pvid = br_get_pvid(vg);

		/* Frame had a tag with VID 0 or did not have a tag.
		 * See if pvid is set on this port.  That tells us which
		 * vlan untagged or priority-tagged traffic belongs to.
		 */
		if (!pvid)
			goto drop;

		/* PVID is set on this port.  Any untagged or priority-tagged
		 * ingress frame is considered to belong to this vlan.
		 */
		*vid = pvid;
		if (likely(!tagged))
			/* Untagged Frame. */
			__vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
		else
			/* Priority-tagged Frame.
			 * At this point, We know that skb->vlan_tci had
			 * VLAN_TAG_PRESENT bit and its VID field was 0x000.
			 * We update only VID field and preserve PCP field.
			 */
			skb->vlan_tci |= pvid;

		/* if stats are disabled we can avoid the lookup */
		if (!br->vlan_stats_enabled)
			return true;
	}
	v = br_vlan_find(vg, *vid);
	if (!v || !br_vlan_should_use(v))
		goto drop;

	if (br->vlan_stats_enabled) {
		stats = this_cpu_ptr(v->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->rx_bytes += skb->len;
		stats->rx_packets++;
		u64_stats_update_end(&stats->syncp);
	}

	return true;

drop:
	kfree_skb(skb);
	return false;
}
  420. bool br_allowed_ingress(const struct net_bridge *br,
  421. struct net_bridge_vlan_group *vg, struct sk_buff *skb,
  422. u16 *vid)
  423. {
  424. /* If VLAN filtering is disabled on the bridge, all packets are
  425. * permitted.
  426. */
  427. if (!br->vlan_enabled) {
  428. BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
  429. return true;
  430. }
  431. return __allowed_ingress(br, vg, skb, vid);
  432. }
  433. /* Called under RCU. */
  434. bool br_allowed_egress(struct net_bridge_vlan_group *vg,
  435. const struct sk_buff *skb)
  436. {
  437. const struct net_bridge_vlan *v;
  438. u16 vid;
  439. /* If this packet was not filtered at input, let it pass */
  440. if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
  441. return true;
  442. br_vlan_get_tag(skb, &vid);
  443. v = br_vlan_find(vg, vid);
  444. if (v && br_vlan_should_use(v))
  445. return true;
  446. return false;
  447. }
  448. /* Called under RCU */
  449. bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
  450. {
  451. struct net_bridge_vlan_group *vg;
  452. struct net_bridge *br = p->br;
  453. /* If filtering was disabled at input, let it pass. */
  454. if (!br->vlan_enabled)
  455. return true;
  456. vg = nbp_vlan_group_rcu(p);
  457. if (!vg || !vg->num_vlans)
  458. return false;
  459. if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
  460. *vid = 0;
  461. if (!*vid) {
  462. *vid = br_get_pvid(vg);
  463. if (!*vid)
  464. return false;
  465. return true;
  466. }
  467. if (br_vlan_find(vg, *vid))
  468. return true;
  469. return false;
  470. }
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 *
 * Add (or upgrade) a bridge-level vlan entry.  An existing master-only
 * entry is promoted to a real brentry when BRENTRY is requested;
 * otherwise a fresh master entry with its own per-cpu stats is created.
 */
int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	vlan = br_vlan_find(vg, vid);
	if (vlan) {
		if (!br_vlan_is_brentry(vlan)) {
			/* Trying to change flags of non-existent bridge vlan */
			if (!(flags & BRIDGE_VLAN_INFO_BRENTRY))
				return -EINVAL;
			/* It was only kept for port vlans, now make it real */
			ret = br_fdb_insert(br, NULL, br->dev->dev_addr,
					    vlan->vid);
			if (ret) {
				br_err(br, "failed insert local address into bridge forwarding table\n");
				return ret;
			}
			refcount_inc(&vlan->refcnt);
			vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
			vg->num_vlans++;
		}
		__vlan_add_flags(vlan, flags);
		return 0;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
	if (!vlan->stats) {
		kfree(vlan);
		return -ENOMEM;
	}
	vlan->vid = vid;
	vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
	/* pvid state is tracked in the group, not on the entry */
	vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
	vlan->br = br;
	/* brentries start with one reference held by the bridge itself */
	if (flags & BRIDGE_VLAN_INFO_BRENTRY)
		refcount_set(&vlan->refcnt, 1);
	ret = __vlan_add(vlan, flags);
	if (ret) {
		free_percpu(vlan->stats);
		kfree(vlan);
	}

	return ret;
}
  522. /* Must be protected by RTNL.
  523. * Must be called with vid in range from 1 to 4094 inclusive.
  524. */
  525. int br_vlan_delete(struct net_bridge *br, u16 vid)
  526. {
  527. struct net_bridge_vlan_group *vg;
  528. struct net_bridge_vlan *v;
  529. ASSERT_RTNL();
  530. vg = br_vlan_group(br);
  531. v = br_vlan_find(vg, vid);
  532. if (!v || !br_vlan_is_brentry(v))
  533. return -ENOENT;
  534. br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
  535. br_fdb_delete_by_port(br, NULL, vid, 0);
  536. vlan_tunnel_info_del(vg, v);
  537. return __vlan_del(v);
  538. }
/* Remove all bridge-level vlans and free the bridge's vlan group.
 * The group pointer is cleared first and a grace period observed so
 * RCU readers never see the group after it is freed.
 */
void br_vlan_flush(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	__vlan_flush(vg);
	RCU_INIT_POINTER(br->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}
  549. struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
  550. {
  551. if (!vg)
  552. return NULL;
  553. return br_vlan_lookup(&vg->vlan_hash, vid);
  554. }
  555. /* Must be protected by RTNL. */
  556. static void recalculate_group_addr(struct net_bridge *br)
  557. {
  558. if (br->group_addr_set)
  559. return;
  560. spin_lock_bh(&br->lock);
  561. if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q)) {
  562. /* Bridge Group Address */
  563. br->group_addr[5] = 0x00;
  564. } else { /* vlan_enabled && ETH_P_8021AD */
  565. /* Provider Bridge Group Address */
  566. br->group_addr[5] = 0x08;
  567. }
  568. spin_unlock_bh(&br->lock);
  569. }
  570. /* Must be protected by RTNL. */
  571. void br_recalculate_fwd_mask(struct net_bridge *br)
  572. {
  573. if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q))
  574. br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
  575. else /* vlan_enabled && ETH_P_8021AD */
  576. br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
  577. ~(1u << br->group_addr[5]);
  578. }
/* Toggle vlan filtering on the bridge.  The setting is propagated to
 * switchdev hardware first (EOPNOTSUPP is tolerated), then promiscuity,
 * the group address and the forwarding mask are refreshed.
 */
int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
	struct switchdev_attr attr = {
		.orig_dev = br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = val,
	};
	int err;

	if (br->vlan_enabled == val)
		return 0;

	err = switchdev_port_attr_set(br->dev, &attr);
	if (err && err != -EOPNOTSUPP)
		return err;

	br->vlan_enabled = val;
	br_manage_promisc(br);
	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	return 0;
}
/* Netlink/sysfs entry point for toggling vlan filtering. */
int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
	return __br_vlan_filter_toggle(br, val);
}
  603. bool br_vlan_enabled(const struct net_device *dev)
  604. {
  605. struct net_bridge *br = netdev_priv(dev);
  606. return !!br->vlan_enabled;
  607. }
  608. EXPORT_SYMBOL_GPL(br_vlan_enabled);
/* Switch the bridge's vlan protocol.  All existing port vlans are
 * first registered with the devices under the new proto; only after
 * that fully succeeds is the old proto's registration removed.  On
 * failure the partially added new-proto vids are rolled back.
 */
int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
{
	int err = 0;
	struct net_bridge_port *p;
	struct net_bridge_vlan *vlan;
	struct net_bridge_vlan_group *vg;
	__be16 oldproto;

	if (br->vlan_proto == proto)
		return 0;

	/* Add VLANs for the new proto to the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
			err = vlan_vid_add(p->dev, proto, vlan->vid);
			if (err)
				goto err_filt;
		}
	}

	oldproto = br->vlan_proto;
	br->vlan_proto = proto;

	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	/* Delete VLANs for the old proto from the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist)
			vlan_vid_del(p->dev, oldproto, vlan->vid);
	}

	return 0;

err_filt:
	/* undo the vids already added on the failing port... */
	list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
		vlan_vid_del(p->dev, proto, vlan->vid);

	/* ...and everything added on the ports before it */
	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist)
			vlan_vid_del(p->dev, proto, vlan->vid);
	}

	return err;
}
  648. int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
  649. {
  650. if (val != ETH_P_8021Q && val != ETH_P_8021AD)
  651. return -EPROTONOSUPPORT;
  652. return __br_vlan_set_proto(br, htons(val));
  653. }
  654. int br_vlan_set_stats(struct net_bridge *br, unsigned long val)
  655. {
  656. switch (val) {
  657. case 0:
  658. case 1:
  659. br->vlan_stats_enabled = val;
  660. break;
  661. default:
  662. return -EINVAL;
  663. }
  664. return 0;
  665. }
  666. static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
  667. {
  668. struct net_bridge_vlan *v;
  669. if (vid != vg->pvid)
  670. return false;
  671. v = br_vlan_lookup(&vg->vlan_hash, vid);
  672. if (v && br_vlan_should_use(v) &&
  673. (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
  674. return true;
  675. return false;
  676. }
  677. static void br_vlan_disable_default_pvid(struct net_bridge *br)
  678. {
  679. struct net_bridge_port *p;
  680. u16 pvid = br->default_pvid;
  681. /* Disable default_pvid on all ports where it is still
  682. * configured.
  683. */
  684. if (vlan_default_pvid(br_vlan_group(br), pvid))
  685. br_vlan_delete(br, pvid);
  686. list_for_each_entry(p, &br->port_list, list) {
  687. if (vlan_default_pvid(nbp_vlan_group(p), pvid))
  688. nbp_vlan_delete(p, pvid);
  689. }
  690. br->default_pvid = 0;
  691. }
/* Switch the bridge's default pvid to @pvid on the bridge itself and
 * on every port whose pvid configuration is still the untouched
 * default.  A per-port bitmap records which entities were switched so
 * a failure midway can restore the old pvid on exactly those.
 */
int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
{
	const struct net_bridge_vlan *pvent;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;
	u16 old_pvid;
	int err = 0;
	unsigned long *changed;

	if (!pvid) {
		/* pvid 0 means "turn the default pvid off" */
		br_vlan_disable_default_pvid(br);
		return 0;
	}

	changed = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long),
			  GFP_KERNEL);
	if (!changed)
		return -ENOMEM;

	old_pvid = br->default_pvid;

	/* Update default_pvid config only if we do not conflict with
	 * user configuration.
	 */
	vg = br_vlan_group(br);
	pvent = br_vlan_find(vg, pvid);
	if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
	    (!pvent || !br_vlan_should_use(pvent))) {
		err = br_vlan_add(br, pvid,
				  BRIDGE_VLAN_INFO_PVID |
				  BRIDGE_VLAN_INFO_UNTAGGED |
				  BRIDGE_VLAN_INFO_BRENTRY);
		if (err)
			goto out;
		br_vlan_delete(br, old_pvid);
		set_bit(0, changed);	/* bit 0 tracks the bridge itself */
	}

	list_for_each_entry(p, &br->port_list, list) {
		/* Update default_pvid config only if we do not conflict with
		 * user configuration.
		 */
		vg = nbp_vlan_group(p);
		if ((old_pvid &&
		     !vlan_default_pvid(vg, old_pvid)) ||
		    br_vlan_find(vg, pvid))
			continue;

		err = nbp_vlan_add(p, pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED);
		if (err)
			goto err_port;
		nbp_vlan_delete(p, old_pvid);
		set_bit(p->port_no, changed);
	}

	br->default_pvid = pvid;

out:
	kfree(changed);
	return err;

err_port:
	/* restore the old pvid on every port we had switched over */
	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		if (!test_bit(p->port_no, changed))
			continue;

		if (old_pvid)
			nbp_vlan_add(p, old_pvid,
				     BRIDGE_VLAN_INFO_PVID |
				     BRIDGE_VLAN_INFO_UNTAGGED);
		nbp_vlan_delete(p, pvid);
	}

	/* and on the bridge device if it was switched too */
	if (test_bit(0, changed)) {
		if (old_pvid)
			br_vlan_add(br, old_pvid,
				    BRIDGE_VLAN_INFO_PVID |
				    BRIDGE_VLAN_INFO_UNTAGGED |
				    BRIDGE_VLAN_INFO_BRENTRY);
		br_vlan_delete(br, pvid);
	}
	goto out;
}
  766. int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val)
  767. {
  768. u16 pvid = val;
  769. int err = 0;
  770. if (val >= VLAN_VID_MASK)
  771. return -EINVAL;
  772. if (pvid == br->default_pvid)
  773. goto out;
  774. /* Only allow default pvid change when filtering is disabled */
  775. if (br->vlan_enabled) {
  776. pr_info_once("Please disable vlan filtering to change default_pvid\n");
  777. err = -EPERM;
  778. goto out;
  779. }
  780. err = __br_vlan_set_default_pvid(br, pvid);
  781. out:
  782. return err;
  783. }
/* Allocate and initialize the bridge's vlan group and install vlan 1
 * as the default pvid/untagged brentry.  The error labels unwind the
 * init steps in reverse order.
 */
int br_vlan_init(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;

	vg = kzalloc(sizeof(*vg), GFP_KERNEL);
	if (!vg)
		goto out;
	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	ret = vlan_tunnel_init(vg);
	if (ret)
		goto err_tunnel_init;
	INIT_LIST_HEAD(&vg->vlan_list);
	br->vlan_proto = htons(ETH_P_8021Q);
	br->default_pvid = 1;
	/* publish the group before adding the default vlan */
	rcu_assign_pointer(br->vlgrp, vg);
	ret = br_vlan_add(br, 1,
			  BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED |
			  BRIDGE_VLAN_INFO_BRENTRY);
	if (ret)
		goto err_vlan_add;

out:
	return ret;

err_vlan_add:
	vlan_tunnel_deinit(vg);
err_tunnel_init:
	rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
	kfree(vg);

	goto out;
}
/* Per-port vlan group setup: push the bridge's vlan filtering state to
 * switchdev hardware (EOPNOTSUPP tolerated), create the port's vlan
 * group and install the bridge's default pvid on the port.
 */
int nbp_vlan_init(struct net_bridge_port *p)
{
	struct switchdev_attr attr = {
		.orig_dev = p->br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = p->br->vlan_enabled,
	};
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;

	vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
	if (!vg)
		goto out;

	ret = switchdev_port_attr_set(p->dev, &attr);
	if (ret && ret != -EOPNOTSUPP)
		goto err_vlan_enabled;

	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	ret = vlan_tunnel_init(vg);
	if (ret)
		goto err_tunnel_init;
	INIT_LIST_HEAD(&vg->vlan_list);
	/* publish the group before the default pvid add below */
	rcu_assign_pointer(p->vlgrp, vg);
	if (p->br->default_pvid) {
		ret = nbp_vlan_add(p, p->br->default_pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED);
		if (ret)
			goto err_vlan_add;
	}
out:
	return ret;

err_vlan_add:
	/* unpublish the group and quiesce readers before teardown */
	RCU_INIT_POINTER(p->vlgrp, NULL);
	synchronize_rcu();
	vlan_tunnel_deinit(vg);
err_tunnel_init:
	rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
err_vlan_enabled:
	kfree(vg);

	goto out;
}
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 *
 * Add a vlan to a bridge port.  If the entry already exists only its
 * flags are refreshed (and pushed to switchdev hardware); otherwise a
 * new port entry is created via the shared __vlan_add() path.
 */
int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
{
	struct switchdev_obj_port_vlan v = {
		.obj.orig_dev = port->dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.flags = flags,
		.vid_begin = vid,
		.vid_end = vid,
	};
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	vlan = br_vlan_find(nbp_vlan_group(port), vid);
	if (vlan) {
		/* Pass the flags to the hardware bridge */
		ret = switchdev_port_obj_add(port->dev, &v.obj);
		if (ret && ret != -EOPNOTSUPP)
			return ret;
		__vlan_add_flags(vlan, flags);
		return 0;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->vid = vid;
	vlan->port = port;
	ret = __vlan_add(vlan, flags);
	if (ret)
		kfree(vlan);

	return ret;
}
  894. /* Must be protected by RTNL.
  895. * Must be called with vid in range from 1 to 4094 inclusive.
  896. */
  897. int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
  898. {
  899. struct net_bridge_vlan *v;
  900. ASSERT_RTNL();
  901. v = br_vlan_find(nbp_vlan_group(port), vid);
  902. if (!v)
  903. return -ENOENT;
  904. br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
  905. br_fdb_delete_by_port(port->br, port, vid, 0);
  906. return __vlan_del(v);
  907. }
/* Tear down a port's vlan group on port removal.  The group pointer
 * is cleared and readers quiesced via synchronize_rcu() before the
 * backing memory is freed.
 */
void nbp_vlan_flush(struct net_bridge_port *port)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = nbp_vlan_group(port);
	__vlan_flush(vg);
	RCU_INIT_POINTER(port->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}
  918. void br_vlan_get_stats(const struct net_bridge_vlan *v,
  919. struct br_vlan_stats *stats)
  920. {
  921. int i;
  922. memset(stats, 0, sizeof(*stats));
  923. for_each_possible_cpu(i) {
  924. u64 rxpackets, rxbytes, txpackets, txbytes;
  925. struct br_vlan_stats *cpu_stats;
  926. unsigned int start;
  927. cpu_stats = per_cpu_ptr(v->stats, i);
  928. do {
  929. start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
  930. rxpackets = cpu_stats->rx_packets;
  931. rxbytes = cpu_stats->rx_bytes;
  932. txbytes = cpu_stats->tx_bytes;
  933. txpackets = cpu_stats->tx_packets;
  934. } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
  935. stats->rx_packets += rxpackets;
  936. stats->rx_bytes += rxbytes;
  937. stats->tx_bytes += txbytes;
  938. stats->tx_packets += txpackets;
  939. }
  940. }