br_vlan.c 22 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011
  1. #include <linux/kernel.h>
  2. #include <linux/netdevice.h>
  3. #include <linux/rtnetlink.h>
  4. #include <linux/slab.h>
  5. #include <net/switchdev.h>
  6. #include "br_private.h"
  7. static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
  8. const void *ptr)
  9. {
  10. const struct net_bridge_vlan *vle = ptr;
  11. u16 vid = *(u16 *)arg->key;
  12. return vle->vid != vid;
  13. }
/* Hash table parameters shared by the per-bridge and per-port vlan
 * tables. Entries are keyed by the vlan id, so the table can never
 * need more than VLAN_N_VID slots.
 */
static const struct rhashtable_params br_vlan_rht_params = {
	.head_offset = offsetof(struct net_bridge_vlan, vnode),
	.key_offset = offsetof(struct net_bridge_vlan, vid),
	.key_len = sizeof(u16),
	.nelem_hint = 3,	/* most configurations use only a few vlans */
	.locks_mul = 1,
	.max_size = VLAN_N_VID,
	.obj_cmpfn = br_vlan_cmp,
	.automatic_shrinking = true,
};
/* Look up the vlan entry for @vid in @tbl; returns NULL when absent. */
static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
{
	return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
}
/* Make @vid the pvid of @vg (no-op if it already is).
 * The write barrier orders the vlan entry's prior setup before the
 * pvid store; presumably paired with a barrier on the pvid reader
 * side (br_get_pvid) - confirm in br_private.h.
 */
static void __vlan_add_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (vg->pvid == vid)
		return;

	smp_wmb();
	vg->pvid = vid;
}
/* Clear the pvid of @vg, but only if it is currently @vid (so deleting
 * a non-pvid vlan leaves the pvid untouched). Barrier as in
 * __vlan_add_pvid().
 */
static void __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (vg->pvid != vid)
		return;

	smp_wmb();
	vg->pvid = 0;
}
  42. static void __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
  43. {
  44. struct net_bridge_vlan_group *vg;
  45. if (br_vlan_is_master(v))
  46. vg = br_vlan_group(v->br);
  47. else
  48. vg = nbp_vlan_group(v->port);
  49. if (flags & BRIDGE_VLAN_INFO_PVID)
  50. __vlan_add_pvid(vg, v->vid);
  51. else
  52. __vlan_delete_pvid(vg, v->vid);
  53. if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
  54. v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
  55. else
  56. v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
  57. }
  58. static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
  59. u16 vid, u16 flags)
  60. {
  61. struct switchdev_obj_port_vlan v = {
  62. .obj.orig_dev = dev,
  63. .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
  64. .flags = flags,
  65. .vid_begin = vid,
  66. .vid_end = vid,
  67. };
  68. int err;
  69. /* Try switchdev op first. In case it is not supported, fallback to
  70. * 8021q add.
  71. */
  72. err = switchdev_port_obj_add(dev, &v.obj);
  73. if (err == -EOPNOTSUPP)
  74. return vlan_vid_add(dev, br->vlan_proto, vid);
  75. return err;
  76. }
/* Insert @v into its group's vlan_list, keeping the list sorted by vid
 * in ascending order. The list is walked from the tail because a newly
 * added vid is commonly the largest so far. RCU list primitives are
 * used so lockless readers can traverse concurrently.
 */
static void __vlan_add_list(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan_group *vg;
	struct list_head *headp, *hpos;
	struct net_bridge_vlan *vent;

	if (br_vlan_is_master(v))
		vg = br_vlan_group(v->br);
	else
		vg = nbp_vlan_group(v->port);

	headp = &vg->vlan_list;
	/* find the last entry with a vid smaller than ours */
	list_for_each_prev(hpos, headp) {
		vent = list_entry(hpos, struct net_bridge_vlan, vlist);
		if (v->vid < vent->vid)
			continue;
		else
			break;
	}
	list_add_rcu(&v->vlist, hpos);
}
/* Unlink @v from its group's sorted vlan_list (RCU-safe removal). */
static void __vlan_del_list(struct net_bridge_vlan *v)
{
	list_del_rcu(&v->vlist);
}
  100. static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
  101. u16 vid)
  102. {
  103. struct switchdev_obj_port_vlan v = {
  104. .obj.orig_dev = dev,
  105. .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
  106. .vid_begin = vid,
  107. .vid_end = vid,
  108. };
  109. int err;
  110. /* Try switchdev op first. In case it is not supported, fallback to
  111. * 8021q del.
  112. */
  113. err = switchdev_port_obj_del(dev, &v.obj);
  114. if (err == -EOPNOTSUPP) {
  115. vlan_vid_del(dev, br->vlan_proto, vid);
  116. return 0;
  117. }
  118. return err;
  119. }
/* Returns a master vlan, if it didn't exist it gets created. In all cases
 * a reference is taken to the master vlan before returning.
 */
static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *masterv;

	vg = br_vlan_group(br);
	masterv = br_vlan_find(vg, vid);
	if (!masterv) {
		/* missing global ctx, create it now */
		if (br_vlan_add(br, vid, 0))
			return NULL;
		/* re-lookup: br_vlan_add() just inserted it */
		masterv = br_vlan_find(vg, vid);
		if (WARN_ON(!masterv))
			return NULL;
	}
	atomic_inc(&masterv->refcnt);

	return masterv;
}
/* Drop one reference on a master vlan; when the last reference goes
 * away the entry is removed from the bridge hash/list and freed after
 * an RCU grace period. Calling this on a non-master entry is a no-op.
 */
static void br_vlan_put_master(struct net_bridge_vlan *masterv)
{
	struct net_bridge_vlan_group *vg;

	if (!br_vlan_is_master(masterv))
		return;

	vg = br_vlan_group(masterv->br);
	if (atomic_dec_and_test(&masterv->refcnt)) {
		rhashtable_remove_fast(&vg->vlan_hash,
				       &masterv->vnode, br_vlan_rht_params);
		__vlan_del_list(masterv);
		kfree_rcu(masterv, rcu);
	}
}
  153. /* This is the shared VLAN add function which works for both ports and bridge
  154. * devices. There are four possible calls to this function in terms of the
  155. * vlan entry type:
  156. * 1. vlan is being added on a port (no master flags, global entry exists)
  157. * 2. vlan is being added on a bridge (both master and brentry flags)
  158. * 3. vlan is being added on a port, but a global entry didn't exist which
  159. * is being created right now (master flag set, brentry flag unset), the
  160. * global entry is used for global per-vlan features, but not for filtering
  161. * 4. same as 3 but with both master and brentry flags set so the entry
  162. * will be used for filtering in both the port and the bridge
  163. */
  164. static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
  165. {
  166. struct net_bridge_vlan *masterv = NULL;
  167. struct net_bridge_port *p = NULL;
  168. struct net_bridge_vlan_group *vg;
  169. struct net_device *dev;
  170. struct net_bridge *br;
  171. int err;
  172. if (br_vlan_is_master(v)) {
  173. br = v->br;
  174. dev = br->dev;
  175. vg = br_vlan_group(br);
  176. } else {
  177. p = v->port;
  178. br = p->br;
  179. dev = p->dev;
  180. vg = nbp_vlan_group(p);
  181. }
  182. if (p) {
  183. /* Add VLAN to the device filter if it is supported.
  184. * This ensures tagged traffic enters the bridge when
  185. * promiscuous mode is disabled by br_manage_promisc().
  186. */
  187. err = __vlan_vid_add(dev, br, v->vid, flags);
  188. if (err)
  189. goto out;
  190. /* need to work on the master vlan too */
  191. if (flags & BRIDGE_VLAN_INFO_MASTER) {
  192. err = br_vlan_add(br, v->vid, flags |
  193. BRIDGE_VLAN_INFO_BRENTRY);
  194. if (err)
  195. goto out_filt;
  196. }
  197. masterv = br_vlan_get_master(br, v->vid);
  198. if (!masterv)
  199. goto out_filt;
  200. v->brvlan = masterv;
  201. }
  202. /* Add the dev mac and count the vlan only if it's usable */
  203. if (br_vlan_should_use(v)) {
  204. err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
  205. if (err) {
  206. br_err(br, "failed insert local address into bridge forwarding table\n");
  207. goto out_filt;
  208. }
  209. vg->num_vlans++;
  210. }
  211. err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
  212. br_vlan_rht_params);
  213. if (err)
  214. goto out_fdb_insert;
  215. __vlan_add_list(v);
  216. __vlan_add_flags(v, flags);
  217. out:
  218. return err;
  219. out_fdb_insert:
  220. if (br_vlan_should_use(v)) {
  221. br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
  222. vg->num_vlans--;
  223. }
  224. out_filt:
  225. if (p) {
  226. __vlan_vid_del(dev, br, v->vid);
  227. if (masterv) {
  228. br_vlan_put_master(masterv);
  229. v->brvlan = NULL;
  230. }
  231. }
  232. goto out;
  233. }
/* Shared vlan delete for port and bridge entries. For a port vlan the
 * device filter entry is removed, the entry is freed and the reference
 * on its master dropped; for a bridge entry only the brentry state is
 * cleared here - the struct itself is freed by br_vlan_put_master()
 * once the last port vlan referencing it is gone.
 */
static int __vlan_del(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan *masterv = v;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	int err = 0;

	if (br_vlan_is_master(v)) {
		vg = br_vlan_group(v->br);
	} else {
		p = v->port;
		vg = nbp_vlan_group(v->port);
		masterv = v->brvlan;
	}

	/* clear the pvid if this vlan was it */
	__vlan_delete_pvid(vg, v->vid);
	if (p) {
		err = __vlan_vid_del(p->dev, p->br, v->vid);
		if (err)
			goto out;
	}

	if (br_vlan_should_use(v)) {
		v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
		vg->num_vlans--;
	}

	/* port vlans are freed here; master entries via the refcount */
	if (masterv != v) {
		rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
				       br_vlan_rht_params);
		__vlan_del_list(v);
		kfree_rcu(v, rcu);
	}

	br_vlan_put_master(masterv);
out:
	return err;
}
/* Final free of a vlan group; all vlans must already have been deleted. */
static void __vlan_group_free(struct net_bridge_vlan_group *vg)
{
	WARN_ON(!list_empty(&vg->vlan_list));
	rhashtable_destroy(&vg->vlan_hash);
	kfree(vg);
}
/* Delete every vlan in @vg (clearing its pvid first). Safe iteration
 * is required because __vlan_del() unlinks entries from the list.
 */
static void __vlan_flush(struct net_bridge_vlan_group *vg)
{
	struct net_bridge_vlan *vlan, *tmp;

	__vlan_delete_pvid(vg, vg->pvid);
	list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist)
		__vlan_del(vlan);
}
/* Prepare @skb for egress according to its vlan's flags: strip the tag
 * when the vlan is untagged, drop the frame when no usable vlan entry
 * exists (unless the bridge device itself is promiscuous and is the
 * destination). Returns the skb to send, or NULL when it was freed.
 */
struct sk_buff *br_handle_vlan(struct net_bridge *br,
			       struct net_bridge_vlan_group *vg,
			       struct sk_buff *skb)
{
	struct net_bridge_vlan *v;
	u16 vid;

	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
		goto out;

	/* At this point, we know that the frame was filtered and contains
	 * a valid vlan id. If the vlan id has untagged flag set,
	 * send untagged; otherwise, send tagged.
	 */
	br_vlan_get_tag(skb, &vid);
	v = br_vlan_find(vg, vid);
	/* Vlan entry must be configured at this point. The
	 * only exception is the bridge is set in promisc mode and the
	 * packet is destined for the bridge device. In this case
	 * pass the packet as is.
	 */
	if (!v || !br_vlan_should_use(v)) {
		if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
			goto out;
		} else {
			kfree_skb(skb);
			return NULL;
		}
	}
	if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
		skb->vlan_tci = 0;	/* clear the hwaccel tag -> egress untagged */
out:
	return skb;
}
/* Called under RCU. Core ingress vlan filter: validates/normalizes the
 * frame's tag, resolves *vid (mapping untagged and priority-tagged
 * frames to the port's pvid) and accepts the frame only when the vlan
 * exists and is usable. On drop the skb is freed and false is returned.
 */
static bool __allowed_ingress(struct net_bridge_vlan_group *vg, __be16 proto,
			      struct sk_buff *skb, u16 *vid)
{
	const struct net_bridge_vlan *v;
	bool tagged;

	BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
	/* If vlan tx offload is disabled on bridge device and frame was
	 * sent from vlan device on the bridge device, it does not have
	 * HW accelerated vlan tag.
	 */
	if (unlikely(!skb_vlan_tag_present(skb) &&
		     skb->protocol == proto)) {
		skb = skb_vlan_untag(skb);
		if (unlikely(!skb))
			return false;
	}

	if (!br_vlan_get_tag(skb, vid)) {
		/* Tagged frame */
		if (skb->vlan_proto != proto) {
			/* Protocol-mismatch, empty out vlan_tci for new tag.
			 * The foreign-proto tag is pushed back into the
			 * payload so the frame is treated as untagged
			 * for this bridge's protocol.
			 */
			skb_push(skb, ETH_HLEN);
			skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
							skb_vlan_tag_get(skb));
			if (unlikely(!skb))
				return false;

			skb_pull(skb, ETH_HLEN);
			skb_reset_mac_len(skb);
			*vid = 0;
			tagged = false;
		} else {
			tagged = true;
		}
	} else {
		/* Untagged frame */
		tagged = false;
	}

	if (!*vid) {
		u16 pvid = br_get_pvid(vg);

		/* Frame had a tag with VID 0 or did not have a tag.
		 * See if pvid is set on this port. That tells us which
		 * vlan untagged or priority-tagged traffic belongs to.
		 */
		if (!pvid)
			goto drop;

		/* PVID is set on this port. Any untagged or priority-tagged
		 * ingress frame is considered to belong to this vlan.
		 */
		*vid = pvid;
		if (likely(!tagged))
			/* Untagged Frame. */
			__vlan_hwaccel_put_tag(skb, proto, pvid);
		else
			/* Priority-tagged Frame.
			 * At this point, We know that skb->vlan_tci had
			 * VLAN_TAG_PRESENT bit and its VID field was 0x000.
			 * We update only VID field and preserve PCP field.
			 */
			skb->vlan_tci |= pvid;

		return true;
	}

	/* Frame had a valid vlan tag. See if vlan is allowed */
	v = br_vlan_find(vg, *vid);
	if (v && br_vlan_should_use(v))
		return true;
drop:
	kfree_skb(skb);
	return false;
}
  382. bool br_allowed_ingress(const struct net_bridge *br,
  383. struct net_bridge_vlan_group *vg, struct sk_buff *skb,
  384. u16 *vid)
  385. {
  386. /* If VLAN filtering is disabled on the bridge, all packets are
  387. * permitted.
  388. */
  389. if (!br->vlan_enabled) {
  390. BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
  391. return true;
  392. }
  393. return __allowed_ingress(vg, br->vlan_proto, skb, vid);
  394. }
  395. /* Called under RCU. */
  396. bool br_allowed_egress(struct net_bridge_vlan_group *vg,
  397. const struct sk_buff *skb)
  398. {
  399. const struct net_bridge_vlan *v;
  400. u16 vid;
  401. /* If this packet was not filtered at input, let it pass */
  402. if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
  403. return true;
  404. br_vlan_get_tag(skb, &vid);
  405. v = br_vlan_find(vg, vid);
  406. if (v && br_vlan_should_use(v))
  407. return true;
  408. return false;
  409. }
/* Called under RCU. Decide whether a frame arriving on @p is eligible
 * for source-address learning, resolving in *vid the vlan it would be
 * learned under. Mirrors the ingress policy: untagged (or
 * foreign-protocol tagged) frames map to the port's pvid.
 */
bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge *br = p->br;

	/* If filtering was disabled at input, let it pass. */
	if (!br->vlan_enabled)
		return true;

	vg = nbp_vlan_group_rcu(p);
	if (!vg || !vg->num_vlans)
		return false;

	/* a tag in a different protocol counts as no tag at all */
	if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
		*vid = 0;

	if (!*vid) {
		*vid = br_get_pvid(vg);
		if (!*vid)
			return false;

		return true;
	}

	if (br_vlan_find(vg, *vid))
		return true;

	return false;
}
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 * Adds (or upgrades) a bridge-level vlan entry. A master-only entry,
 * kept alive just for port vlans, is promoted to a real bridge entry
 * when BRIDGE_VLAN_INFO_BRENTRY is requested.
 */
int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	vlan = br_vlan_find(vg, vid);
	if (vlan) {
		if (!br_vlan_is_brentry(vlan)) {
			/* Trying to change flags of non-existent bridge vlan */
			if (!(flags & BRIDGE_VLAN_INFO_BRENTRY))
				return -EINVAL;
			/* It was only kept for port vlans, now make it real */
			ret = br_fdb_insert(br, NULL, br->dev->dev_addr,
					    vlan->vid);
			if (ret) {
				br_err(br, "failed insert local address into bridge forwarding table\n");
				return ret;
			}
			/* the brentry state holds a reference of its own */
			atomic_inc(&vlan->refcnt);
			vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
			vg->num_vlans++;
		}
		__vlan_add_flags(vlan, flags);
		return 0;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->vid = vid;
	vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
	/* pvid state is tracked in the vlan group, not per entry */
	vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
	vlan->br = br;
	if (flags & BRIDGE_VLAN_INFO_BRENTRY)
		/* brentry vlans start with their own reference */
		atomic_set(&vlan->refcnt, 1);
	ret = __vlan_add(vlan, flags);
	if (ret)
		kfree(vlan);

	return ret;
}
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 * Removes the bridge-level entry for @vid along with its local and
 * learned fdb entries; the struct itself may live on while port vlans
 * still reference it as their master.
 */
int br_vlan_delete(struct net_bridge *br, u16 vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	v = br_vlan_find(vg, vid);
	if (!v || !br_vlan_is_brentry(v))
		return -ENOENT;

	br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
	br_fdb_delete_by_port(br, NULL, vid, 0);

	return __vlan_del(v);
}
/* Flush all bridge vlans and free the bridge vlan group. The group
 * pointer is unpublished and RCU readers are waited out before the
 * memory is released.
 */
void br_vlan_flush(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	__vlan_flush(vg);
	RCU_INIT_POINTER(br->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}
  503. struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
  504. {
  505. if (!vg)
  506. return NULL;
  507. return br_vlan_lookup(&vg->vlan_hash, vid);
  508. }
  509. /* Must be protected by RTNL. */
  510. static void recalculate_group_addr(struct net_bridge *br)
  511. {
  512. if (br->group_addr_set)
  513. return;
  514. spin_lock_bh(&br->lock);
  515. if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q)) {
  516. /* Bridge Group Address */
  517. br->group_addr[5] = 0x00;
  518. } else { /* vlan_enabled && ETH_P_8021AD */
  519. /* Provider Bridge Group Address */
  520. br->group_addr[5] = 0x08;
  521. }
  522. spin_unlock_bh(&br->lock);
  523. }
  524. /* Must be protected by RTNL. */
  525. void br_recalculate_fwd_mask(struct net_bridge *br)
  526. {
  527. if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q))
  528. br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
  529. else /* vlan_enabled && ETH_P_8021AD */
  530. br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
  531. ~(1u << br->group_addr[5]);
  532. }
/* Set the bridge's vlan filtering state to @val (caller holds RTNL).
 * The switchdev notification is best-effort: a driver returning
 * -EOPNOTSUPP is ignored and the software state is updated anyway.
 */
int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
	struct switchdev_attr attr = {
		.orig_dev = br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = val,
	};
	int err;

	if (br->vlan_enabled == val)
		return 0;

	err = switchdev_port_attr_set(br->dev, &attr);
	if (err && err != -EOPNOTSUPP)
		return err;

	br->vlan_enabled = val;
	/* filtering state affects port promiscuity, the group address
	 * and the set of link-local frames that get forwarded
	 */
	br_manage_promisc(br);
	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	return 0;
}
/* Sysfs/netlink entry point for toggling vlan filtering. Uses
 * rtnl_trylock() and restarts the syscall instead of blocking on RTNL.
 */
int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
	int err;

	if (!rtnl_trylock())
		return restart_syscall();

	err = __br_vlan_filter_toggle(br, val);
	rtnl_unlock();

	return err;
}
/* Change the bridge's vlan protocol (802.1Q <-> 802.1ad); caller holds
 * RTNL. All existing port vlans are first registered under the new
 * proto in the device filters; only then is the bridge switched over
 * and the old-proto entries removed. On failure every filter entry
 * added so far is rolled back and the proto is left unchanged.
 */
int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
{
	int err = 0;
	struct net_bridge_port *p;
	struct net_bridge_vlan *vlan;
	struct net_bridge_vlan_group *vg;
	__be16 oldproto;

	if (br->vlan_proto == proto)
		return 0;

	/* Add VLANs for the new proto to the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
			err = vlan_vid_add(p->dev, proto, vlan->vid);
			if (err)
				goto err_filt;
		}
	}

	oldproto = br->vlan_proto;
	br->vlan_proto = proto;

	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	/* Delete VLANs for the old proto from the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist)
			vlan_vid_del(p->dev, oldproto, vlan->vid);
	}

	return 0;

err_filt:
	/* unwind the vlans added on the failing port so far ... */
	list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
		vlan_vid_del(p->dev, proto, vlan->vid);

	/* ... and everything added on the ports before it */
	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist)
			vlan_vid_del(p->dev, proto, vlan->vid);
	}

	return err;
}
  601. int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
  602. {
  603. int err;
  604. if (val != ETH_P_8021Q && val != ETH_P_8021AD)
  605. return -EPROTONOSUPPORT;
  606. if (!rtnl_trylock())
  607. return restart_syscall();
  608. err = __br_vlan_set_proto(br, htons(val));
  609. rtnl_unlock();
  610. return err;
  611. }
  612. static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
  613. {
  614. struct net_bridge_vlan *v;
  615. if (vid != vg->pvid)
  616. return false;
  617. v = br_vlan_lookup(&vg->vlan_hash, vid);
  618. if (v && br_vlan_should_use(v) &&
  619. (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
  620. return true;
  621. return false;
  622. }
/* Turn the default pvid off (set to 0), removing the default-pvid vlan
 * from the bridge and from every port where it is still in its default
 * (pvid + untagged) configuration, i.e. not modified by the user.
 */
static void br_vlan_disable_default_pvid(struct net_bridge *br)
{
	struct net_bridge_port *p;
	u16 pvid = br->default_pvid;

	/* Disable default_pvid on all ports where it is still
	 * configured.
	 */
	if (vlan_default_pvid(br_vlan_group(br), pvid))
		br_vlan_delete(br, pvid);

	list_for_each_entry(p, &br->port_list, list) {
		if (vlan_default_pvid(nbp_vlan_group(p), pvid))
			nbp_vlan_delete(p, pvid);
	}

	br->default_pvid = 0;
}
/* Switch the default pvid to @pvid on the bridge and on every port,
 * but only where the old default is still in its default configuration
 * (not changed by the user). The @changed bitmap records which ports
 * were updated (bit 0 stands for the bridge device itself) so a
 * failure can roll each of them back to the old pvid.
 */
int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
{
	const struct net_bridge_vlan *pvent;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;
	u16 old_pvid;
	int err = 0;
	unsigned long *changed;

	if (!pvid) {
		/* pvid == 0 means "disable the default pvid" */
		br_vlan_disable_default_pvid(br);
		return 0;
	}

	changed = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long),
			  GFP_KERNEL);
	if (!changed)
		return -ENOMEM;

	old_pvid = br->default_pvid;

	/* Update default_pvid config only if we do not conflict with
	 * user configuration.
	 */
	vg = br_vlan_group(br);
	pvent = br_vlan_find(vg, pvid);
	if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
	    (!pvent || !br_vlan_should_use(pvent))) {
		err = br_vlan_add(br, pvid,
				  BRIDGE_VLAN_INFO_PVID |
				  BRIDGE_VLAN_INFO_UNTAGGED |
				  BRIDGE_VLAN_INFO_BRENTRY);
		if (err)
			goto out;
		br_vlan_delete(br, old_pvid);
		set_bit(0, changed);	/* bit 0 tracks the bridge device */
	}

	list_for_each_entry(p, &br->port_list, list) {
		/* Update default_pvid config only if we do not conflict with
		 * user configuration.
		 */
		vg = nbp_vlan_group(p);
		if ((old_pvid &&
		     !vlan_default_pvid(vg, old_pvid)) ||
		    br_vlan_find(vg, pvid))
			continue;

		err = nbp_vlan_add(p, pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED);
		if (err)
			goto err_port;
		nbp_vlan_delete(p, old_pvid);
		set_bit(p->port_no, changed);
	}

	br->default_pvid = pvid;

out:
	kfree(changed);
	return err;

err_port:
	/* restore the old pvid on every port we had switched over */
	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		if (!test_bit(p->port_no, changed))
			continue;

		if (old_pvid)
			nbp_vlan_add(p, old_pvid,
				     BRIDGE_VLAN_INFO_PVID |
				     BRIDGE_VLAN_INFO_UNTAGGED);
		nbp_vlan_delete(p, pvid);
	}

	/* and on the bridge itself, if it was changed */
	if (test_bit(0, changed)) {
		if (old_pvid)
			br_vlan_add(br, old_pvid,
				    BRIDGE_VLAN_INFO_PVID |
				    BRIDGE_VLAN_INFO_UNTAGGED |
				    BRIDGE_VLAN_INFO_BRENTRY);
		br_vlan_delete(br, pvid);
	}
	goto out;
}
/* Netlink/sysfs entry point for changing the default pvid. Only
 * permitted while vlan filtering is disabled, so the change cannot
 * conflict with an active filtering configuration.
 */
int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val)
{
	u16 pvid = val;
	int err = 0;

	/* vids are a 12-bit field; VLAN_VID_MASK and above are invalid */
	if (val >= VLAN_VID_MASK)
		return -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (pvid == br->default_pvid)
		goto unlock;

	/* Only allow default pvid change when filtering is disabled */
	if (br->vlan_enabled) {
		pr_info_once("Please disable vlan filtering to change default_pvid\n");
		err = -EPERM;
		goto unlock;
	}
	err = __br_vlan_set_default_pvid(br, pvid);
unlock:
	rtnl_unlock();
	return err;
}
  733. int br_vlan_init(struct net_bridge *br)
  734. {
  735. struct net_bridge_vlan_group *vg;
  736. int ret = -ENOMEM;
  737. vg = kzalloc(sizeof(*vg), GFP_KERNEL);
  738. if (!vg)
  739. goto out;
  740. ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
  741. if (ret)
  742. goto err_rhtbl;
  743. INIT_LIST_HEAD(&vg->vlan_list);
  744. br->vlan_proto = htons(ETH_P_8021Q);
  745. br->default_pvid = 1;
  746. rcu_assign_pointer(br->vlgrp, vg);
  747. ret = br_vlan_add(br, 1,
  748. BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED |
  749. BRIDGE_VLAN_INFO_BRENTRY);
  750. if (ret)
  751. goto err_vlan_add;
  752. out:
  753. return ret;
  754. err_vlan_add:
  755. rhashtable_destroy(&vg->vlan_hash);
  756. err_rhtbl:
  757. kfree(vg);
  758. goto out;
  759. }
/* Per-port vlan initialization, called when a port joins the bridge:
 * propagates the bridge's filtering state to the port driver via
 * switchdev (best-effort, -EOPNOTSUPP ignored), allocates the port's
 * vlan group and installs the bridge's default pvid on the port.
 */
int nbp_vlan_init(struct net_bridge_port *p)
{
	struct switchdev_attr attr = {
		.orig_dev = p->br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = p->br->vlan_enabled,
	};
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;

	vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
	if (!vg)
		goto out;

	ret = switchdev_port_attr_set(p->dev, &attr);
	if (ret && ret != -EOPNOTSUPP)
		goto err_vlan_enabled;

	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	INIT_LIST_HEAD(&vg->vlan_list);
	rcu_assign_pointer(p->vlgrp, vg);
	if (p->br->default_pvid) {
		ret = nbp_vlan_add(p, p->br->default_pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED);
		if (ret)
			goto err_vlan_add;
	}
out:
	return ret;

err_vlan_add:
	/* unpublish the group and wait for readers before freeing it */
	RCU_INIT_POINTER(p->vlgrp, NULL);
	synchronize_rcu();
	rhashtable_destroy(&vg->vlan_hash);
err_vlan_enabled:
err_rhtbl:
	kfree(vg);

	goto out;
}
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 * Adds @vid on @port; when the entry already exists only its
 * pvid/untagged flags are updated.
 */
int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
{
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	vlan = br_vlan_find(nbp_vlan_group(port), vid);
	if (vlan) {
		__vlan_add_flags(vlan, flags);
		return 0;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->vid = vid;
	vlan->port = port;
	ret = __vlan_add(vlan, flags);
	if (ret)
		kfree(vlan);

	return ret;
}
  822. /* Must be protected by RTNL.
  823. * Must be called with vid in range from 1 to 4094 inclusive.
  824. */
  825. int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
  826. {
  827. struct net_bridge_vlan *v;
  828. ASSERT_RTNL();
  829. v = br_vlan_find(nbp_vlan_group(port), vid);
  830. if (!v)
  831. return -ENOENT;
  832. br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
  833. br_fdb_delete_by_port(port->br, port, vid, 0);
  834. return __vlan_del(v);
  835. }
/* Remove all vlans from @port and free its vlan group. The group
 * pointer is unpublished and RCU readers are waited out before the
 * memory is released.
 */
void nbp_vlan_flush(struct net_bridge_port *port)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = nbp_vlan_group(port);
	__vlan_flush(vg);
	RCU_INIT_POINTER(port->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}