br_vlan.c 28 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256
  1. #include <linux/kernel.h>
  2. #include <linux/netdevice.h>
  3. #include <linux/rtnetlink.h>
  4. #include <linux/slab.h>
  5. #include <net/switchdev.h>
  6. #include "br_private.h"
  7. #include "br_private_tunnel.h"
/* rhashtable compare callback: match a hashed VLAN entry against a VID key.
 * Returns 0 on match (rhashtable convention), non-zero otherwise.
 */
static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
			      const void *ptr)
{
	const struct net_bridge_vlan *vle = ptr;
	u16 vid = *(u16 *)arg->key;

	return vle->vid != vid;
}

/* Hash table parameters for the per-bridge and per-port VLAN tables,
 * keyed by net_bridge_vlan::vid.
 */
static const struct rhashtable_params br_vlan_rht_params = {
	.head_offset = offsetof(struct net_bridge_vlan, vnode),
	.key_offset = offsetof(struct net_bridge_vlan, vid),
	.key_len = sizeof(u16),
	.nelem_hint = 3,
	.locks_mul = 1,
	.max_size = VLAN_N_VID,	/* at most one entry per possible VID */
	.obj_cmpfn = br_vlan_cmp,
	.automatic_shrinking = true,
};

/* Look up a VLAN entry by VID in @tbl; returns NULL when not present. */
static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
{
	return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
}
  29. static bool __vlan_add_pvid(struct net_bridge_vlan_group *vg, u16 vid)
  30. {
  31. if (vg->pvid == vid)
  32. return false;
  33. smp_wmb();
  34. vg->pvid = vid;
  35. return true;
  36. }
  37. static bool __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
  38. {
  39. if (vg->pvid != vid)
  40. return false;
  41. smp_wmb();
  42. vg->pvid = 0;
  43. return true;
  44. }
/* Apply user-visible flags (PVID / UNTAGGED) from @flags to vlan entry @v.
 * Operates on the bridge group for master vlans, else on the port group.
 * return true if anything changed, false otherwise
 */
static bool __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
{
	struct net_bridge_vlan_group *vg;
	u16 old_flags = v->flags;
	bool ret;

	if (br_vlan_is_master(v))
		vg = br_vlan_group(v->br);
	else
		vg = nbp_vlan_group(v->port);

	/* PVID is stored on the group, not in the entry's flags */
	if (flags & BRIDGE_VLAN_INFO_PVID)
		ret = __vlan_add_pvid(vg, v->vid);
	else
		ret = __vlan_delete_pvid(vg, v->vid);

	if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
		v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
	else
		v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;

	/* changed if either the pvid moved or the untagged bit flipped */
	return ret || !!(old_flags ^ v->flags);
}
/* Program @vid into the underlying device's VLAN filter.
 * Returns 0 on success or a negative errno.
 */
static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
			  u16 vid, u16 flags)
{
	int err;

	/* Try switchdev op first. In case it is not supported, fallback to
	 * 8021q add.
	 */
	err = br_switchdev_port_vlan_add(dev, vid, flags);
	if (err == -EOPNOTSUPP)
		return vlan_vid_add(dev, br->vlan_proto, vid);
	return err;
}
/* Insert @v into its group's vlan_list, keeping the list sorted by
 * ascending VID. Walks from the tail since new VIDs are often largest.
 */
static void __vlan_add_list(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan_group *vg;
	struct list_head *headp, *hpos;
	struct net_bridge_vlan *vent;

	if (br_vlan_is_master(v))
		vg = br_vlan_group(v->br);
	else
		vg = nbp_vlan_group(v->port);

	headp = &vg->vlan_list;
	list_for_each_prev(hpos, headp) {
		vent = list_entry(hpos, struct net_bridge_vlan, vlist);
		if (v->vid < vent->vid)
			continue;
		else
			break;	/* found the first entry with a smaller vid */
	}
	/* insert after hpos, i.e. in sorted position, RCU-safely */
	list_add_rcu(&v->vlist, hpos);
}

/* Remove @v from its group's sorted vlan_list (RCU-safe unlink). */
static void __vlan_del_list(struct net_bridge_vlan *v)
{
	list_del_rcu(&v->vlist);
}
/* Remove @vid from the underlying device's VLAN filter.
 * Returns 0 on success or a negative errno.
 */
static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
			  u16 vid)
{
	int err;

	/* Try switchdev op first. In case it is not supported, fallback to
	 * 8021q del.
	 */
	err = br_switchdev_port_vlan_del(dev, vid);
	if (err == -EOPNOTSUPP) {
		/* 8021q del has no meaningful return value */
		vlan_vid_del(dev, br->vlan_proto, vid);
		return 0;
	}
	return err;
}
/* Returns a master vlan, if it didn't exist it gets created. In all cases
 * a reference is taken to the master vlan before returning.
 */
static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *masterv;

	vg = br_vlan_group(br);
	masterv = br_vlan_find(vg, vid);
	if (!masterv) {
		bool changed;

		/* missing global ctx, create it now */
		if (br_vlan_add(br, vid, 0, &changed))
			return NULL;
		masterv = br_vlan_find(vg, vid);
		if (WARN_ON(!masterv))
			return NULL;
		/* freshly created entry: the caller holds the only ref */
		refcount_set(&masterv->refcnt, 1);
		return masterv;
	}
	refcount_inc(&masterv->refcnt);
	return masterv;
}
/* RCU callback freeing a master (bridge-level) vlan entry and its
 * per-cpu stats, which are owned by the master entry.
 */
static void br_master_vlan_rcu_free(struct rcu_head *rcu)
{
	struct net_bridge_vlan *v;

	v = container_of(rcu, struct net_bridge_vlan, rcu);
	WARN_ON(!br_vlan_is_master(v));
	free_percpu(v->stats);
	v->stats = NULL;
	kfree(v);
}

/* Drop a reference on @masterv; on the last ref, unlink it from the
 * bridge vlan group and free it after a grace period.
 */
static void br_vlan_put_master(struct net_bridge_vlan *masterv)
{
	struct net_bridge_vlan_group *vg;

	/* only master entries are refcounted */
	if (!br_vlan_is_master(masterv))
		return;

	vg = br_vlan_group(masterv->br);
	if (refcount_dec_and_test(&masterv->refcnt)) {
		rhashtable_remove_fast(&vg->vlan_hash,
				       &masterv->vnode, br_vlan_rht_params);
		__vlan_del_list(masterv);
		call_rcu(&masterv->rcu, br_master_vlan_rcu_free);
	}
}
/* RCU callback freeing a per-port vlan entry. Port entries normally
 * share the master entry's stats, so only free stats we own.
 */
static void nbp_vlan_rcu_free(struct rcu_head *rcu)
{
	struct net_bridge_vlan *v;

	v = container_of(rcu, struct net_bridge_vlan, rcu);
	WARN_ON(br_vlan_is_master(v));
	/* if we had per-port stats configured then free them here */
	if (v->priv_flags & BR_VLFLAG_PER_PORT_STATS)
		free_percpu(v->stats);
	v->stats = NULL;
	kfree(v);
}
/* This is the shared VLAN add function which works for both ports and bridge
 * devices. There are four possible calls to this function in terms of the
 * vlan entry type:
 * 1. vlan is being added on a port (no master flags, global entry exists)
 * 2. vlan is being added on a bridge (both master and brentry flags)
 * 3. vlan is being added on a port, but a global entry didn't exist which
 *    is being created right now (master flag set, brentry flag unset), the
 *    global entry is used for global per-vlan features, but not for filtering
 * 4. same as 3 but with both master and brentry flags set so the entry
 *    will be used for filtering in both the port and the bridge
 */
static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
{
	struct net_bridge_vlan *masterv = NULL;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_device *dev;
	struct net_bridge *br;
	int err;

	/* resolve the context (bridge vs port) the entry belongs to */
	if (br_vlan_is_master(v)) {
		br = v->br;
		dev = br->dev;
		vg = br_vlan_group(br);
	} else {
		p = v->port;
		br = p->br;
		dev = p->dev;
		vg = nbp_vlan_group(p);
	}

	if (p) {
		/* Add VLAN to the device filter if it is supported.
		 * This ensures tagged traffic enters the bridge when
		 * promiscuous mode is disabled by br_manage_promisc().
		 */
		err = __vlan_vid_add(dev, br, v->vid, flags);
		if (err)
			goto out;

		/* need to work on the master vlan too */
		if (flags & BRIDGE_VLAN_INFO_MASTER) {
			bool changed;

			err = br_vlan_add(br, v->vid,
					  flags | BRIDGE_VLAN_INFO_BRENTRY,
					  &changed);
			if (err)
				goto out_filt;
		}

		/* take a reference to the (possibly just created) master */
		masterv = br_vlan_get_master(br, v->vid);
		if (!masterv)
			goto out_filt;
		v->brvlan = masterv;
		if (br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)) {
			/* this port entry owns its own stats */
			v->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
			if (!v->stats) {
				err = -ENOMEM;
				goto out_filt;
			}
			v->priv_flags |= BR_VLFLAG_PER_PORT_STATS;
		} else {
			/* share the master entry's stats */
			v->stats = masterv->stats;
		}
	} else {
		err = br_switchdev_port_vlan_add(dev, v->vid, flags);
		if (err && err != -EOPNOTSUPP)
			goto out;
	}

	/* Add the dev mac and count the vlan only if it's usable */
	if (br_vlan_should_use(v)) {
		err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
		if (err) {
			br_err(br, "failed insert local address into bridge forwarding table\n");
			goto out_filt;
		}
		vg->num_vlans++;
	}

	/* publish the entry in the lookup table last */
	err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
					    br_vlan_rht_params);
	if (err)
		goto out_fdb_insert;

	__vlan_add_list(v);
	__vlan_add_flags(v, flags);
out:
	return err;

	/* error unwinding below mirrors the setup order above */
out_fdb_insert:
	if (br_vlan_should_use(v)) {
		br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
		vg->num_vlans--;
	}
out_filt:
	if (p) {
		__vlan_vid_del(dev, br, v->vid);
		if (masterv) {
			/* free per-port stats only; shared stats belong
			 * to the master entry
			 */
			if (v->stats && masterv->stats != v->stats)
				free_percpu(v->stats);
			v->stats = NULL;

			br_vlan_put_master(masterv);
			v->brvlan = NULL;
		}
	} else {
		br_switchdev_port_vlan_del(dev, v->vid);
	}

	goto out;
}
/* Shared VLAN delete path for both port and bridge entries. Unlinks the
 * entry from hw filters, the pvid, and the lookup structures, and drops
 * the reference the entry held on its master vlan.
 */
static int __vlan_del(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan *masterv = v;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	int err = 0;

	if (br_vlan_is_master(v)) {
		vg = br_vlan_group(v->br);
	} else {
		p = v->port;
		vg = nbp_vlan_group(v->port);
		masterv = v->brvlan;
	}

	/* stop treating this vid as pvid, if it was */
	__vlan_delete_pvid(vg, v->vid);
	if (p) {
		err = __vlan_vid_del(p->dev, p->br, v->vid);
		if (err)
			goto out;
	} else {
		err = br_switchdev_port_vlan_del(v->br->dev, v->vid);
		if (err && err != -EOPNOTSUPP)
			goto out;
		err = 0;
	}

	if (br_vlan_should_use(v)) {
		v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
		vg->num_vlans--;
	}

	/* port entries are freed here; a master entry is freed by
	 * br_vlan_put_master() once its refcount drops to zero
	 */
	if (masterv != v) {
		vlan_tunnel_info_del(vg, v);
		rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
				       br_vlan_rht_params);
		__vlan_del_list(v);
		call_rcu(&v->rcu, nbp_vlan_rcu_free);
	}

	br_vlan_put_master(masterv);
out:
	return err;
}
/* Free an (already emptied) vlan group and its lookup structures. */
static void __vlan_group_free(struct net_bridge_vlan_group *vg)
{
	WARN_ON(!list_empty(&vg->vlan_list));
	rhashtable_destroy(&vg->vlan_hash);
	vlan_tunnel_deinit(vg);
	kfree(vg);
}

/* Delete every vlan entry in @vg (and its pvid setting). */
static void __vlan_flush(struct net_bridge_vlan_group *vg)
{
	struct net_bridge_vlan *vlan, *tmp;

	__vlan_delete_pvid(vg, vg->pvid);
	list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist)
		__vlan_del(vlan);
}
/* Egress VLAN handling: untag the frame if its vlan is configured
 * untagged, account tx stats, and apply egress tunnel mapping.
 * Returns the (possibly modified) skb, or NULL if it was dropped.
 */
struct sk_buff *br_handle_vlan(struct net_bridge *br,
			       const struct net_bridge_port *p,
			       struct net_bridge_vlan_group *vg,
			       struct sk_buff *skb)
{
	struct br_vlan_stats *stats;
	struct net_bridge_vlan *v;
	u16 vid;

	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
		goto out;

	/* At this point, we know that the frame was filtered and contains
	 * a valid vlan id.  If the vlan id has untagged flag set,
	 * send untagged; otherwise, send tagged.
	 */
	br_vlan_get_tag(skb, &vid);
	v = br_vlan_find(vg, vid);
	/* Vlan entry must be configured at this point.  The
	 * only exception is the bridge is set in promisc mode and the
	 * packet is destined for the bridge device.  In this case
	 * pass the packet as is.
	 */
	if (!v || !br_vlan_should_use(v)) {
		if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
			goto out;
		} else {
			kfree_skb(skb);
			return NULL;
		}
	}
	if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
		stats = this_cpu_ptr(v->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->tx_bytes += skb->len;
		stats->tx_packets++;
		u64_stats_update_end(&stats->syncp);
	}

	/* strip the tag for untagged-egress vlans */
	if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
		skb->vlan_tci = 0;

	if (p && (p->flags & BR_VLAN_TUNNEL) &&
	    br_handle_egress_vlan_tunnel(skb, v)) {
		kfree_skb(skb);
		return NULL;
	}
out:
	return skb;
}
/* Called under RCU.
 * Ingress VLAN filtering: determine the frame's vid (falling back to the
 * port pvid for untagged/priority-tagged frames), verify the vlan is
 * configured on this port, and account rx stats. On success *vid holds
 * the resolved vid and true is returned; otherwise the skb is freed.
 */
static bool __allowed_ingress(const struct net_bridge *br,
			      struct net_bridge_vlan_group *vg,
			      struct sk_buff *skb, u16 *vid)
{
	struct br_vlan_stats *stats;
	struct net_bridge_vlan *v;
	bool tagged;

	BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
	/* If vlan tx offload is disabled on bridge device and frame was
	 * sent from vlan device on the bridge device, it does not have
	 * HW accelerated vlan tag.
	 */
	if (unlikely(!skb_vlan_tag_present(skb) &&
		     skb->protocol == br->vlan_proto)) {
		skb = skb_vlan_untag(skb);
		if (unlikely(!skb))
			return false;
	}

	if (!br_vlan_get_tag(skb, vid)) {
		/* Tagged frame */
		if (skb->vlan_proto != br->vlan_proto) {
			/* Protocol-mismatch, empty out vlan_tci for new tag */
			skb_push(skb, ETH_HLEN);
			skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
							skb_vlan_tag_get(skb));
			if (unlikely(!skb))
				return false;

			skb_pull(skb, ETH_HLEN);
			skb_reset_mac_len(skb);
			*vid = 0;
			tagged = false;
		} else {
			tagged = true;
		}
	} else {
		/* Untagged frame */
		tagged = false;
	}

	if (!*vid) {
		u16 pvid = br_get_pvid(vg);

		/* Frame had a tag with VID 0 or did not have a tag.
		 * See if pvid is set on this port.  That tells us which
		 * vlan untagged or priority-tagged traffic belongs to.
		 */
		if (!pvid)
			goto drop;

		/* PVID is set on this port.  Any untagged or priority-tagged
		 * ingress frame is considered to belong to this vlan.
		 */
		*vid = pvid;
		if (likely(!tagged))
			/* Untagged Frame. */
			__vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
		else
			/* Priority-tagged Frame.
			 * At this point, We know that skb->vlan_tci had
			 * VLAN_TAG_PRESENT bit and its VID field was 0x000.
			 * We update only VID field and preserve PCP field.
			 */
			skb->vlan_tci |= pvid;

		/* if stats are disabled we can avoid the lookup */
		if (!br_opt_get(br, BROPT_VLAN_STATS_ENABLED))
			return true;
	}
	v = br_vlan_find(vg, *vid);
	if (!v || !br_vlan_should_use(v))
		goto drop;

	if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
		stats = this_cpu_ptr(v->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->rx_bytes += skb->len;
		stats->rx_packets++;
		u64_stats_update_end(&stats->syncp);
	}

	return true;

drop:
	kfree_skb(skb);
	return false;
}
/* Ingress filtering entry point; see __allowed_ingress() for details.
 * Returns true if the frame may enter the bridge.
 */
bool br_allowed_ingress(const struct net_bridge *br,
			struct net_bridge_vlan_group *vg, struct sk_buff *skb,
			u16 *vid)
{
	/* If VLAN filtering is disabled on the bridge, all packets are
	 * permitted.
	 */
	if (!br_opt_get(br, BROPT_VLAN_ENABLED)) {
		BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
		return true;
	}

	return __allowed_ingress(br, vg, skb, vid);
}
  465. /* Called under RCU. */
  466. bool br_allowed_egress(struct net_bridge_vlan_group *vg,
  467. const struct sk_buff *skb)
  468. {
  469. const struct net_bridge_vlan *v;
  470. u16 vid;
  471. /* If this packet was not filtered at input, let it pass */
  472. if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
  473. return true;
  474. br_vlan_get_tag(skb, &vid);
  475. v = br_vlan_find(vg, vid);
  476. if (v && br_vlan_should_use(v))
  477. return true;
  478. return false;
  479. }
/* Called under RCU */
/* Decide whether the source address of @skb should be learned on @p,
 * resolving the effective vid into *vid (pvid for untagged frames).
 */
bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge *br = p->br;

	/* If filtering was disabled at input, let it pass. */
	if (!br_opt_get(br, BROPT_VLAN_ENABLED))
		return true;

	vg = nbp_vlan_group_rcu(p);
	if (!vg || !vg->num_vlans)
		return false;

	/* a tag of a foreign protocol is treated as untagged */
	if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
		*vid = 0;

	if (!*vid) {
		*vid = br_get_pvid(vg);
		if (!*vid)
			return false;

		return true;
	}

	if (br_vlan_find(vg, *vid))
		return true;

	return false;
}
/* Update an already existing bridge vlan entry with new @flags, possibly
 * promoting a master-only context entry to a real bridge entry (brentry).
 * Sets *changed when anything user-visible changed.
 */
static int br_vlan_add_existing(struct net_bridge *br,
				struct net_bridge_vlan_group *vg,
				struct net_bridge_vlan *vlan,
				u16 flags, bool *changed)
{
	int err;

	err = br_switchdev_port_vlan_add(br->dev, vlan->vid, flags);
	if (err && err != -EOPNOTSUPP)
		return err;

	if (!br_vlan_is_brentry(vlan)) {
		/* Trying to change flags of non-existent bridge vlan */
		if (!(flags & BRIDGE_VLAN_INFO_BRENTRY)) {
			err = -EINVAL;
			goto err_flags;
		}
		/* It was only kept for port vlans, now make it real */
		err = br_fdb_insert(br, NULL, br->dev->dev_addr,
				    vlan->vid);
		if (err) {
			br_err(br, "failed to insert local address into bridge forwarding table\n");
			goto err_fdb_insert;
		}

		/* the brentry existence counts as an extra reference */
		refcount_inc(&vlan->refcnt);
		vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
		vg->num_vlans++;
		*changed = true;
	}

	if (__vlan_add_flags(vlan, flags))
		*changed = true;

	return 0;

err_fdb_insert:
err_flags:
	br_switchdev_port_vlan_del(br->dev, vlan->vid);
	return err;
}
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 * changed must be true only if the vlan was created or updated
 */
int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags, bool *changed)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	*changed = false;
	vg = br_vlan_group(br);
	vlan = br_vlan_find(vg, vid);
	if (vlan)
		/* entry exists: only update its flags */
		return br_vlan_add_existing(br, vg, vlan, flags, changed);

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
	if (!vlan->stats) {
		kfree(vlan);
		return -ENOMEM;
	}
	vlan->vid = vid;
	vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
	/* PVID lives on the group, never in entry flags */
	vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
	vlan->br = br;
	if (flags & BRIDGE_VLAN_INFO_BRENTRY)
		refcount_set(&vlan->refcnt, 1);
	ret = __vlan_add(vlan, flags);
	if (ret) {
		free_percpu(vlan->stats);
		kfree(vlan);
	} else {
		*changed = true;
	}

	return ret;
}
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 * Deletes the bridge-level (brentry) vlan @vid; returns -ENOENT if no
 * such entry exists.
 */
int br_vlan_delete(struct net_bridge *br, u16 vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	v = br_vlan_find(vg, vid);
	if (!v || !br_vlan_is_brentry(v))
		return -ENOENT;

	/* drop the bridge's own fdb entries for this vlan first */
	br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
	br_fdb_delete_by_port(br, NULL, vid, 0);

	vlan_tunnel_info_del(vg, v);

	return __vlan_del(v);
}
/* Remove all vlans from the bridge and free its vlan group.
 * Must be protected by RTNL; waits for an RCU grace period before
 * freeing the group so concurrent readers are safe.
 */
void br_vlan_flush(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	__vlan_flush(vg);
	RCU_INIT_POINTER(br->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}
  603. struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
  604. {
  605. if (!vg)
  606. return NULL;
  607. return br_vlan_lookup(&vg->vlan_hash, vid);
  608. }
/* Must be protected by RTNL. */
/* Pick the STP group address based on the vlan protocol, unless the
 * user configured one explicitly (BROPT_GROUP_ADDR_SET).
 */
static void recalculate_group_addr(struct net_bridge *br)
{
	if (br_opt_get(br, BROPT_GROUP_ADDR_SET))
		return;

	spin_lock_bh(&br->lock);
	if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
	    br->vlan_proto == htons(ETH_P_8021Q)) {
		/* Bridge Group Address */
		br->group_addr[5] = 0x00;
	} else { /* vlan_enabled && ETH_P_8021AD */
		/* Provider Bridge Group Address */
		br->group_addr[5] = 0x08;
	}
	spin_unlock_bh(&br->lock);
}
/* Must be protected by RTNL. */
/* Recompute which link-local group addresses must be forwarded based on
 * the active vlan protocol (802.1ad excludes the provider group addr).
 */
void br_recalculate_fwd_mask(struct net_bridge *br)
{
	if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
	    br->vlan_proto == htons(ETH_P_8021Q))
		br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
	else /* vlan_enabled && ETH_P_8021AD */
		br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
					      ~(1u << br->group_addr[5]);
}
/* Enable/disable vlan filtering on the bridge and propagate the change
 * to switchdev, promiscuity handling, group address and fwd mask.
 */
int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
	struct switchdev_attr attr = {
		.orig_dev = br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = val,
	};
	int err;

	/* nothing to do if the option already has the requested value */
	if (br_opt_get(br, BROPT_VLAN_ENABLED) == !!val)
		return 0;

	err = switchdev_port_attr_set(br->dev, &attr);
	if (err && err != -EOPNOTSUPP)
		return err;

	br_opt_toggle(br, BROPT_VLAN_ENABLED, !!val);
	br_manage_promisc(br);
	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	return 0;
}
/* Sysfs/netlink wrapper around __br_vlan_filter_toggle(). */
int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
	return __br_vlan_filter_toggle(br, val);
}

/* Report whether vlan filtering is enabled on bridge device @dev.
 * NOTE(review): assumes @dev is a bridge device — callers must ensure
 * this before netdev_priv() is valid.
 */
bool br_vlan_enabled(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	return br_opt_get(br, BROPT_VLAN_ENABLED);
}
EXPORT_SYMBOL_GPL(br_vlan_enabled);
/* Switch the bridge vlan protocol (802.1Q <-> 802.1ad): program all
 * existing vlans under the new protocol on every port first, then remove
 * the old-protocol filter entries. On failure, unwinds the partial
 * new-protocol additions in reverse order.
 */
int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
{
	int err = 0;
	struct net_bridge_port *p;
	struct net_bridge_vlan *vlan;
	struct net_bridge_vlan_group *vg;
	__be16 oldproto;

	if (br->vlan_proto == proto)
		return 0;

	/* Add VLANs for the new proto to the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
			err = vlan_vid_add(p->dev, proto, vlan->vid);
			if (err)
				goto err_filt;
		}
	}

	oldproto = br->vlan_proto;
	br->vlan_proto = proto;

	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	/* Delete VLANs for the old proto from the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist)
			vlan_vid_del(p->dev, oldproto, vlan->vid);
	}

	return 0;

err_filt:
	/* undo the partially-added vlans of the failing port... */
	list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
		vlan_vid_del(p->dev, proto, vlan->vid);

	/* ...then every fully-processed earlier port */
	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist)
			vlan_vid_del(p->dev, proto, vlan->vid);
	}

	return err;
}
  704. int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
  705. {
  706. if (val != ETH_P_8021Q && val != ETH_P_8021AD)
  707. return -EPROTONOSUPPORT;
  708. return __br_vlan_set_proto(br, htons(val));
  709. }
  710. int br_vlan_set_stats(struct net_bridge *br, unsigned long val)
  711. {
  712. switch (val) {
  713. case 0:
  714. case 1:
  715. br_opt_toggle(br, BROPT_VLAN_STATS_ENABLED, !!val);
  716. break;
  717. default:
  718. return -EINVAL;
  719. }
  720. return 0;
  721. }
/* Toggle per-port (instead of shared per-master) vlan statistics.
 * Refused with -EBUSY while any port has vlans configured, since
 * existing entries already chose their stats ownership.
 */
int br_vlan_set_stats_per_port(struct net_bridge *br, unsigned long val)
{
	struct net_bridge_port *p;

	/* allow to change the option if there are no port vlans configured */
	list_for_each_entry(p, &br->port_list, list) {
		struct net_bridge_vlan_group *vg = nbp_vlan_group(p);

		if (vg->num_vlans)
			return -EBUSY;
	}

	switch (val) {
	case 0:
	case 1:
		br_opt_toggle(br, BROPT_VLAN_STATS_PER_PORT, !!val);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
  741. static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
  742. {
  743. struct net_bridge_vlan *v;
  744. if (vid != vg->pvid)
  745. return false;
  746. v = br_vlan_lookup(&vg->vlan_hash, vid);
  747. if (v && br_vlan_should_use(v) &&
  748. (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
  749. return true;
  750. return false;
  751. }
/* Clear the bridge's default_pvid, deleting the corresponding vlan from
 * the bridge and every port where it still looks like an automatic
 * default (i.e. the user hasn't reconfigured it).
 */
static void br_vlan_disable_default_pvid(struct net_bridge *br)
{
	struct net_bridge_port *p;
	u16 pvid = br->default_pvid;

	/* Disable default_pvid on all ports where it is still
	 * configured.
	 */
	if (vlan_default_pvid(br_vlan_group(br), pvid))
		br_vlan_delete(br, pvid);

	list_for_each_entry(p, &br->port_list, list) {
		if (vlan_default_pvid(nbp_vlan_group(p), pvid))
			nbp_vlan_delete(p, pvid);
	}

	br->default_pvid = 0;
}
/* Change the bridge default_pvid to @pvid, installing it on the bridge
 * and all ports that still carry the automatic default, and removing the
 * old default. Ports with user-configured vlans are left alone. On
 * failure, rolls back every change recorded in the @changed bitmap
 * (bit 0 = the bridge itself, bit N = port_no N).
 */
int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
{
	const struct net_bridge_vlan *pvent;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;
	unsigned long *changed;
	bool vlchange;
	u16 old_pvid;
	int err = 0;

	if (!pvid) {
		br_vlan_disable_default_pvid(br);
		return 0;
	}

	changed = bitmap_zalloc(BR_MAX_PORTS, GFP_KERNEL);
	if (!changed)
		return -ENOMEM;

	old_pvid = br->default_pvid;

	/* Update default_pvid config only if we do not conflict with
	 * user configuration.
	 */
	vg = br_vlan_group(br);
	pvent = br_vlan_find(vg, pvid);
	if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
	    (!pvent || !br_vlan_should_use(pvent))) {
		err = br_vlan_add(br, pvid,
				  BRIDGE_VLAN_INFO_PVID |
				  BRIDGE_VLAN_INFO_UNTAGGED |
				  BRIDGE_VLAN_INFO_BRENTRY,
				  &vlchange);
		if (err)
			goto out;
		br_vlan_delete(br, old_pvid);
		set_bit(0, changed);	/* bit 0 marks the bridge itself */
	}

	list_for_each_entry(p, &br->port_list, list) {
		/* Update default_pvid config only if we do not conflict with
		 * user configuration.
		 */
		vg = nbp_vlan_group(p);
		if ((old_pvid &&
		     !vlan_default_pvid(vg, old_pvid)) ||
		    br_vlan_find(vg, pvid))
			continue;

		err = nbp_vlan_add(p, pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED,
				   &vlchange);
		if (err)
			goto err_port;
		nbp_vlan_delete(p, old_pvid);
		set_bit(p->port_no, changed);
	}

	br->default_pvid = pvid;

out:
	bitmap_free(changed);
	return err;

err_port:
	/* restore old_pvid and drop the new pvid on every port we touched */
	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		if (!test_bit(p->port_no, changed))
			continue;

		if (old_pvid)
			nbp_vlan_add(p, old_pvid,
				     BRIDGE_VLAN_INFO_PVID |
				     BRIDGE_VLAN_INFO_UNTAGGED,
				     &vlchange);
		nbp_vlan_delete(p, pvid);
	}

	/* same rollback for the bridge-level change */
	if (test_bit(0, changed)) {
		if (old_pvid)
			br_vlan_add(br, old_pvid,
				    BRIDGE_VLAN_INFO_PVID |
				    BRIDGE_VLAN_INFO_UNTAGGED |
				    BRIDGE_VLAN_INFO_BRENTRY,
				    &vlchange);
		br_vlan_delete(br, pvid);
	}

	goto out;
}
/* Sysfs/netlink entry point for changing default_pvid. Validates the
 * value and refuses changes while vlan filtering is enabled.
 */
int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val)
{
	u16 pvid = val;
	int err = 0;

	if (val >= VLAN_VID_MASK)
		return -EINVAL;

	if (pvid == br->default_pvid)
		goto out;

	/* Only allow default pvid change when filtering is disabled */
	if (br_opt_get(br, BROPT_VLAN_ENABLED)) {
		pr_info_once("Please disable vlan filtering to change default_pvid\n");
		err = -EPERM;
		goto out;
	}
	err = __br_vlan_set_default_pvid(br, pvid);
out:
	return err;
}
/* Initialize the bridge's own VLAN state: allocate its vlan group,
 * set the default protocol (802.1Q) and default pvid (1), and install
 * VLAN 1 as a PVID/untagged bridge entry.
 *
 * Returns 0 on success or a negative errno; on failure all partially
 * initialized state is unwound in reverse order via the goto chain.
 */
int br_vlan_init(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;
	bool changed;

	vg = kzalloc(sizeof(*vg), GFP_KERNEL);
	if (!vg)
		goto out;
	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	ret = vlan_tunnel_init(vg);
	if (ret)
		goto err_tunnel_init;
	INIT_LIST_HEAD(&vg->vlan_list);
	br->vlan_proto = htons(ETH_P_8021Q);
	br->default_pvid = 1;
	/* Publish the group before installing VLAN 1 below. */
	rcu_assign_pointer(br->vlgrp, vg);
	ret = br_vlan_add(br, 1,
			  BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED |
			  BRIDGE_VLAN_INFO_BRENTRY, &changed);
	if (ret)
		goto err_vlan_add;

out:
	return ret;

	/* Error unwind: undo only what was set up, in reverse order. */
err_vlan_add:
	vlan_tunnel_deinit(vg);
err_tunnel_init:
	rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
	kfree(vg);
	goto out;
}
/* Per-port VLAN initialization: propagate the bridge's current
 * vlan_filtering setting to the underlying switchdev device (drivers
 * returning -EOPNOTSUPP are tolerated), allocate the port's vlan
 * group, and - if the bridge has a default pvid - add it on this port
 * as PVID/untagged.
 *
 * Returns 0 on success or a negative errno; failures unwind in
 * reverse order via the goto chain.
 */
int nbp_vlan_init(struct net_bridge_port *p)
{
	struct switchdev_attr attr = {
		.orig_dev = p->br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = br_opt_get(p->br, BROPT_VLAN_ENABLED),
	};
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;

	vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
	if (!vg)
		goto out;

	/* Offload the filtering state; -EOPNOTSUPP means no offload. */
	ret = switchdev_port_attr_set(p->dev, &attr);
	if (ret && ret != -EOPNOTSUPP)
		goto err_vlan_enabled;

	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	ret = vlan_tunnel_init(vg);
	if (ret)
		goto err_tunnel_init;
	INIT_LIST_HEAD(&vg->vlan_list);
	/* Publish the group before adding the default pvid below. */
	rcu_assign_pointer(p->vlgrp, vg);
	if (p->br->default_pvid) {
		bool changed;

		ret = nbp_vlan_add(p, p->br->default_pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED,
				   &changed);
		if (ret)
			goto err_vlan_add;
	}
out:
	return ret;

err_vlan_add:
	/* The group was already published: unpublish it and wait for
	 * in-flight RCU readers before tearing it down.
	 */
	RCU_INIT_POINTER(p->vlgrp, NULL);
	synchronize_rcu();
	vlan_tunnel_deinit(vg);
err_tunnel_init:
	rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
err_vlan_enabled:
	kfree(vg);
	goto out;
}
  942. /* Must be protected by RTNL.
  943. * Must be called with vid in range from 1 to 4094 inclusive.
  944. * changed must be true only if the vlan was created or updated
  945. */
  946. int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags,
  947. bool *changed)
  948. {
  949. struct net_bridge_vlan *vlan;
  950. int ret;
  951. ASSERT_RTNL();
  952. *changed = false;
  953. vlan = br_vlan_find(nbp_vlan_group(port), vid);
  954. if (vlan) {
  955. /* Pass the flags to the hardware bridge */
  956. ret = br_switchdev_port_vlan_add(port->dev, vid, flags);
  957. if (ret && ret != -EOPNOTSUPP)
  958. return ret;
  959. *changed = __vlan_add_flags(vlan, flags);
  960. return 0;
  961. }
  962. vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
  963. if (!vlan)
  964. return -ENOMEM;
  965. vlan->vid = vid;
  966. vlan->port = port;
  967. ret = __vlan_add(vlan, flags);
  968. if (ret)
  969. kfree(vlan);
  970. else
  971. *changed = true;
  972. return ret;
  973. }
  974. /* Must be protected by RTNL.
  975. * Must be called with vid in range from 1 to 4094 inclusive.
  976. */
  977. int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
  978. {
  979. struct net_bridge_vlan *v;
  980. ASSERT_RTNL();
  981. v = br_vlan_find(nbp_vlan_group(port), vid);
  982. if (!v)
  983. return -ENOENT;
  984. br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
  985. br_fdb_delete_by_port(port->br, port, vid, 0);
  986. return __vlan_del(v);
  987. }
/* Remove every VLAN from a port and release its vlan group.
 * Must be called with RTNL held.
 */
void nbp_vlan_flush(struct net_bridge_port *port)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = nbp_vlan_group(port);
	__vlan_flush(vg);
	/* Unpublish the group and wait for in-flight RCU readers to
	 * finish before the backing memory is freed.
	 */
	RCU_INIT_POINTER(port->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}
  998. void br_vlan_get_stats(const struct net_bridge_vlan *v,
  999. struct br_vlan_stats *stats)
  1000. {
  1001. int i;
  1002. memset(stats, 0, sizeof(*stats));
  1003. for_each_possible_cpu(i) {
  1004. u64 rxpackets, rxbytes, txpackets, txbytes;
  1005. struct br_vlan_stats *cpu_stats;
  1006. unsigned int start;
  1007. cpu_stats = per_cpu_ptr(v->stats, i);
  1008. do {
  1009. start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
  1010. rxpackets = cpu_stats->rx_packets;
  1011. rxbytes = cpu_stats->rx_bytes;
  1012. txbytes = cpu_stats->tx_bytes;
  1013. txpackets = cpu_stats->tx_packets;
  1014. } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
  1015. stats->rx_packets += rxpackets;
  1016. stats->rx_bytes += rxbytes;
  1017. stats->tx_bytes += txbytes;
  1018. stats->tx_packets += txpackets;
  1019. }
  1020. }
  1021. int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
  1022. {
  1023. struct net_bridge_vlan_group *vg;
  1024. ASSERT_RTNL();
  1025. if (netif_is_bridge_master(dev))
  1026. vg = br_vlan_group(netdev_priv(dev));
  1027. else
  1028. return -EINVAL;
  1029. *p_pvid = br_get_pvid(vg);
  1030. return 0;
  1031. }
  1032. EXPORT_SYMBOL_GPL(br_vlan_get_pvid);
  1033. int br_vlan_get_info(const struct net_device *dev, u16 vid,
  1034. struct bridge_vlan_info *p_vinfo)
  1035. {
  1036. struct net_bridge_vlan_group *vg;
  1037. struct net_bridge_vlan *v;
  1038. struct net_bridge_port *p;
  1039. ASSERT_RTNL();
  1040. p = br_port_get_check_rtnl(dev);
  1041. if (p)
  1042. vg = nbp_vlan_group(p);
  1043. else if (netif_is_bridge_master(dev))
  1044. vg = br_vlan_group(netdev_priv(dev));
  1045. else
  1046. return -EINVAL;
  1047. v = br_vlan_find(vg, vid);
  1048. if (!v)
  1049. return -ENOENT;
  1050. p_vinfo->vid = vid;
  1051. p_vinfo->flags = v->flags;
  1052. return 0;
  1053. }
  1054. EXPORT_SYMBOL_GPL(br_vlan_get_info);