br_netlink.c

/*
 *	Bridge netlink control interface
 *
 *	Authors:
 *	Stephen Hemminger		<shemminger@osdl.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <uapi/linux/if_bridge.h>

#include "br_private.h"
#include "br_private_stp.h"
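
/* Count how many bridge_vlan_info entries a compressed VLAN dump of @vg
 * needs: consecutive vids with identical flags collapse into a range and
 * cost two entries (RANGE_BEGIN and RANGE_END), a lone vid costs one.
 */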
static int __get_num_vlan_infos(struct net_bridge_vlan_group *vg,
				u32 filter_mask)
{
	struct net_bridge_vlan *v;
	u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
	u16 flags, pvid;
	int num_vlans = 0;

	if (!(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
		return 0;

	pvid = br_get_pvid(vg);
	/* Count number of vlan infos */
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		flags = 0;
		/* only a context, bridge vlan not activated */
		if (!br_vlan_should_use(v))
			continue;
		if (v->vid == pvid)
			flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (vid_range_start == 0) {
			goto initvars;
		} else if ((v->vid - vid_range_end) == 1 &&
			   flags == vid_range_flags) {
			vid_range_end = v->vid;
			continue;
		} else {
			if ((vid_range_end - vid_range_start) > 0)
				num_vlans += 2;
			else
				num_vlans += 1;
		}
initvars:
		vid_range_start = v->vid;
		vid_range_end = v->vid;
		vid_range_flags = flags;
	}

	if (vid_range_start != 0) {
		if ((vid_range_end - vid_range_start) > 0)
			num_vlans += 2;
		else
			num_vlans += 1;
	}

	return num_vlans;
}
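
/* Number of vlan info entries that will be dumped for @vg under
 * @filter_mask.  The full dump (RTEXT_FILTER_BRVLAN) is one entry per
 * vlan; the compressed walk is done under the RCU read lock.
 */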
static int br_get_num_vlan_infos(struct net_bridge_vlan_group *vg,
				 u32 filter_mask)
{
	int num_vlans;

	if (!vg)
		return 0;

	if (filter_mask & RTEXT_FILTER_BRVLAN)
		return vg->num_vlans;

	rcu_read_lock();
	num_vlans = __get_num_vlan_infos(vg, filter_mask);
	rcu_read_unlock();

	return num_vlans;
}
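
/* Size of the IFLA_AF_SPEC payload for @dev: one bridge_vlan_info
 * attribute per vlan info entry the requested filter will dump.
 * Works for both a bridge port and the bridge device itself.
 */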
static size_t br_get_link_af_size_filtered(const struct net_device *dev,
					   u32 filter_mask)
{
	struct net_bridge_vlan_group *vg = NULL;
	struct net_bridge_port *p;
	struct net_bridge *br;
	int num_vlan_infos;

	rcu_read_lock();
	if (br_port_exists(dev)) {
		p = br_port_get_rcu(dev);
		vg = nbp_vlan_group_rcu(p);
	} else if (dev->priv_flags & IFF_EBRIDGE) {
		br = netdev_priv(dev);
		vg = br_vlan_group_rcu(br);
	}
	num_vlan_infos = br_get_num_vlan_infos(vg, filter_mask);
	rcu_read_unlock();

	/* Each VLAN is returned in bridge_vlan_info along with flags */
	return num_vlan_infos * nla_total_size(sizeof(struct bridge_vlan_info));
}

static inline size_t br_port_info_size(void)
{
	return nla_total_size(1)	/* IFLA_BRPORT_STATE  */
		+ nla_total_size(2)	/* IFLA_BRPORT_PRIORITY */
		+ nla_total_size(4)	/* IFLA_BRPORT_COST */
		+ nla_total_size(1)	/* IFLA_BRPORT_MODE */
		+ nla_total_size(1)	/* IFLA_BRPORT_GUARD */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROTECT */
		+ nla_total_size(1)	/* IFLA_BRPORT_FAST_LEAVE */
		+ nla_total_size(1)	/* IFLA_BRPORT_LEARNING */
		+ nla_total_size(1)	/* IFLA_BRPORT_UNICAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP_WIFI */
		+ nla_total_size(sizeof(struct ifla_bridge_id))	/* IFLA_BRPORT_ROOT_ID */
		+ nla_total_size(sizeof(struct ifla_bridge_id))	/* IFLA_BRPORT_BRIDGE_ID */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_DESIGNATED_PORT */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_DESIGNATED_COST */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_ID */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_NO */
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_TOPOLOGY_CHANGE_ACK */
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_CONFIG_PENDING */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_MESSAGE_AGE_TIMER */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_FORWARD_DELAY_TIMER */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_HOLD_TIMER */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_MULTICAST_ROUTER */
#endif
		+ 0;
}

static inline size_t br_nlmsg_size(struct net_device *dev, u32 filter_mask)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
		+ nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
		+ nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
		+ nla_total_size(4) /* IFLA_MASTER */
		+ nla_total_size(4) /* IFLA_MTU */
		+ nla_total_size(4) /* IFLA_LINK */
		+ nla_total_size(1) /* IFLA_OPERSTATE */
		+ nla_total_size(br_port_info_size()) /* IFLA_PROTINFO */
		+ nla_total_size(br_get_link_af_size_filtered(dev,
				 filter_mask)); /* IFLA_AF_SPEC */
}
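
/* Fill the IFLA_BRPORT_* attributes describing one bridge port: STP
 * state, ids and timers, the port flags and (with IGMP snooping) the
 * multicast router setting.  Returns -EMSGSIZE if the skb runs out of
 * room.
 */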
static int br_port_fill_attrs(struct sk_buff *skb,
			      const struct net_bridge_port *p)
{
	u8 mode = !!(p->flags & BR_HAIRPIN_MODE);
	u64 timerval;

	if (nla_put_u8(skb, IFLA_BRPORT_STATE, p->state) ||
	    nla_put_u16(skb, IFLA_BRPORT_PRIORITY, p->priority) ||
	    nla_put_u32(skb, IFLA_BRPORT_COST, p->path_cost) ||
	    nla_put_u8(skb, IFLA_BRPORT_MODE, mode) ||
	    nla_put_u8(skb, IFLA_BRPORT_GUARD, !!(p->flags & BR_BPDU_GUARD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROTECT, !!(p->flags & BR_ROOT_BLOCK)) ||
	    nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, !!(p->flags & BR_MULTICAST_FAST_LEAVE)) ||
	    nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) ||
	    nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD, !!(p->flags & BR_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP_WIFI,
		       !!(p->flags & BR_PROXYARP_WIFI)) ||
	    nla_put(skb, IFLA_BRPORT_ROOT_ID, sizeof(struct ifla_bridge_id),
		    &p->designated_root) ||
	    nla_put(skb, IFLA_BRPORT_BRIDGE_ID, sizeof(struct ifla_bridge_id),
		    &p->designated_bridge) ||
	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_PORT, p->designated_port) ||
	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_COST, p->designated_cost) ||
	    nla_put_u16(skb, IFLA_BRPORT_ID, p->port_id) ||
	    nla_put_u16(skb, IFLA_BRPORT_NO, p->port_no) ||
	    nla_put_u8(skb, IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
		       p->topology_change_ack) ||
	    nla_put_u8(skb, IFLA_BRPORT_CONFIG_PENDING, p->config_pending))
		return -EMSGSIZE;

	timerval = br_timer_value(&p->message_age_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_MESSAGE_AGE_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;
	timerval = br_timer_value(&p->forward_delay_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_FORWARD_DELAY_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;
	timerval = br_timer_value(&p->hold_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_HOLD_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (nla_put_u8(skb, IFLA_BRPORT_MULTICAST_ROUTER,
		       p->multicast_router))
		return -EMSGSIZE;
#endif

	return 0;
}
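
/* Emit one IFLA_BRIDGE_VLAN_INFO attribute for a single vid, or a pair
 * of attributes flagged RANGE_BEGIN/RANGE_END when vid_start..vid_end
 * spans more than one vlan.
 */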
static int br_fill_ifvlaninfo_range(struct sk_buff *skb, u16 vid_start,
				    u16 vid_end, u16 flags)
{
	struct bridge_vlan_info vinfo;

	if ((vid_end - vid_start) > 0) {
		/* add range to skb */
		vinfo.vid = vid_start;
		vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_BEGIN;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;

		vinfo.vid = vid_end;
		vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_END;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	} else {
		vinfo.vid = vid_start;
		vinfo.flags = flags;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int br_fill_ifvlaninfo_compressed(struct sk_buff *skb,
					 struct net_bridge_vlan_group *vg)
{
	struct net_bridge_vlan *v;
	u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
	u16 flags, pvid;
	int err = 0;

	/* Pack IFLA_BRIDGE_VLAN_INFO's for every vlan
	 * and mark vlan info with begin and end flags
	 * if vlaninfo represents a range
	 */
	pvid = br_get_pvid(vg);
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		flags = 0;
		if (!br_vlan_should_use(v))
			continue;
		if (v->vid == pvid)
			flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (vid_range_start == 0) {
			goto initvars;
		} else if ((v->vid - vid_range_end) == 1 &&
			   flags == vid_range_flags) {
			vid_range_end = v->vid;
			continue;
		} else {
			err = br_fill_ifvlaninfo_range(skb, vid_range_start,
						       vid_range_end,
						       vid_range_flags);
			if (err)
				return err;
		}

initvars:
		vid_range_start = v->vid;
		vid_range_end = v->vid;
		vid_range_flags = flags;
	}

	if (vid_range_start != 0) {
		/* Call it once more to send any left over vlans */
		err = br_fill_ifvlaninfo_range(skb, vid_range_start,
					       vid_range_end,
					       vid_range_flags);
		if (err)
			return err;
	}

	return 0;
}
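
/* Uncompressed variant: one IFLA_BRIDGE_VLAN_INFO attribute per vlan,
 * each carrying its own PVID/UNTAGGED flags.
 */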
static int br_fill_ifvlaninfo(struct sk_buff *skb,
			      struct net_bridge_vlan_group *vg)
{
	struct bridge_vlan_info vinfo;
	struct net_bridge_vlan *v;
	u16 pvid;

	pvid = br_get_pvid(vg);
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		if (!br_vlan_should_use(v))
			continue;

		vinfo.vid = v->vid;
		vinfo.flags = 0;
		if (v->vid == pvid)
			vinfo.flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

/*
 * Create one netlink message for one interface
 * Contains port and master info as well as carrier and bridge state.
 */
static int br_fill_ifinfo(struct sk_buff *skb,
			  struct net_bridge_port *port,
			  u32 pid, u32 seq, int event, unsigned int flags,
			  u32 filter_mask, const struct net_device *dev)
{
	struct net_bridge *br;
	struct ifinfomsg *hdr;
	struct nlmsghdr *nlh;
	u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;

	if (port)
		br = port->br;
	else
		br = netdev_priv(dev);

	br_debug(br, "br_fill_info event %d port %s master %s\n",
		 event, dev->name, br->dev->name);

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->ifi_family = AF_BRIDGE;
	hdr->__ifi_pad = 0;
	hdr->ifi_type = dev->type;
	hdr->ifi_index = dev->ifindex;
	hdr->ifi_flags = dev_get_flags(dev);
	hdr->ifi_change = 0;

	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(skb, IFLA_MASTER, br->dev->ifindex) ||
	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
	    nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
	    (dev->addr_len &&
	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
	    (dev->ifindex != dev_get_iflink(dev) &&
	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
		goto nla_put_failure;

	if (event == RTM_NEWLINK && port) {
		struct nlattr *nest
			= nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);

		if (nest == NULL || br_port_fill_attrs(skb, port) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, nest);
	}

	/* Check if the VID information is requested */
	if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
	    (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
		struct net_bridge_vlan_group *vg;
		struct nlattr *af;
		int err;

		/* RCU needed because of the VLAN locking rules (rcu || rtnl) */
		rcu_read_lock();
		if (port)
			vg = nbp_vlan_group_rcu(port);
		else
			vg = br_vlan_group_rcu(br);

		if (!vg || !vg->num_vlans) {
			rcu_read_unlock();
			goto done;
		}
		af = nla_nest_start(skb, IFLA_AF_SPEC);
		if (!af) {
			rcu_read_unlock();
			goto nla_put_failure;
		}
		if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
			err = br_fill_ifvlaninfo_compressed(skb, vg);
		else
			err = br_fill_ifvlaninfo(skb, vg);
		rcu_read_unlock();
		if (err)
			goto nla_put_failure;
		nla_nest_end(skb, af);
	}

done:
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

/*
 * Notify listeners of a change in port information
 */
void br_ifinfo_notify(int event, struct net_bridge_port *port)
{
	struct net *net;
	struct sk_buff *skb;
	int err = -ENOBUFS;
	u32 filter = RTEXT_FILTER_BRVLAN_COMPRESSED;

	if (!port)
		return;

	net = dev_net(port->dev);
	br_debug(port->br, "port %u(%s) event %d\n",
		 (unsigned int)port->port_no, port->dev->name, event);

	skb = nlmsg_new(br_nlmsg_size(port->dev, filter), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = br_fill_ifinfo(skb, port, 0, 0, event, 0, filter, port->dev);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in br_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_LINK, err);
}

/*
 * Dump information about all ports, in response to GETLINK
 */
int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
	       struct net_device *dev, u32 filter_mask, int nlflags)
{
	struct net_bridge_port *port = br_port_get_rtnl(dev);

	if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN) &&
	    !(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
		return 0;

	return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, nlflags,
			      filter_mask, dev);
}
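
/* Apply a single bridge_vlan_info entry: add (RTM_SETLINK) or delete
 * (RTM_DELLINK) the vlan on the port, or on the bridge itself when no
 * port is given.
 */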
static int br_vlan_info(struct net_bridge *br, struct net_bridge_port *p,
			int cmd, struct bridge_vlan_info *vinfo)
{
	int err = 0;

	switch (cmd) {
	case RTM_SETLINK:
		if (p) {
			/* if the MASTER flag is set this will act on the global
			 * per-VLAN entry as well
			 */
			err = nbp_vlan_add(p, vinfo->vid, vinfo->flags);
			if (err)
				break;
		} else {
			vinfo->flags |= BRIDGE_VLAN_INFO_BRENTRY;
			err = br_vlan_add(br, vinfo->vid, vinfo->flags);
		}
		break;

	case RTM_DELLINK:
		if (p) {
			nbp_vlan_delete(p, vinfo->vid);
			if (vinfo->flags & BRIDGE_VLAN_INFO_MASTER)
				br_vlan_delete(p->br, vinfo->vid);
		} else {
			br_vlan_delete(br, vinfo->vid);
		}
		break;
	}

	return err;
}
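
/* Walk the IFLA_AF_SPEC blob of a SETLINK/DELLINK request, validate
 * every IFLA_BRIDGE_VLAN_INFO attribute (vid bounds, well-formed
 * RANGE_BEGIN/RANGE_END pairs, no pvid ranges) and apply it via
 * br_vlan_info(), expanding ranges one vid at a time.
 */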
static int br_afspec(struct net_bridge *br,
		     struct net_bridge_port *p,
		     struct nlattr *af_spec,
		     int cmd)
{
	struct bridge_vlan_info *vinfo_start = NULL;
	struct bridge_vlan_info *vinfo = NULL;
	struct nlattr *attr;
	int err = 0;
	int rem;

	nla_for_each_nested(attr, af_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_VLAN_INFO)
			continue;
		if (nla_len(attr) != sizeof(struct bridge_vlan_info))
			return -EINVAL;
		vinfo = nla_data(attr);
		if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK)
			return -EINVAL;
		if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
			if (vinfo_start)
				return -EINVAL;
			vinfo_start = vinfo;
			/* don't allow range of pvids */
			if (vinfo_start->flags & BRIDGE_VLAN_INFO_PVID)
				return -EINVAL;
			continue;
		}

		if (vinfo_start) {
			struct bridge_vlan_info tmp_vinfo;
			int v;

			if (!(vinfo->flags & BRIDGE_VLAN_INFO_RANGE_END))
				return -EINVAL;

			if (vinfo->vid <= vinfo_start->vid)
				return -EINVAL;

			memcpy(&tmp_vinfo, vinfo_start,
			       sizeof(struct bridge_vlan_info));

			for (v = vinfo_start->vid; v <= vinfo->vid; v++) {
				tmp_vinfo.vid = v;
				err = br_vlan_info(br, p, cmd, &tmp_vinfo);
				if (err)
					break;
			}
			vinfo_start = NULL;
		} else {
			err = br_vlan_info(br, p, cmd, vinfo);
		}
		if (err)
			break;
	}

	return err;
}

static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
	[IFLA_BRPORT_STATE]	= { .type = NLA_U8 },
	[IFLA_BRPORT_COST]	= { .type = NLA_U32 },
	[IFLA_BRPORT_PRIORITY]	= { .type = NLA_U16 },
	[IFLA_BRPORT_MODE]	= { .type = NLA_U8 },
	[IFLA_BRPORT_GUARD]	= { .type = NLA_U8 },
	[IFLA_BRPORT_PROTECT]	= { .type = NLA_U8 },
	[IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 },
	[IFLA_BRPORT_LEARNING]	= { .type = NLA_U8 },
	[IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
	[IFLA_BRPORT_PROXYARP]	= { .type = NLA_U8 },
	[IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 },
	[IFLA_BRPORT_MULTICAST_ROUTER] = { .type = NLA_U8 },
};

/* Change the state of the port and notify spanning tree */
static int br_set_port_state(struct net_bridge_port *p, u8 state)
{
	if (state > BR_STATE_BLOCKING)
		return -EINVAL;

	/* if kernel STP is running, don't allow changes */
	if (p->br->stp_enabled == BR_KERNEL_STP)
		return -EBUSY;

	/* if device is not up, change is not allowed
	 * if link is not present, only allowable state is disabled
	 */
	if (!netif_running(p->dev) ||
	    (!netif_oper_up(p->dev) && state != BR_STATE_DISABLED))
		return -ENETDOWN;

	br_set_state(p, state);
	br_port_state_selection(p->br);
	return 0;
}

/* Set or clear a port flag based on the netlink attribute, if present */
static void br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[],
			     int attrtype, unsigned long mask)
{
	if (tb[attrtype]) {
		u8 flag = nla_get_u8(tb[attrtype]);

		if (flag)
			p->flags |= mask;
		else
			p->flags &= ~mask;
	}
}

/* Process bridge protocol info on port */
static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
{
	int err;
	unsigned long old_flags = p->flags;

	br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
	br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
	br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE);
	br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK);
	br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING);
	br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD);
	br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP);
	br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI);

	if (tb[IFLA_BRPORT_COST]) {
		err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_PRIORITY]) {
		err = br_stp_set_port_priority(p, nla_get_u16(tb[IFLA_BRPORT_PRIORITY]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_STATE]) {
		err = br_set_port_state(p, nla_get_u8(tb[IFLA_BRPORT_STATE]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_FLUSH])
		br_fdb_delete_by_port(p->br, p, 0, 0);

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (tb[IFLA_BRPORT_MULTICAST_ROUTER]) {
		u8 mcast_router = nla_get_u8(tb[IFLA_BRPORT_MULTICAST_ROUTER]);

		err = br_multicast_set_port_router(p, mcast_router);
		if (err)
			return err;
	}
#endif

	br_port_flags_change(p, old_flags ^ p->flags);
	return 0;
}

/* Change state and parameters on port. */
int br_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
{
	struct nlattr *protinfo;
	struct nlattr *afspec;
	struct net_bridge_port *p;
	struct nlattr *tb[IFLA_BRPORT_MAX + 1];
	int err = 0;

	protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_PROTINFO);
	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!protinfo && !afspec)
		return 0;

	p = br_port_get_rtnl(dev);
	/* We want to accept dev as bridge itself if the AF_SPEC
	 * is set to see if someone is setting vlan info on the bridge
	 */
	if (!p && !afspec)
		return -EINVAL;

	if (p && protinfo) {
		if (protinfo->nla_type & NLA_F_NESTED) {
			err = nla_parse_nested(tb, IFLA_BRPORT_MAX,
					       protinfo, br_port_policy);
			if (err)
				return err;

			spin_lock_bh(&p->br->lock);
			err = br_setport(p, tb);
			spin_unlock_bh(&p->br->lock);
		} else {
			/* Binary compatibility with old RSTP */
			if (nla_len(protinfo) < sizeof(u8))
				return -EINVAL;

			spin_lock_bh(&p->br->lock);
			err = br_set_port_state(p, nla_get_u8(protinfo));
			spin_unlock_bh(&p->br->lock);
		}
		if (err)
			goto out;
	}

	if (afspec) {
		err = br_afspec((struct net_bridge *)netdev_priv(dev), p,
				afspec, RTM_SETLINK);
	}

	if (err == 0)
		br_ifinfo_notify(RTM_NEWLINK, p);
out:
	return err;
}

/* Delete port information */
int br_dellink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
{
	struct nlattr *afspec;
	struct net_bridge_port *p;
	int err = 0;

	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!afspec)
		return 0;

	p = br_port_get_rtnl(dev);
	/* We want to accept dev as bridge itself as well */
	if (!p && !(dev->priv_flags & IFF_EBRIDGE))
		return -EINVAL;

	err = br_afspec((struct net_bridge *)netdev_priv(dev), p,
			afspec, RTM_DELLINK);
	if (err == 0)
		/* Send RTM_NEWLINK because userspace
		 * expects RTM_NEWLINK for vlan dels
		 */
		br_ifinfo_notify(RTM_NEWLINK, p);

	return err;
}
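
/* Validate the netlink attributes for a bridge device: IFLA_ADDRESS
 * must be a valid unicast MAC and, with vlan filtering compiled in,
 * the vlan protocol must be 802.1Q or 802.1ad.
 */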
static int br_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		return 0;

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (data[IFLA_BR_VLAN_PROTOCOL]) {
		switch (nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL])) {
		case htons(ETH_P_8021Q):
		case htons(ETH_P_8021AD):
			break;
		default:
			return -EPROTONOSUPPORT;
		}
	}
#endif

	return 0;
}
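
/* newlink handler: apply a user supplied bridge id (IFLA_ADDRESS) under
 * the bridge lock, then register the new bridge net_device.
 */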
static int br_dev_newlink(struct net *src_net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[])
{
	struct net_bridge *br = netdev_priv(dev);

	if (tb[IFLA_ADDRESS]) {
		spin_lock_bh(&br->lock);
		br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
		spin_unlock_bh(&br->lock);
	}

	return register_netdevice(dev);
}

static int br_port_slave_changelink(struct net_device *brdev,
				    struct net_device *dev,
				    struct nlattr *tb[],
				    struct nlattr *data[])
{
	struct net_bridge *br = netdev_priv(brdev);
	int ret;

	if (!data)
		return 0;

	spin_lock_bh(&br->lock);
	ret = br_setport(br_port_get_rtnl(dev), data);
	spin_unlock_bh(&br->lock);

	return ret;
}

static int br_port_fill_slave_info(struct sk_buff *skb,
				   const struct net_device *brdev,
				   const struct net_device *dev)
{
	return br_port_fill_attrs(skb, br_port_get_rtnl(dev));
}

static size_t br_port_get_slave_size(const struct net_device *brdev,
				     const struct net_device *dev)
{
	return br_port_info_size();
}

static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = {
	[IFLA_BR_FORWARD_DELAY] = { .type = NLA_U32 },
	[IFLA_BR_HELLO_TIME] = { .type = NLA_U32 },
	[IFLA_BR_MAX_AGE] = { .type = NLA_U32 },
	[IFLA_BR_AGEING_TIME] = { .type = NLA_U32 },
	[IFLA_BR_STP_STATE] = { .type = NLA_U32 },
	[IFLA_BR_PRIORITY] = { .type = NLA_U16 },
	[IFLA_BR_VLAN_FILTERING] = { .type = NLA_U8 },
	[IFLA_BR_VLAN_PROTOCOL] = { .type = NLA_U16 },
	[IFLA_BR_GROUP_FWD_MASK] = { .type = NLA_U16 },
	[IFLA_BR_GROUP_ADDR] = { .type = NLA_BINARY,
				 .len  = ETH_ALEN },
	[IFLA_BR_MCAST_ROUTER] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_SNOOPING] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_QUERY_USE_IFADDR] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_QUERIER] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_HASH_ELASTICITY] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_HASH_MAX] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_LAST_MEMBER_CNT] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_STARTUP_QUERY_CNT] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_LAST_MEMBER_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_MEMBERSHIP_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERIER_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERY_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_STARTUP_QUERY_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_NF_CALL_IPTABLES] = { .type = NLA_U8 },
	[IFLA_BR_NF_CALL_IP6TABLES] = { .type = NLA_U8 },
	[IFLA_BR_NF_CALL_ARPTABLES] = { .type = NLA_U8 },
	[IFLA_BR_VLAN_DEFAULT_PVID] = { .type = NLA_U16 },
	[IFLA_BR_VLAN_STATS_ENABLED] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_STATS_ENABLED] = { .type = NLA_U8 },
};
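
/* changelink handler for the bridge device: apply every IFLA_BR_*
 * attribute present in @data, bailing out on the first error.  Covers
 * STP parameters, vlan filtering, group address and forward mask, and
 * the IGMP snooping and br_netfilter toggles.
 */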
static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
			 struct nlattr *data[])
{
	struct net_bridge *br = netdev_priv(brdev);
	int err;

	if (!data)
		return 0;

	if (data[IFLA_BR_FORWARD_DELAY]) {
		err = br_set_forward_delay(br, nla_get_u32(data[IFLA_BR_FORWARD_DELAY]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_HELLO_TIME]) {
		err = br_set_hello_time(br, nla_get_u32(data[IFLA_BR_HELLO_TIME]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_MAX_AGE]) {
		err = br_set_max_age(br, nla_get_u32(data[IFLA_BR_MAX_AGE]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_AGEING_TIME]) {
		err = br_set_ageing_time(br, nla_get_u32(data[IFLA_BR_AGEING_TIME]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_STP_STATE]) {
		u32 stp_enabled = nla_get_u32(data[IFLA_BR_STP_STATE]);

		br_stp_set_enabled(br, stp_enabled);
	}

	if (data[IFLA_BR_PRIORITY]) {
		u32 priority = nla_get_u16(data[IFLA_BR_PRIORITY]);

		br_stp_set_bridge_priority(br, priority);
	}

	if (data[IFLA_BR_VLAN_FILTERING]) {
		u8 vlan_filter = nla_get_u8(data[IFLA_BR_VLAN_FILTERING]);

		err = __br_vlan_filter_toggle(br, vlan_filter);
		if (err)
			return err;
	}

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (data[IFLA_BR_VLAN_PROTOCOL]) {
		__be16 vlan_proto = nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL]);

		err = __br_vlan_set_proto(br, vlan_proto);
		if (err)
			return err;
	}

	if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
		__u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);

		err = __br_vlan_set_default_pvid(br, defpvid);
		if (err)
			return err;
	}

	if (data[IFLA_BR_VLAN_STATS_ENABLED]) {
		__u8 vlan_stats = nla_get_u8(data[IFLA_BR_VLAN_STATS_ENABLED]);

		err = br_vlan_set_stats(br, vlan_stats);
		if (err)
			return err;
	}
#endif

	if (data[IFLA_BR_GROUP_FWD_MASK]) {
		u16 fwd_mask = nla_get_u16(data[IFLA_BR_GROUP_FWD_MASK]);

		if (fwd_mask & BR_GROUPFWD_RESTRICTED)
			return -EINVAL;
		br->group_fwd_mask = fwd_mask;
	}

	if (data[IFLA_BR_GROUP_ADDR]) {
		u8 new_addr[ETH_ALEN];

		if (nla_len(data[IFLA_BR_GROUP_ADDR]) != ETH_ALEN)
			return -EINVAL;
		memcpy(new_addr, nla_data(data[IFLA_BR_GROUP_ADDR]), ETH_ALEN);
		if (!is_link_local_ether_addr(new_addr))
			return -EINVAL;
		if (new_addr[5] == 1 ||		/* 802.3x Pause address */
		    new_addr[5] == 2 ||		/* 802.3ad Slow protocols */
		    new_addr[5] == 3)		/* 802.1X PAE address */
			return -EINVAL;
		spin_lock_bh(&br->lock);
		memcpy(br->group_addr, new_addr, sizeof(br->group_addr));
		spin_unlock_bh(&br->lock);
		br->group_addr_set = true;
		br_recalculate_fwd_mask(br);
	}

	if (data[IFLA_BR_FDB_FLUSH])
		br_fdb_flush(br);

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (data[IFLA_BR_MCAST_ROUTER]) {
		u8 multicast_router = nla_get_u8(data[IFLA_BR_MCAST_ROUTER]);

		err = br_multicast_set_router(br, multicast_router);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_SNOOPING]) {
		u8 mcast_snooping = nla_get_u8(data[IFLA_BR_MCAST_SNOOPING]);

		err = br_multicast_toggle(br, mcast_snooping);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_QUERY_USE_IFADDR]) {
		u8 val;

		val = nla_get_u8(data[IFLA_BR_MCAST_QUERY_USE_IFADDR]);
		br->multicast_query_use_ifaddr = !!val;
	}

	if (data[IFLA_BR_MCAST_QUERIER]) {
		u8 mcast_querier = nla_get_u8(data[IFLA_BR_MCAST_QUERIER]);

		err = br_multicast_set_querier(br, mcast_querier);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_HASH_ELASTICITY]) {
		u32 val = nla_get_u32(data[IFLA_BR_MCAST_HASH_ELASTICITY]);

		br->hash_elasticity = val;
	}

	if (data[IFLA_BR_MCAST_HASH_MAX]) {
		u32 hash_max = nla_get_u32(data[IFLA_BR_MCAST_HASH_MAX]);

		err = br_multicast_set_hash_max(br, hash_max);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_LAST_MEMBER_CNT]) {
		u32 val = nla_get_u32(data[IFLA_BR_MCAST_LAST_MEMBER_CNT]);

		br->multicast_last_member_count = val;
	}

	if (data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]) {
		u32 val = nla_get_u32(data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]);

		br->multicast_startup_query_count = val;
	}

	if (data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]);

		br->multicast_last_member_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]);

		br->multicast_membership_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_QUERIER_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERIER_INTVL]);

		br->multicast_querier_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_QUERY_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_INTVL]);

		br->multicast_query_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]);

		br->multicast_query_response_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]);

		br->multicast_startup_query_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_STATS_ENABLED]) {
		__u8 mcast_stats;

		mcast_stats = nla_get_u8(data[IFLA_BR_MCAST_STATS_ENABLED]);
		br->multicast_stats_enabled = !!mcast_stats;
	}
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (data[IFLA_BR_NF_CALL_IPTABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IPTABLES]);

		br->nf_call_iptables = val ? true : false;
	}

	if (data[IFLA_BR_NF_CALL_IP6TABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IP6TABLES]);

		br->nf_call_ip6tables = val ? true : false;
	}

	if (data[IFLA_BR_NF_CALL_ARPTABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_ARPTABLES]);

		br->nf_call_arptables = val ? true : false;
	}
#endif

	return 0;
}
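
/* Upper bound on the attribute payload br_fill_info() may emit for a
 * bridge device; must stay in sync with the attributes filled in below.
 */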
static size_t br_get_size(const struct net_device *brdev)
{
	return nla_total_size(sizeof(u32)) +	/* IFLA_BR_FORWARD_DELAY */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_HELLO_TIME */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MAX_AGE */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_AGEING_TIME */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_STP_STATE */
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_PRIORITY */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_VLAN_FILTERING */
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	       nla_total_size(sizeof(__be16)) +	/* IFLA_BR_VLAN_PROTOCOL */
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_VLAN_DEFAULT_PVID */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_VLAN_STATS_ENABLED */
#endif
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_GROUP_FWD_MASK */
	       nla_total_size(sizeof(struct ifla_bridge_id)) +	/* IFLA_BR_ROOT_ID */
	       nla_total_size(sizeof(struct ifla_bridge_id)) +	/* IFLA_BR_BRIDGE_ID */
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_ROOT_PORT */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_ROOT_PATH_COST */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_TOPOLOGY_CHANGE */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_TOPOLOGY_CHANGE_DETECTED */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_HELLO_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TCN_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TOPOLOGY_CHANGE_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_GC_TIMER */
	       nla_total_size(ETH_ALEN) +	/* IFLA_BR_GROUP_ADDR */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_ROUTER */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_SNOOPING */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_QUERY_USE_IFADDR */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_QUERIER */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_STATS_ENABLED */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_HASH_ELASTICITY */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_HASH_MAX */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_LAST_MEMBER_CNT */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_STARTUP_QUERY_CNT */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_LAST_MEMBER_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_MEMBERSHIP_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERIER_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_RESPONSE_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_STARTUP_QUERY_INTVL */
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_NF_CALL_IPTABLES */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_NF_CALL_IP6TABLES */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_NF_CALL_ARPTABLES */
#endif
	       0;
}
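
/* Fill the IFLA_BR_* attributes describing the bridge itself: STP
 * parameters and timers, vlan settings, multicast settings and the
 * br_netfilter call flags.
 */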
static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
{
	struct net_bridge *br = netdev_priv(brdev);
	u32 forward_delay = jiffies_to_clock_t(br->forward_delay);
	u32 hello_time = jiffies_to_clock_t(br->hello_time);
	u32 age_time = jiffies_to_clock_t(br->max_age);
	u32 ageing_time = jiffies_to_clock_t(br->ageing_time);
	u32 stp_enabled = br->stp_enabled;
	u16 priority = (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1];
	u8 vlan_enabled = br_vlan_enabled(br);
	u64 clockval;

	clockval = br_timer_value(&br->hello_timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_HELLO_TIMER, clockval, IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = br_timer_value(&br->tcn_timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_TCN_TIMER, clockval, IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = br_timer_value(&br->topology_change_timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_TOPOLOGY_CHANGE_TIMER, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = br_timer_value(&br->gc_timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_GC_TIMER, clockval, IFLA_BR_PAD))
		return -EMSGSIZE;

	if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) ||
	    nla_put_u32(skb, IFLA_BR_HELLO_TIME, hello_time) ||
	    nla_put_u32(skb, IFLA_BR_MAX_AGE, age_time) ||
	    nla_put_u32(skb, IFLA_BR_AGEING_TIME, ageing_time) ||
	    nla_put_u32(skb, IFLA_BR_STP_STATE, stp_enabled) ||
	    nla_put_u16(skb, IFLA_BR_PRIORITY, priority) ||
	    nla_put_u8(skb, IFLA_BR_VLAN_FILTERING, vlan_enabled) ||
	    nla_put_u16(skb, IFLA_BR_GROUP_FWD_MASK, br->group_fwd_mask) ||
	    nla_put(skb, IFLA_BR_BRIDGE_ID, sizeof(struct ifla_bridge_id),
		    &br->bridge_id) ||
	    nla_put(skb, IFLA_BR_ROOT_ID, sizeof(struct ifla_bridge_id),
		    &br->designated_root) ||
	    nla_put_u16(skb, IFLA_BR_ROOT_PORT, br->root_port) ||
	    nla_put_u32(skb, IFLA_BR_ROOT_PATH_COST, br->root_path_cost) ||
	    nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE, br->topology_change) ||
	    nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
		       br->topology_change_detected) ||
	    nla_put(skb, IFLA_BR_GROUP_ADDR, ETH_ALEN, br->group_addr))
		return -EMSGSIZE;

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (nla_put_be16(skb, IFLA_BR_VLAN_PROTOCOL, br->vlan_proto) ||
	    nla_put_u16(skb, IFLA_BR_VLAN_DEFAULT_PVID, br->default_pvid) ||
	    nla_put_u8(skb, IFLA_BR_VLAN_STATS_ENABLED, br->vlan_stats_enabled))
		return -EMSGSIZE;
#endif
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (nla_put_u8(skb, IFLA_BR_MCAST_ROUTER, br->multicast_router) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_SNOOPING, !br->multicast_disabled) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_QUERY_USE_IFADDR,
		       br->multicast_query_use_ifaddr) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_QUERIER, br->multicast_querier) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_STATS_ENABLED,
		       br->multicast_stats_enabled) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_ELASTICITY,
			br->hash_elasticity) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_MAX, br->hash_max) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_LAST_MEMBER_CNT,
			br->multicast_last_member_count) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_STARTUP_QUERY_CNT,
			br->multicast_startup_query_count))
		return -EMSGSIZE;

	clockval = jiffies_to_clock_t(br->multicast_last_member_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_LAST_MEMBER_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_membership_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_MEMBERSHIP_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_querier_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERIER_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_query_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_query_response_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_startup_query_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_STARTUP_QUERY_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (nla_put_u8(skb, IFLA_BR_NF_CALL_IPTABLES,
		       br->nf_call_iptables ? 1 : 0) ||
	    nla_put_u8(skb, IFLA_BR_NF_CALL_IP6TABLES,
		       br->nf_call_ip6tables ? 1 : 0) ||
	    nla_put_u8(skb, IFLA_BR_NF_CALL_ARPTABLES,
		       br->nf_call_arptables ? 1 : 0))
		return -EMSGSIZE;
#endif

	return 0;
}

static size_t bridge_get_linkxstats_size(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	int numvls = 0;

	vg = br_vlan_group(br);
	if (vg) {
		/* we need to count all, even placeholder entries */
		list_for_each_entry(v, &vg->vlan_list, vlist)
			numvls++;
	}

	return numvls * nla_total_size(sizeof(struct bridge_vlan_xstats)) +
	       nla_total_size(sizeof(struct br_mcast_stats)) +
	       nla_total_size(0);
}

static size_t brport_get_linkxstats_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(struct br_mcast_stats)) +
	       nla_total_size(0);
}

static size_t br_get_linkxstats_size(const struct net_device *dev, int attr)
{
	size_t retsize = 0;

	switch (attr) {
	case IFLA_STATS_LINK_XSTATS:
		retsize = bridge_get_linkxstats_size(dev);
		break;
	case IFLA_STATS_LINK_XSTATS_SLAVE:
		retsize = brport_get_linkxstats_size(dev);
		break;
	}

	return retsize;
}
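
/* Dump the LINK_XSTATS_TYPE_BRIDGE nest for the bridge device: per-vlan
 * traffic counters followed by the multicast/IGMP counters.  @prividx
 * lets a partial dump resume at the right vlan.
 */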
static int bridge_fill_linkxstats(struct sk_buff *skb,
				  const struct net_device *dev,
				  int *prividx)
{
	struct net_bridge *br = netdev_priv(dev);
	struct nlattr *nla __maybe_unused;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct nlattr *nest;
	int vl_idx = 0;

	nest = nla_nest_start(skb, LINK_XSTATS_TYPE_BRIDGE);
	if (!nest)
		return -EMSGSIZE;

	vg = br_vlan_group(br);
	if (vg) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			struct bridge_vlan_xstats vxi;
			struct br_vlan_stats stats;

			if (++vl_idx < *prividx)
				continue;
			memset(&vxi, 0, sizeof(vxi));
			vxi.vid = v->vid;
			br_vlan_get_stats(v, &stats);
			vxi.rx_bytes = stats.rx_bytes;
			vxi.rx_packets = stats.rx_packets;
			vxi.tx_bytes = stats.tx_bytes;
			vxi.tx_packets = stats.tx_packets;
			if (nla_put(skb, BRIDGE_XSTATS_VLAN, sizeof(vxi), &vxi))
				goto nla_put_failure;
		}
	}
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (++vl_idx >= *prividx) {
		nla = nla_reserve_64bit(skb, BRIDGE_XSTATS_MCAST,
					sizeof(struct br_mcast_stats),
					BRIDGE_XSTATS_PAD);
		if (!nla)
			goto nla_put_failure;
		br_multicast_get_stats(br, NULL, nla_data(nla));
	}
#endif
	nla_nest_end(skb, nest);
	*prividx = 0;

	return 0;

nla_put_failure:
	nla_nest_end(skb, nest);
	*prividx = vl_idx;

	return -EMSGSIZE;
}

static int brport_fill_linkxstats(struct sk_buff *skb,
				  const struct net_device *dev,
				  int *prividx)
{
	struct net_bridge_port *p = br_port_get_rtnl(dev);
	struct nlattr *nla __maybe_unused;
	struct nlattr *nest;

	if (!p)
		return 0;

	nest = nla_nest_start(skb, LINK_XSTATS_TYPE_BRIDGE);
	if (!nest)
		return -EMSGSIZE;
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	nla = nla_reserve_64bit(skb, BRIDGE_XSTATS_MCAST,
				sizeof(struct br_mcast_stats),
				BRIDGE_XSTATS_PAD);
	if (!nla) {
		nla_nest_end(skb, nest);
		return -EMSGSIZE;
	}
	br_multicast_get_stats(p->br, p, nla_data(nla));
#endif
	nla_nest_end(skb, nest);

	return 0;
}

static int br_fill_linkxstats(struct sk_buff *skb, const struct net_device *dev,
			      int *prividx, int attr)
{
	int ret = -EINVAL;

	switch (attr) {
	case IFLA_STATS_LINK_XSTATS:
		ret = bridge_fill_linkxstats(skb, dev, prividx);
		break;
	case IFLA_STATS_LINK_XSTATS_SLAVE:
		ret = brport_fill_linkxstats(skb, dev, prividx);
		break;
	}

	return ret;
}

static struct rtnl_af_ops br_af_ops __read_mostly = {
	.family			= AF_BRIDGE,
	.get_link_af_size	= br_get_link_af_size_filtered,
};

struct rtnl_link_ops br_link_ops __read_mostly = {
	.kind			= "bridge",
	.priv_size		= sizeof(struct net_bridge),
	.setup			= br_dev_setup,
	.maxtype		= IFLA_BR_MAX,
	.policy			= br_policy,
	.validate		= br_validate,
	.newlink		= br_dev_newlink,
	.changelink		= br_changelink,
	.dellink		= br_dev_delete,
	.get_size		= br_get_size,
	.fill_info		= br_fill_info,
	.fill_linkxstats	= br_fill_linkxstats,
	.get_linkxstats_size	= br_get_linkxstats_size,

	.slave_maxtype		= IFLA_BRPORT_MAX,
	.slave_policy		= br_port_policy,
	.slave_changelink	= br_port_slave_changelink,
	.get_slave_size		= br_port_get_slave_size,
	.fill_slave_info	= br_port_fill_slave_info,
};
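
/* Register the AF_BRIDGE rtnetlink pieces: the MDB handlers, the
 * address-family ops and the "bridge" link type.  On failure the
 * already-registered pieces are torn down again.
 */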
int __init br_netlink_init(void)
{
	int err;

	br_mdb_init();
	rtnl_af_register(&br_af_ops);

	err = rtnl_link_register(&br_link_ops);
	if (err)
		goto out_af;

	return 0;

out_af:
	rtnl_af_unregister(&br_af_ops);
	br_mdb_uninit();
	return err;
}

void br_netlink_fini(void)
{
	br_mdb_uninit();
	rtnl_af_unregister(&br_af_ops);
	rtnl_link_unregister(&br_link_ops);
}