
/*
 * drivers/net/bond/bond_netlink.c - Netlink interface for bonding
 * Copyright (c) 2013 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2013 Scott Feldman <sfeldma@cumulusnetworks.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_link.h>
#include <linux/if_ether.h>
#include <net/netlink.h>
#include <net/rtnetlink.h>
#include "bonding.h"

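/* Worst-case attribute space needed for one slave's IFLA_BOND_SLAVE_*
 * attributes when a link dump is being sized.
 */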
static size_t bond_get_slave_size(const struct net_device *bond_dev,
				  const struct net_device *slave_dev)
{
	return nla_total_size(sizeof(u8)) +	/* IFLA_BOND_SLAVE_STATE */
		nla_total_size(sizeof(u8)) +	/* IFLA_BOND_SLAVE_MII_STATUS */
		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_SLAVE_LINK_FAILURE_COUNT */
		nla_total_size(MAX_ADDR_LEN) +	/* IFLA_BOND_SLAVE_PERM_HWADDR */
		nla_total_size(sizeof(u16)) +	/* IFLA_BOND_SLAVE_QUEUE_ID */
		nla_total_size(sizeof(u16)) +	/* IFLA_BOND_SLAVE_AD_AGGREGATOR_ID */
		0;
}

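/* Fill the IFLA_BOND_SLAVE_* attributes for one slave: state, MII status,
 * link failure count, permanent hwaddr, queue id and, in 802.3ad mode,
 * its aggregator id.
 */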
static int bond_fill_slave_info(struct sk_buff *skb,
				const struct net_device *bond_dev,
				const struct net_device *slave_dev)
{
	struct slave *slave = bond_slave_get_rtnl(slave_dev);

	if (nla_put_u8(skb, IFLA_BOND_SLAVE_STATE, bond_slave_state(slave)))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_BOND_SLAVE_MII_STATUS, slave->link))
		goto nla_put_failure;

	if (nla_put_u32(skb, IFLA_BOND_SLAVE_LINK_FAILURE_COUNT,
			slave->link_failure_count))
		goto nla_put_failure;

	if (nla_put(skb, IFLA_BOND_SLAVE_PERM_HWADDR,
		    slave_dev->addr_len, slave->perm_hwaddr))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_BOND_SLAVE_QUEUE_ID, slave->queue_id))
		goto nla_put_failure;

	if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
		const struct aggregator *agg;

		agg = SLAVE_AD_INFO(slave)->port.aggregator;
		if (agg)
			if (nla_put_u16(skb, IFLA_BOND_SLAVE_AD_AGGREGATOR_ID,
					agg->aggregator_identifier))
				goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

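/* Attribute policies: rtnetlink validates incoming IFLA_BOND_* and
 * IFLA_BOND_SLAVE_* attributes against these before the handlers below run.
 */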
static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
	[IFLA_BOND_MODE]		= { .type = NLA_U8 },
	[IFLA_BOND_ACTIVE_SLAVE]	= { .type = NLA_U32 },
	[IFLA_BOND_MIIMON]		= { .type = NLA_U32 },
	[IFLA_BOND_UPDELAY]		= { .type = NLA_U32 },
	[IFLA_BOND_DOWNDELAY]		= { .type = NLA_U32 },
	[IFLA_BOND_USE_CARRIER]		= { .type = NLA_U8 },
	[IFLA_BOND_ARP_INTERVAL]	= { .type = NLA_U32 },
	[IFLA_BOND_ARP_IP_TARGET]	= { .type = NLA_NESTED },
	[IFLA_BOND_ARP_VALIDATE]	= { .type = NLA_U32 },
	[IFLA_BOND_ARP_ALL_TARGETS]	= { .type = NLA_U32 },
	[IFLA_BOND_PRIMARY]		= { .type = NLA_U32 },
	[IFLA_BOND_PRIMARY_RESELECT]	= { .type = NLA_U8 },
	[IFLA_BOND_FAIL_OVER_MAC]	= { .type = NLA_U8 },
	[IFLA_BOND_XMIT_HASH_POLICY]	= { .type = NLA_U8 },
	[IFLA_BOND_RESEND_IGMP]		= { .type = NLA_U32 },
	[IFLA_BOND_NUM_PEER_NOTIF]	= { .type = NLA_U8 },
	[IFLA_BOND_ALL_SLAVES_ACTIVE]	= { .type = NLA_U8 },
	[IFLA_BOND_MIN_LINKS]		= { .type = NLA_U32 },
	[IFLA_BOND_LP_INTERVAL]		= { .type = NLA_U32 },
	[IFLA_BOND_PACKETS_PER_SLAVE]	= { .type = NLA_U32 },
	[IFLA_BOND_AD_LACP_RATE]	= { .type = NLA_U8 },
	[IFLA_BOND_AD_SELECT]		= { .type = NLA_U8 },
	[IFLA_BOND_AD_INFO]		= { .type = NLA_NESTED },
};

static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = {
	[IFLA_BOND_SLAVE_QUEUE_ID]	= { .type = NLA_U16 },
};

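/* Only IFLA_ADDRESS needs checking here: it must be a valid unicast
 * Ethernet address of the right length.
 */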
static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}

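/* Per-slave changelink: only the queue id can be changed, and it is passed
 * to the bonding options API in its "<slave name>:<queue id>" string form.
 */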
static int bond_slave_changelink(struct net_device *bond_dev,
				 struct net_device *slave_dev,
				 struct nlattr *tb[], struct nlattr *data[])
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct bond_opt_value newval;
	int err;

	if (!data)
		return 0;

	if (data[IFLA_BOND_SLAVE_QUEUE_ID]) {
		u16 queue_id = nla_get_u16(data[IFLA_BOND_SLAVE_QUEUE_ID]);
		char queue_id_str[IFNAMSIZ + 7];

		/* queue_id option setting expects slave_name:queue_id */
		snprintf(queue_id_str, sizeof(queue_id_str), "%s:%u\n",
			 slave_dev->name, queue_id);
		bond_opt_initstr(&newval, queue_id_str);
		err = __bond_opt_set(bond, BOND_OPT_QUEUE_ID, &newval);
		if (err)
			return err;
	}

	return 0;
}

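/* Apply IFLA_BOND_* attributes to a bond by translating each one into a
 * __bond_opt_set() call, e.g. for a request like
 *	ip link set dev bond0 type bond miimon 100
 */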
static int bond_changelink(struct net_device *bond_dev,
			   struct nlattr *tb[], struct nlattr *data[])
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct bond_opt_value newval;
	int miimon = 0;
	int err;

	if (!data)
		return 0;

	if (data[IFLA_BOND_MODE]) {
		int mode = nla_get_u8(data[IFLA_BOND_MODE]);

		bond_opt_initval(&newval, mode);
		err = __bond_opt_set(bond, BOND_OPT_MODE, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_ACTIVE_SLAVE]) {
		int ifindex = nla_get_u32(data[IFLA_BOND_ACTIVE_SLAVE]);
		struct net_device *slave_dev;
		char *active_slave = "";

		if (ifindex != 0) {
			slave_dev = __dev_get_by_index(dev_net(bond_dev),
						       ifindex);
			if (!slave_dev)
				return -ENODEV;
			active_slave = slave_dev->name;
		}
		bond_opt_initstr(&newval, active_slave);
		err = __bond_opt_set(bond, BOND_OPT_ACTIVE_SLAVE, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_MIIMON]) {
		miimon = nla_get_u32(data[IFLA_BOND_MIIMON]);

		bond_opt_initval(&newval, miimon);
		err = __bond_opt_set(bond, BOND_OPT_MIIMON, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_UPDELAY]) {
		int updelay = nla_get_u32(data[IFLA_BOND_UPDELAY]);

		bond_opt_initval(&newval, updelay);
		err = __bond_opt_set(bond, BOND_OPT_UPDELAY, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_DOWNDELAY]) {
		int downdelay = nla_get_u32(data[IFLA_BOND_DOWNDELAY]);

		bond_opt_initval(&newval, downdelay);
		err = __bond_opt_set(bond, BOND_OPT_DOWNDELAY, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_USE_CARRIER]) {
		int use_carrier = nla_get_u8(data[IFLA_BOND_USE_CARRIER]);

		bond_opt_initval(&newval, use_carrier);
		err = __bond_opt_set(bond, BOND_OPT_USE_CARRIER, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_ARP_INTERVAL]) {
		int arp_interval = nla_get_u32(data[IFLA_BOND_ARP_INTERVAL]);

		if (arp_interval && miimon) {
			netdev_err(bond->dev, "ARP monitoring cannot be used with MII monitoring\n");
			return -EINVAL;
		}

		bond_opt_initval(&newval, arp_interval);
		err = __bond_opt_set(bond, BOND_OPT_ARP_INTERVAL, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_ARP_IP_TARGET]) {
		struct nlattr *attr;
		int i = 0, rem;

		bond_option_arp_ip_targets_clear(bond);
		nla_for_each_nested(attr, data[IFLA_BOND_ARP_IP_TARGET], rem) {
			__be32 target = nla_get_be32(attr);

			bond_opt_initval(&newval, (__force u64)target);
			err = __bond_opt_set(bond, BOND_OPT_ARP_TARGETS,
					     &newval);
			if (err)
				break;
			i++;
		}
		if (i == 0 && bond->params.arp_interval)
			netdev_warn(bond->dev, "Removing last arp target with arp_interval on\n");
		if (err)
			return err;
	}
	if (data[IFLA_BOND_ARP_VALIDATE]) {
		int arp_validate = nla_get_u32(data[IFLA_BOND_ARP_VALIDATE]);

		if (arp_validate && miimon) {
			netdev_err(bond->dev, "ARP validating cannot be used with MII monitoring\n");
			return -EINVAL;
		}

		bond_opt_initval(&newval, arp_validate);
		err = __bond_opt_set(bond, BOND_OPT_ARP_VALIDATE, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_ARP_ALL_TARGETS]) {
		int arp_all_targets =
			nla_get_u32(data[IFLA_BOND_ARP_ALL_TARGETS]);

		bond_opt_initval(&newval, arp_all_targets);
		err = __bond_opt_set(bond, BOND_OPT_ARP_ALL_TARGETS, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_PRIMARY]) {
		int ifindex = nla_get_u32(data[IFLA_BOND_PRIMARY]);
		struct net_device *dev;
		char *primary = "";

		dev = __dev_get_by_index(dev_net(bond_dev), ifindex);
		if (dev)
			primary = dev->name;

		bond_opt_initstr(&newval, primary);
		err = __bond_opt_set(bond, BOND_OPT_PRIMARY, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_PRIMARY_RESELECT]) {
		int primary_reselect =
			nla_get_u8(data[IFLA_BOND_PRIMARY_RESELECT]);

		bond_opt_initval(&newval, primary_reselect);
		err = __bond_opt_set(bond, BOND_OPT_PRIMARY_RESELECT, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_FAIL_OVER_MAC]) {
		int fail_over_mac =
			nla_get_u8(data[IFLA_BOND_FAIL_OVER_MAC]);

		bond_opt_initval(&newval, fail_over_mac);
		err = __bond_opt_set(bond, BOND_OPT_FAIL_OVER_MAC, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_XMIT_HASH_POLICY]) {
		int xmit_hash_policy =
			nla_get_u8(data[IFLA_BOND_XMIT_HASH_POLICY]);

		bond_opt_initval(&newval, xmit_hash_policy);
		err = __bond_opt_set(bond, BOND_OPT_XMIT_HASH, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_RESEND_IGMP]) {
		int resend_igmp =
			nla_get_u32(data[IFLA_BOND_RESEND_IGMP]);

		bond_opt_initval(&newval, resend_igmp);
		err = __bond_opt_set(bond, BOND_OPT_RESEND_IGMP, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_NUM_PEER_NOTIF]) {
		int num_peer_notif =
			nla_get_u8(data[IFLA_BOND_NUM_PEER_NOTIF]);

		bond_opt_initval(&newval, num_peer_notif);
		err = __bond_opt_set(bond, BOND_OPT_NUM_PEER_NOTIF, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_ALL_SLAVES_ACTIVE]) {
		int all_slaves_active =
			nla_get_u8(data[IFLA_BOND_ALL_SLAVES_ACTIVE]);

		bond_opt_initval(&newval, all_slaves_active);
		err = __bond_opt_set(bond, BOND_OPT_ALL_SLAVES_ACTIVE, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_MIN_LINKS]) {
		int min_links =
			nla_get_u32(data[IFLA_BOND_MIN_LINKS]);

		bond_opt_initval(&newval, min_links);
		err = __bond_opt_set(bond, BOND_OPT_MINLINKS, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_LP_INTERVAL]) {
		int lp_interval =
			nla_get_u32(data[IFLA_BOND_LP_INTERVAL]);

		bond_opt_initval(&newval, lp_interval);
		err = __bond_opt_set(bond, BOND_OPT_LP_INTERVAL, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_PACKETS_PER_SLAVE]) {
		int packets_per_slave =
			nla_get_u32(data[IFLA_BOND_PACKETS_PER_SLAVE]);

		bond_opt_initval(&newval, packets_per_slave);
		err = __bond_opt_set(bond, BOND_OPT_PACKETS_PER_SLAVE, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_AD_LACP_RATE]) {
		int lacp_rate =
			nla_get_u8(data[IFLA_BOND_AD_LACP_RATE]);

		bond_opt_initval(&newval, lacp_rate);
		err = __bond_opt_set(bond, BOND_OPT_LACP_RATE, &newval);
		if (err)
			return err;
	}
	if (data[IFLA_BOND_AD_SELECT]) {
		int ad_select =
			nla_get_u8(data[IFLA_BOND_AD_SELECT]);

		bond_opt_initval(&newval, ad_select);
		err = __bond_opt_set(bond, BOND_OPT_AD_SELECT, &newval);
		if (err)
			return err;
	}
	return 0;
}

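/* newlink applies the requested options before registering the device, so
 * an invalid option fails the whole creation.
 */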
static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
			struct nlattr *tb[], struct nlattr *data[])
{
	int err;

	err = bond_changelink(bond_dev, tb, data);
	if (err < 0)
		return err;

	return register_netdevice(bond_dev);
}

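/* Worst-case attribute space for bond_fill_info(), including the nested
 * ARP target list and the optional 802.3ad info block.
 */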
static size_t bond_get_size(const struct net_device *bond_dev)
{
	return nla_total_size(sizeof(u8)) +	/* IFLA_BOND_MODE */
		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_ACTIVE_SLAVE */
		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_MIIMON */
		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_UPDELAY */
		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_DOWNDELAY */
		nla_total_size(sizeof(u8)) +	/* IFLA_BOND_USE_CARRIER */
		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_ARP_INTERVAL */
						/* IFLA_BOND_ARP_IP_TARGET */
		nla_total_size(sizeof(struct nlattr)) +
		nla_total_size(sizeof(u32)) * BOND_MAX_ARP_TARGETS +
		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_ARP_VALIDATE */
		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_ARP_ALL_TARGETS */
		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_PRIMARY */
		nla_total_size(sizeof(u8)) +	/* IFLA_BOND_PRIMARY_RESELECT */
		nla_total_size(sizeof(u8)) +	/* IFLA_BOND_FAIL_OVER_MAC */
		nla_total_size(sizeof(u8)) +	/* IFLA_BOND_XMIT_HASH_POLICY */
		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_RESEND_IGMP */
		nla_total_size(sizeof(u8)) +	/* IFLA_BOND_NUM_PEER_NOTIF */
		nla_total_size(sizeof(u8)) +	/* IFLA_BOND_ALL_SLAVES_ACTIVE */
		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_MIN_LINKS */
		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_LP_INTERVAL */
		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_PACKETS_PER_SLAVE */
		nla_total_size(sizeof(u8)) +	/* IFLA_BOND_AD_LACP_RATE */
		nla_total_size(sizeof(u8)) +	/* IFLA_BOND_AD_SELECT */
		nla_total_size(sizeof(struct nlattr)) + /* IFLA_BOND_AD_INFO */
		nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_AGGREGATOR */
		nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_NUM_PORTS */
		nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_ACTOR_KEY */
		nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_PARTNER_KEY*/
		nla_total_size(ETH_ALEN) +    /* IFLA_BOND_AD_INFO_PARTNER_MAC*/
		0;
}

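/* Return the ifindex of the currently active slave (0 if none), looked up
 * under RCU.
 */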
static int bond_option_active_slave_get_ifindex(struct bonding *bond)
{
	const struct net_device *slave;
	int ifindex;

	rcu_read_lock();
	slave = bond_option_active_slave_get_rcu(bond);
	ifindex = slave ? slave->ifindex : 0;
	rcu_read_unlock();
	return ifindex;
}

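/* Dump the bond's current configuration as IFLA_BOND_* attributes; in
 * 802.3ad mode the active aggregator's info is nested under
 * IFLA_BOND_AD_INFO.
 */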
static int bond_fill_info(struct sk_buff *skb,
			  const struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	unsigned int packets_per_slave;
	int ifindex, i, targets_added;
	struct nlattr *targets;
	struct slave *primary;

	if (nla_put_u8(skb, IFLA_BOND_MODE, BOND_MODE(bond)))
		goto nla_put_failure;

	ifindex = bond_option_active_slave_get_ifindex(bond);
	if (ifindex && nla_put_u32(skb, IFLA_BOND_ACTIVE_SLAVE, ifindex))
		goto nla_put_failure;

	if (nla_put_u32(skb, IFLA_BOND_MIIMON, bond->params.miimon))
		goto nla_put_failure;

	if (nla_put_u32(skb, IFLA_BOND_UPDELAY,
			bond->params.updelay * bond->params.miimon))
		goto nla_put_failure;

	if (nla_put_u32(skb, IFLA_BOND_DOWNDELAY,
			bond->params.downdelay * bond->params.miimon))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_BOND_USE_CARRIER, bond->params.use_carrier))
		goto nla_put_failure;

	if (nla_put_u32(skb, IFLA_BOND_ARP_INTERVAL, bond->params.arp_interval))
		goto nla_put_failure;

	targets = nla_nest_start(skb, IFLA_BOND_ARP_IP_TARGET);
	if (!targets)
		goto nla_put_failure;

	targets_added = 0;
	for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) {
		if (bond->params.arp_targets[i]) {
			nla_put_be32(skb, i, bond->params.arp_targets[i]);
			targets_added = 1;
		}
	}

	if (targets_added)
		nla_nest_end(skb, targets);
	else
		nla_nest_cancel(skb, targets);

	if (nla_put_u32(skb, IFLA_BOND_ARP_VALIDATE, bond->params.arp_validate))
		goto nla_put_failure;

	if (nla_put_u32(skb, IFLA_BOND_ARP_ALL_TARGETS,
			bond->params.arp_all_targets))
		goto nla_put_failure;

	primary = rtnl_dereference(bond->primary_slave);
	if (primary &&
	    nla_put_u32(skb, IFLA_BOND_PRIMARY, primary->dev->ifindex))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_BOND_PRIMARY_RESELECT,
		       bond->params.primary_reselect))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_BOND_FAIL_OVER_MAC,
		       bond->params.fail_over_mac))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_BOND_XMIT_HASH_POLICY,
		       bond->params.xmit_policy))
		goto nla_put_failure;

	if (nla_put_u32(skb, IFLA_BOND_RESEND_IGMP,
			bond->params.resend_igmp))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_BOND_NUM_PEER_NOTIF,
		       bond->params.num_peer_notif))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_BOND_ALL_SLAVES_ACTIVE,
		       bond->params.all_slaves_active))
		goto nla_put_failure;

	if (nla_put_u32(skb, IFLA_BOND_MIN_LINKS,
			bond->params.min_links))
		goto nla_put_failure;

	if (nla_put_u32(skb, IFLA_BOND_LP_INTERVAL,
			bond->params.lp_interval))
		goto nla_put_failure;

	packets_per_slave = bond->params.packets_per_slave;
	if (nla_put_u32(skb, IFLA_BOND_PACKETS_PER_SLAVE,
			packets_per_slave))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_BOND_AD_LACP_RATE,
		       bond->params.lacp_fast))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_BOND_AD_SELECT,
		       bond->params.ad_select))
		goto nla_put_failure;

	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
		struct ad_info info;

		if (!bond_3ad_get_active_agg_info(bond, &info)) {
			struct nlattr *nest;

			nest = nla_nest_start(skb, IFLA_BOND_AD_INFO);
			if (!nest)
				goto nla_put_failure;

			if (nla_put_u16(skb, IFLA_BOND_AD_INFO_AGGREGATOR,
					info.aggregator_id))
				goto nla_put_failure;
			if (nla_put_u16(skb, IFLA_BOND_AD_INFO_NUM_PORTS,
					info.ports))
				goto nla_put_failure;
			if (nla_put_u16(skb, IFLA_BOND_AD_INFO_ACTOR_KEY,
					info.actor_key))
				goto nla_put_failure;
			if (nla_put_u16(skb, IFLA_BOND_AD_INFO_PARTNER_KEY,
					info.partner_key))
				goto nla_put_failure;
			if (nla_put(skb, IFLA_BOND_AD_INFO_PARTNER_MAC,
				    sizeof(info.partner_system),
				    &info.partner_system))
				goto nla_put_failure;

			nla_nest_end(skb, nest);
		}
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

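/* rtnl_link_ops glue: registering this makes bond devices creatable and
 * configurable over rtnetlink, e.g. via
 *	ip link add bond0 type bond mode 802.3ad
 */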
struct rtnl_link_ops bond_link_ops __read_mostly = {
	.kind			= "bond",
	.priv_size		= sizeof(struct bonding),
	.setup			= bond_setup,
	.maxtype		= IFLA_BOND_MAX,
	.policy			= bond_policy,
	.validate		= bond_validate,
	.newlink		= bond_newlink,
	.changelink		= bond_changelink,
	.get_size		= bond_get_size,
	.fill_info		= bond_fill_info,
	.get_num_tx_queues	= bond_get_num_tx_queues,
	.get_num_rx_queues	= bond_get_num_tx_queues, /* Use the same number
							     as for TX queues */
	.slave_maxtype		= IFLA_BOND_SLAVE_MAX,
	.slave_policy		= bond_slave_policy,
	.slave_changelink	= bond_slave_changelink,
	.get_slave_size		= bond_get_slave_size,
	.fill_slave_info	= bond_fill_slave_info,
};

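/* Register/unregister the rtnetlink ops when the bonding module is
 * loaded and unloaded.
 */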
int __init bond_netlink_init(void)
{
	return rtnl_link_register(&bond_link_ops);
}

void bond_netlink_fini(void)
{
	rtnl_link_unregister(&bond_link_ops);
}

MODULE_ALIAS_RTNL_LINK("bond");