net-sysfs.c

/*
 * net-sysfs.c - network device class and attributes
 *
 * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/nsproxy.h>
#include <net/sock.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/pm_runtime.h>

#include "net-sysfs.h"

#ifdef CONFIG_SYSFS
static const char fmt_hex[] = "%#x\n";
static const char fmt_long_hex[] = "%#lx\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_udec[] = "%u\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";
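
/* A netdev counts as "alive" while reg_state is NETREG_UNINITIALIZED or
 * NETREG_REGISTERED; every later value in the enum (NETREG_UNREGISTERING
 * onward) means the device is being torn down, which is what makes the
 * ordered <= comparison below sufficient.
 */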
static inline int dev_isalive(const struct net_device *dev)
{
        return dev->reg_state <= NETREG_REGISTERED;
}

/* use same locking rules as GIF* ioctl's */
static ssize_t netdev_show(const struct device *dev,
                           struct device_attribute *attr, char *buf,
                           ssize_t (*format)(const struct net_device *, char *))
{
        struct net_device *net = to_net_dev(dev);
        ssize_t ret = -EINVAL;

        read_lock(&dev_base_lock);
        if (dev_isalive(net))
                ret = (*format)(net, buf);
        read_unlock(&dev_base_lock);

        return ret;
}

/* generate a show function for simple field */
#define NETDEVICE_SHOW(field, format_string)                            \
static ssize_t format_##field(const struct net_device *net, char *buf) \
{                                                                       \
        return sprintf(buf, format_string, net->field);                 \
}                                                                       \
static ssize_t field##_show(struct device *dev,                         \
                            struct device_attribute *attr, char *buf)   \
{                                                                       \
        return netdev_show(dev, attr, buf, format_##field);             \
}

#define NETDEVICE_SHOW_RO(field, format_string)                         \
NETDEVICE_SHOW(field, format_string);                                   \
static DEVICE_ATTR_RO(field)

#define NETDEVICE_SHOW_RW(field, format_string)                         \
NETDEVICE_SHOW(field, format_string);                                   \
static DEVICE_ATTR_RW(field)
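
/* Worked expansion (sketch for exposition, not part of the original
 * source): NETDEVICE_SHOW_RO(dev_id, fmt_hex) expands roughly to
 *
 *      static ssize_t format_dev_id(const struct net_device *net, char *buf)
 *      {
 *              return sprintf(buf, fmt_hex, net->dev_id);
 *      }
 *      static ssize_t dev_id_show(struct device *dev,
 *                                 struct device_attribute *attr, char *buf)
 *      {
 *              return netdev_show(dev, attr, buf, format_dev_id);
 *      }
 *      static DEVICE_ATTR_RO(dev_id);
 *
 * where DEVICE_ATTR_RO() supplies the read-only (0444)
 * struct device_attribute dev_attr_dev_id referenced in the attribute
 * tables further down.
 */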

/* use same locking and permission rules as SIF* ioctl's */
static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t len,
                            int (*set)(struct net_device *, unsigned long))
{
        struct net_device *netdev = to_net_dev(dev);
        struct net *net = dev_net(netdev);
        unsigned long new;
        int ret = -EINVAL;

        if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                return -EPERM;

        ret = kstrtoul(buf, 0, &new);
        if (ret)
                goto err;

        if (!rtnl_trylock())
                return restart_syscall();

        if (dev_isalive(netdev)) {
                if ((ret = (*set)(netdev, new)) == 0)
                        ret = len;
        }
        rtnl_unlock();
 err:
        return ret;
}
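
/* Note on the rtnl_trylock()/restart_syscall() pattern used throughout
 * this file: blocking on rtnl_lock() from a sysfs handler can deadlock
 * against device teardown, which holds RTNL while removing these very
 * sysfs entries. Failing the trylock and restarting the syscall lets
 * the writer retry from scratch instead of pinning the attribute.
 */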

NETDEVICE_SHOW_RO(dev_id, fmt_hex);
NETDEVICE_SHOW_RO(dev_port, fmt_dec);
NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec);
NETDEVICE_SHOW_RO(addr_len, fmt_dec);
NETDEVICE_SHOW_RO(iflink, fmt_dec);
NETDEVICE_SHOW_RO(ifindex, fmt_dec);
NETDEVICE_SHOW_RO(type, fmt_dec);
NETDEVICE_SHOW_RO(link_mode, fmt_dec);

/* use same locking rules as GIFHWADDR ioctl's */
static ssize_t address_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        struct net_device *net = to_net_dev(dev);
        ssize_t ret = -EINVAL;

        read_lock(&dev_base_lock);
        if (dev_isalive(net))
                ret = sysfs_format_mac(buf, net->dev_addr, net->addr_len);
        read_unlock(&dev_base_lock);
        return ret;
}
static DEVICE_ATTR_RO(address);

static ssize_t broadcast_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        struct net_device *net = to_net_dev(dev);

        if (dev_isalive(net))
                return sysfs_format_mac(buf, net->broadcast, net->addr_len);
        return -EINVAL;
}
static DEVICE_ATTR_RO(broadcast);

static int change_carrier(struct net_device *net, unsigned long new_carrier)
{
        if (!netif_running(net))
                return -EINVAL;
        return dev_change_carrier(net, (bool) new_carrier);
}

static ssize_t carrier_store(struct device *dev, struct device_attribute *attr,
                             const char *buf, size_t len)
{
        return netdev_store(dev, attr, buf, len, change_carrier);
}

static ssize_t carrier_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);

        if (netif_running(netdev)) {
                return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev));
        }
        return -EINVAL;
}
static DEVICE_ATTR_RW(carrier);
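
/* From user space these attributes appear under /sys/class/net/<iface>/.
 * Illustrative shell session (interface name assumed, device must be
 * running for carrier to be readable):
 *
 *      $ cat /sys/class/net/eth0/carrier
 *      1
 *      # echo 0 > /sys/class/net/eth0/carrier   (needs CAP_NET_ADMIN)
 */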

static ssize_t speed_show(struct device *dev,
                          struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);
        int ret = -EINVAL;

        if (!rtnl_trylock())
                return restart_syscall();

        if (netif_running(netdev)) {
                struct ethtool_cmd cmd;
                if (!__ethtool_get_settings(netdev, &cmd))
                        ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd));
        }
        rtnl_unlock();
        return ret;
}
static DEVICE_ATTR_RO(speed);

static ssize_t duplex_show(struct device *dev,
                           struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);
        int ret = -EINVAL;

        if (!rtnl_trylock())
                return restart_syscall();

        if (netif_running(netdev)) {
                struct ethtool_cmd cmd;
                if (!__ethtool_get_settings(netdev, &cmd)) {
                        const char *duplex;
                        switch (cmd.duplex) {
                        case DUPLEX_HALF:
                                duplex = "half";
                                break;
                        case DUPLEX_FULL:
                                duplex = "full";
                                break;
                        default:
                                duplex = "unknown";
                                break;
                        }
                        ret = sprintf(buf, "%s\n", duplex);
                }
        }
        rtnl_unlock();
        return ret;
}
static DEVICE_ATTR_RO(duplex);

static ssize_t dormant_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);

        if (netif_running(netdev))
                return sprintf(buf, fmt_dec, !!netif_dormant(netdev));

        return -EINVAL;
}
static DEVICE_ATTR_RO(dormant);

static const char *const operstates[] = {
        "unknown",
        "notpresent", /* currently unused */
        "down",
        "lowerlayerdown",
        "testing", /* currently unused */
        "dormant",
        "up"
};

static ssize_t operstate_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        const struct net_device *netdev = to_net_dev(dev);
        unsigned char operstate;

        read_lock(&dev_base_lock);
        operstate = netdev->operstate;
        if (!netif_running(netdev))
                operstate = IF_OPER_DOWN;
        read_unlock(&dev_base_lock);

        if (operstate >= ARRAY_SIZE(operstates))
                return -EINVAL; /* should not happen */

        return sprintf(buf, "%s\n", operstates[operstate]);
}
static DEVICE_ATTR_RO(operstate);
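
/* The strings above are indexed by the IF_OPER_* values from
 * include/uapi/linux/if.h, which model the RFC 2863 (IF-MIB) interface
 * operational states; tools such as ip(8) report matching state names.
 */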

/* read-write attributes */

static int change_mtu(struct net_device *net, unsigned long new_mtu)
{
        return dev_set_mtu(net, (int) new_mtu);
}

static ssize_t mtu_store(struct device *dev, struct device_attribute *attr,
                         const char *buf, size_t len)
{
        return netdev_store(dev, attr, buf, len, change_mtu);
}
NETDEVICE_SHOW_RW(mtu, fmt_dec);

static int change_flags(struct net_device *net, unsigned long new_flags)
{
        return dev_change_flags(net, (unsigned int) new_flags);
}

static ssize_t flags_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t len)
{
        return netdev_store(dev, attr, buf, len, change_flags);
}
NETDEVICE_SHOW_RW(flags, fmt_hex);

static int change_tx_queue_len(struct net_device *net, unsigned long new_len)
{
        net->tx_queue_len = new_len;
        return 0;
}

static ssize_t tx_queue_len_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t len)
{
        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        return netdev_store(dev, attr, buf, len, change_tx_queue_len);
}
NETDEVICE_SHOW_RW(tx_queue_len, fmt_ulong);

static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr,
                             const char *buf, size_t len)
{
        struct net_device *netdev = to_net_dev(dev);
        struct net *net = dev_net(netdev);
        size_t count = len;
        ssize_t ret;

        if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                return -EPERM;

        /* ignore trailing newline */
        if (len > 0 && buf[len - 1] == '\n')
                --count;

        if (!rtnl_trylock())
                return restart_syscall();
        ret = dev_set_alias(netdev, buf, count);
        rtnl_unlock();

        return ret < 0 ? ret : len;
}

static ssize_t ifalias_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        const struct net_device *netdev = to_net_dev(dev);
        ssize_t ret = 0;

        if (!rtnl_trylock())
                return restart_syscall();
        if (netdev->ifalias)
                ret = sprintf(buf, "%s\n", netdev->ifalias);
        rtnl_unlock();
        return ret;
}
static DEVICE_ATTR_RW(ifalias);

static int change_group(struct net_device *net, unsigned long new_group)
{
        dev_set_group(net, (int) new_group);
        return 0;
}

static ssize_t group_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t len)
{
        return netdev_store(dev, attr, buf, len, change_group);
}
NETDEVICE_SHOW(group, fmt_dec);
static DEVICE_ATTR(netdev_group, S_IRUGO | S_IWUSR, group_show, group_store);

static ssize_t phys_port_id_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);
        ssize_t ret = -EINVAL;

        if (!rtnl_trylock())
                return restart_syscall();

        if (dev_isalive(netdev)) {
                struct netdev_phys_port_id ppid;

                ret = dev_get_phys_port_id(netdev, &ppid);
                if (!ret)
                        ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
        }
        rtnl_unlock();

        return ret;
}
static DEVICE_ATTR_RO(phys_port_id);

static struct attribute *net_class_attrs[] = {
        &dev_attr_netdev_group.attr,
        &dev_attr_type.attr,
        &dev_attr_dev_id.attr,
        &dev_attr_dev_port.attr,
        &dev_attr_iflink.attr,
        &dev_attr_ifindex.attr,
        &dev_attr_addr_assign_type.attr,
        &dev_attr_addr_len.attr,
        &dev_attr_link_mode.attr,
        &dev_attr_address.attr,
        &dev_attr_broadcast.attr,
        &dev_attr_speed.attr,
        &dev_attr_duplex.attr,
        &dev_attr_dormant.attr,
        &dev_attr_operstate.attr,
        &dev_attr_ifalias.attr,
        &dev_attr_carrier.attr,
        &dev_attr_mtu.attr,
        &dev_attr_flags.attr,
        &dev_attr_tx_queue_len.attr,
        &dev_attr_phys_port_id.attr,
        NULL,
};
ATTRIBUTE_GROUPS(net_class);

/* Show a given attribute in the statistics group */
static ssize_t netstat_show(const struct device *d,
                            struct device_attribute *attr, char *buf,
                            unsigned long offset)
{
        struct net_device *dev = to_net_dev(d);
        ssize_t ret = -EINVAL;

        WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
                offset % sizeof(u64) != 0);

        read_lock(&dev_base_lock);
        if (dev_isalive(dev)) {
                struct rtnl_link_stats64 temp;
                const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

                ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *) stats) + offset));
        }
        read_unlock(&dev_base_lock);
        return ret;
}
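
/* Every counter in struct rtnl_link_stats64 is a u64, so one helper can
 * serve all the statistics files: each NETSTAT_ENTRY() below passes the
 * offsetof() of its field, and netstat_show() reads the u64 at that byte
 * offset inside the snapshot returned by dev_get_stats().
 */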

/* generate a read-only statistics attribute */
#define NETSTAT_ENTRY(name)                                             \
static ssize_t name##_show(struct device *d,                            \
                           struct device_attribute *attr, char *buf)    \
{                                                                       \
        return netstat_show(d, attr, buf,                               \
                            offsetof(struct rtnl_link_stats64, name));  \
}                                                                       \
static DEVICE_ATTR_RO(name)

NETSTAT_ENTRY(rx_packets);
NETSTAT_ENTRY(tx_packets);
NETSTAT_ENTRY(rx_bytes);
NETSTAT_ENTRY(tx_bytes);
NETSTAT_ENTRY(rx_errors);
NETSTAT_ENTRY(tx_errors);
NETSTAT_ENTRY(rx_dropped);
NETSTAT_ENTRY(tx_dropped);
NETSTAT_ENTRY(multicast);
NETSTAT_ENTRY(collisions);
NETSTAT_ENTRY(rx_length_errors);
NETSTAT_ENTRY(rx_over_errors);
NETSTAT_ENTRY(rx_crc_errors);
NETSTAT_ENTRY(rx_frame_errors);
NETSTAT_ENTRY(rx_fifo_errors);
NETSTAT_ENTRY(rx_missed_errors);
NETSTAT_ENTRY(tx_aborted_errors);
NETSTAT_ENTRY(tx_carrier_errors);
NETSTAT_ENTRY(tx_fifo_errors);
NETSTAT_ENTRY(tx_heartbeat_errors);
NETSTAT_ENTRY(tx_window_errors);
NETSTAT_ENTRY(rx_compressed);
NETSTAT_ENTRY(tx_compressed);

static struct attribute *netstat_attrs[] = {
        &dev_attr_rx_packets.attr,
        &dev_attr_tx_packets.attr,
        &dev_attr_rx_bytes.attr,
        &dev_attr_tx_bytes.attr,
        &dev_attr_rx_errors.attr,
        &dev_attr_tx_errors.attr,
        &dev_attr_rx_dropped.attr,
        &dev_attr_tx_dropped.attr,
        &dev_attr_multicast.attr,
        &dev_attr_collisions.attr,
        &dev_attr_rx_length_errors.attr,
        &dev_attr_rx_over_errors.attr,
        &dev_attr_rx_crc_errors.attr,
        &dev_attr_rx_frame_errors.attr,
        &dev_attr_rx_fifo_errors.attr,
        &dev_attr_rx_missed_errors.attr,
        &dev_attr_tx_aborted_errors.attr,
        &dev_attr_tx_carrier_errors.attr,
        &dev_attr_tx_fifo_errors.attr,
        &dev_attr_tx_heartbeat_errors.attr,
        &dev_attr_tx_window_errors.attr,
        &dev_attr_rx_compressed.attr,
        &dev_attr_tx_compressed.attr,
        NULL
};

static struct attribute_group netstat_group = {
        .name = "statistics",
        .attrs = netstat_attrs,
};

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
static struct attribute *wireless_attrs[] = {
        NULL
};

static struct attribute_group wireless_group = {
        .name = "wireless",
        .attrs = wireless_attrs,
};
#endif

#else /* CONFIG_SYSFS */
#define net_class_groups        NULL
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_SYSFS
#define to_rx_queue_attr(_attr) container_of(_attr,                     \
    struct rx_queue_attribute, attr)

#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)

static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
                                  char *buf)
{
        struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
        struct netdev_rx_queue *queue = to_rx_queue(kobj);

        if (!attribute->show)
                return -EIO;

        return attribute->show(queue, attribute, buf);
}

static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
                                   const char *buf, size_t count)
{
        struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
        struct netdev_rx_queue *queue = to_rx_queue(kobj);

        if (!attribute->store)
                return -EIO;

        return attribute->store(queue, attribute, buf, count);
}

static const struct sysfs_ops rx_queue_sysfs_ops = {
        .show = rx_queue_attr_show,
        .store = rx_queue_attr_store,
};
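
/* RPS (Receive Packet Steering) lets user space choose which CPUs may
 * process packets received on a given queue. The two files below live
 * under /sys/class/net/<iface>/queues/rx-<n>/: rps_cpus takes a CPU
 * bitmap, and rps_flow_cnt sizes the per-queue flow table used by RFS.
 */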

#ifdef CONFIG_RPS
static ssize_t show_rps_map(struct netdev_rx_queue *queue,
                            struct rx_queue_attribute *attribute, char *buf)
{
        struct rps_map *map;
        cpumask_var_t mask;
        size_t len = 0;
        int i;

        if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        rcu_read_lock();
        map = rcu_dereference(queue->rps_map);
        if (map)
                for (i = 0; i < map->len; i++)
                        cpumask_set_cpu(map->cpus[i], mask);

        len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
        if (PAGE_SIZE - len < 3) {
                rcu_read_unlock();
                free_cpumask_var(mask);
                return -EINVAL;
        }
        rcu_read_unlock();

        free_cpumask_var(mask);
        len += sprintf(buf + len, "\n");
        return len;
}

static ssize_t store_rps_map(struct netdev_rx_queue *queue,
                             struct rx_queue_attribute *attribute,
                             const char *buf, size_t len)
{
        struct rps_map *old_map, *map;
        cpumask_var_t mask;
        int err, cpu, i;
        static DEFINE_SPINLOCK(rps_map_lock);

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
        if (err) {
                free_cpumask_var(mask);
                return err;
        }

        map = kzalloc(max_t(unsigned int,
            RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
            GFP_KERNEL);
        if (!map) {
                free_cpumask_var(mask);
                return -ENOMEM;
        }

        i = 0;
        for_each_cpu_and(cpu, mask, cpu_online_mask)
                map->cpus[i++] = cpu;

        if (i)
                map->len = i;
        else {
                kfree(map);
                map = NULL;
        }

        spin_lock(&rps_map_lock);
        old_map = rcu_dereference_protected(queue->rps_map,
                                            lockdep_is_held(&rps_map_lock));
        rcu_assign_pointer(queue->rps_map, map);
        spin_unlock(&rps_map_lock);

        if (map)
                static_key_slow_inc(&rps_needed);
        if (old_map) {
                kfree_rcu(old_map, rcu);
                static_key_slow_dec(&rps_needed);
        }
        free_cpumask_var(mask);
        return len;
}
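
/* The update above is the classic RCU publish pattern: build the new map
 * off to the side, swap the pointer under a spinlock with
 * rcu_assign_pointer(), and free the old map only after a grace period
 * via kfree_rcu(), so readers under rcu_read_lock() (show_rps_map() and
 * the hot receive path) never see a half-built or freed table. The
 * rps_needed static key keeps RPS out of the fast path while no queue
 * has a map configured.
 */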

static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
                                           struct rx_queue_attribute *attr,
                                           char *buf)
{
        struct rps_dev_flow_table *flow_table;
        unsigned long val = 0;

        rcu_read_lock();
        flow_table = rcu_dereference(queue->rps_flow_table);
        if (flow_table)
                val = (unsigned long)flow_table->mask + 1;
        rcu_read_unlock();

        return sprintf(buf, "%lu\n", val);
}

static void rps_dev_flow_table_release(struct rcu_head *rcu)
{
        struct rps_dev_flow_table *table = container_of(rcu,
            struct rps_dev_flow_table, rcu);

        vfree(table);
}

static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
                                            struct rx_queue_attribute *attr,
                                            const char *buf, size_t len)
{
        unsigned long mask, count;
        struct rps_dev_flow_table *table, *old_table;
        static DEFINE_SPINLOCK(rps_dev_flow_lock);
        int rc;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        rc = kstrtoul(buf, 0, &count);
        if (rc < 0)
                return rc;

        if (count) {
                mask = count - 1;
                /* mask = roundup_pow_of_two(count) - 1;
                 * without overflows...
                 */
                while ((mask | (mask >> 1)) != mask)
                        mask |= (mask >> 1);
                /* On 64 bit arches, must check mask fits in table->mask (u32),
                 * and on 32bit arches, must check
                 * RPS_DEV_FLOW_TABLE_SIZE(mask + 1) doesn't overflow.
                 */
#if BITS_PER_LONG > 32
                if (mask > (unsigned long)(u32)mask)
                        return -EINVAL;
#else
                if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
                                / sizeof(struct rps_dev_flow)) {
                        /* Enforce a limit to prevent overflow */
                        return -EINVAL;
                }
#endif
                table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
                if (!table)
                        return -ENOMEM;

                table->mask = mask;
                for (count = 0; count <= mask; count++)
                        table->flows[count].cpu = RPS_NO_CPU;
        } else
                table = NULL;

        spin_lock(&rps_dev_flow_lock);
        old_table = rcu_dereference_protected(queue->rps_flow_table,
                                              lockdep_is_held(&rps_dev_flow_lock));
        rcu_assign_pointer(queue->rps_flow_table, table);
        spin_unlock(&rps_dev_flow_lock);

        if (old_table)
                call_rcu(&old_table->rcu, rps_dev_flow_table_release);

        return len;
}
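
/* The bit-smearing loop above ORs each set bit of mask down into all the
 * lower positions, turning count - 1 into the next all-ones value, i.e.
 * (2^k) - 1 >= count - 1. Worked example: count = 100 gives mask = 99 =
 * 0b1100011, which smears to 0b1111111 = 127, so the table is sized to
 * 128 entries. This equals roundup_pow_of_two(count) - 1 while avoiding
 * the overflow that helper could hit for huge counts.
 */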

static struct rx_queue_attribute rps_cpus_attribute =
        __ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map);

static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute =
        __ATTR(rps_flow_cnt, S_IRUGO | S_IWUSR,
            show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);
#endif /* CONFIG_RPS */

static struct attribute *rx_queue_default_attrs[] = {
#ifdef CONFIG_RPS
        &rps_cpus_attribute.attr,
        &rps_dev_flow_table_cnt_attribute.attr,
#endif
        NULL
};

static void rx_queue_release(struct kobject *kobj)
{
        struct netdev_rx_queue *queue = to_rx_queue(kobj);
#ifdef CONFIG_RPS
        struct rps_map *map;
        struct rps_dev_flow_table *flow_table;

        map = rcu_dereference_protected(queue->rps_map, 1);
        if (map) {
                RCU_INIT_POINTER(queue->rps_map, NULL);
                kfree_rcu(map, rcu);
        }

        flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
        if (flow_table) {
                RCU_INIT_POINTER(queue->rps_flow_table, NULL);
                call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
        }
#endif

        memset(kobj, 0, sizeof(*kobj));
        dev_put(queue->dev);
}

static const void *rx_queue_namespace(struct kobject *kobj)
{
        struct netdev_rx_queue *queue = to_rx_queue(kobj);
        struct device *dev = &queue->dev->dev;
        const void *ns = NULL;

        if (dev->class && dev->class->ns_type)
                ns = dev->class->namespace(dev);

        return ns;
}

static struct kobj_type rx_queue_ktype = {
        .sysfs_ops = &rx_queue_sysfs_ops,
        .release = rx_queue_release,
        .default_attrs = rx_queue_default_attrs,
        .namespace = rx_queue_namespace
};

static int rx_queue_add_kobject(struct net_device *net, int index)
{
        struct netdev_rx_queue *queue = net->_rx + index;
        struct kobject *kobj = &queue->kobj;
        int error = 0;

        kobj->kset = net->queues_kset;
        error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
            "rx-%u", index);
        if (error)
                goto exit;

        if (net->sysfs_rx_queue_group) {
                error = sysfs_create_group(kobj, net->sysfs_rx_queue_group);
                if (error)
                        goto exit;
        }

        kobject_uevent(kobj, KOBJ_ADD);
        dev_hold(queue->dev);

        return error;
exit:
        kobject_put(kobj);
        return error;
}
#endif /* CONFIG_SYSFS */
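
/* Grow-or-shrink helper: for new_num > old_num it registers kobjects for
 * the added queues; for new_num < old_num it tears down the surplus
 * ones. On a registration failure it rolls back to old_num so the
 * caller sees a consistent queue count.
 */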
int
net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
        int i;
        int error = 0;

#ifndef CONFIG_RPS
        if (!net->sysfs_rx_queue_group)
                return 0;
#endif
        for (i = old_num; i < new_num; i++) {
                error = rx_queue_add_kobject(net, i);
                if (error) {
                        new_num = old_num;
                        break;
                }
        }

        while (--i >= new_num) {
                if (net->sysfs_rx_queue_group)
                        sysfs_remove_group(&net->_rx[i].kobj,
                                           net->sysfs_rx_queue_group);
                kobject_put(&net->_rx[i].kobj);
        }

        return error;
#else
        return 0;
#endif
}

#ifdef CONFIG_SYSFS
/*
 * netdev_queue sysfs structures and functions.
 */
struct netdev_queue_attribute {
        struct attribute attr;
        ssize_t (*show)(struct netdev_queue *queue,
            struct netdev_queue_attribute *attr, char *buf);
        ssize_t (*store)(struct netdev_queue *queue,
            struct netdev_queue_attribute *attr, const char *buf, size_t len);
};
#define to_netdev_queue_attr(_attr) container_of(_attr,                 \
    struct netdev_queue_attribute, attr)

#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)

static ssize_t netdev_queue_attr_show(struct kobject *kobj,
                                      struct attribute *attr, char *buf)
{
        struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
        struct netdev_queue *queue = to_netdev_queue(kobj);

        if (!attribute->show)
                return -EIO;

        return attribute->show(queue, attribute, buf);
}

static ssize_t netdev_queue_attr_store(struct kobject *kobj,
                                       struct attribute *attr,
                                       const char *buf, size_t count)
{
        struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
        struct netdev_queue *queue = to_netdev_queue(kobj);

        if (!attribute->store)
                return -EIO;

        return attribute->store(queue, attribute, buf, count);
}

static const struct sysfs_ops netdev_queue_sysfs_ops = {
        .show = netdev_queue_attr_show,
        .store = netdev_queue_attr_store,
};

static ssize_t show_trans_timeout(struct netdev_queue *queue,
                                  struct netdev_queue_attribute *attribute,
                                  char *buf)
{
        unsigned long trans_timeout;

        spin_lock_irq(&queue->_xmit_lock);
        trans_timeout = queue->trans_timeout;
        spin_unlock_irq(&queue->_xmit_lock);

        return sprintf(buf, "%lu", trans_timeout);
}

static struct netdev_queue_attribute queue_trans_timeout =
        __ATTR(tx_timeout, S_IRUGO, show_trans_timeout, NULL);

#ifdef CONFIG_BQL
/*
 * Byte queue limits sysfs structures and functions.
 */
static ssize_t bql_show(char *buf, unsigned int value)
{
        return sprintf(buf, "%u\n", value);
}

static ssize_t bql_set(const char *buf, const size_t count,
                       unsigned int *pvalue)
{
        unsigned int value;
        int err;

        if (!strcmp(buf, "max") || !strcmp(buf, "max\n"))
                value = DQL_MAX_LIMIT;
        else {
                err = kstrtouint(buf, 10, &value);
                if (err < 0)
                        return err;
                if (value > DQL_MAX_LIMIT)
                        return -EINVAL;
        }
        *pvalue = value;
        return count;
}
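
/* BQL (byte queue limits) caps the number of in-flight bytes a tx queue
 * may hold between ndo_start_xmit() and tx completion, keeping hardware
 * rings short to reduce latency (bufferbloat) while avoiding
 * starvation. The dql fields read and written below are maintained by
 * the dynamic queue limit library in lib/dynamic_queue_limits.c.
 */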

static ssize_t bql_show_hold_time(struct netdev_queue *queue,
                                  struct netdev_queue_attribute *attr,
                                  char *buf)
{
        struct dql *dql = &queue->dql;

        return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time));
}

static ssize_t bql_set_hold_time(struct netdev_queue *queue,
                                 struct netdev_queue_attribute *attribute,
                                 const char *buf, size_t len)
{
        struct dql *dql = &queue->dql;
        unsigned int value;
        int err;

        err = kstrtouint(buf, 10, &value);
        if (err < 0)
                return err;

        dql->slack_hold_time = msecs_to_jiffies(value);

        return len;
}

static struct netdev_queue_attribute bql_hold_time_attribute =
        __ATTR(hold_time, S_IRUGO | S_IWUSR, bql_show_hold_time,
            bql_set_hold_time);

static ssize_t bql_show_inflight(struct netdev_queue *queue,
                                 struct netdev_queue_attribute *attr,
                                 char *buf)
{
        struct dql *dql = &queue->dql;

        return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed);
}

static struct netdev_queue_attribute bql_inflight_attribute =
        __ATTR(inflight, S_IRUGO, bql_show_inflight, NULL);

#define BQL_ATTR(NAME, FIELD)                                           \
static ssize_t bql_show_ ## NAME(struct netdev_queue *queue,            \
                                 struct netdev_queue_attribute *attr,   \
                                 char *buf)                             \
{                                                                       \
        return bql_show(buf, queue->dql.FIELD);                         \
}                                                                       \
                                                                        \
static ssize_t bql_set_ ## NAME(struct netdev_queue *queue,             \
                                struct netdev_queue_attribute *attr,    \
                                const char *buf, size_t len)            \
{                                                                       \
        return bql_set(buf, len, &queue->dql.FIELD);                    \
}                                                                       \
                                                                        \
static struct netdev_queue_attribute bql_ ## NAME ## _attribute =       \
        __ATTR(NAME, S_IRUGO | S_IWUSR, bql_show_ ## NAME,              \
            bql_set_ ## NAME);

BQL_ATTR(limit, limit)
BQL_ATTR(limit_max, max_limit)
BQL_ATTR(limit_min, min_limit)

static struct attribute *dql_attrs[] = {
        &bql_limit_attribute.attr,
        &bql_limit_max_attribute.attr,
        &bql_limit_min_attribute.attr,
        &bql_hold_time_attribute.attr,
        &bql_inflight_attribute.attr,
        NULL
};

static struct attribute_group dql_group = {
        .name = "byte_queue_limits",
        .attrs = dql_attrs,
};
#endif /* CONFIG_BQL */
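
/* XPS (Transmit Packet Steering) is the tx-side counterpart of RPS:
 * /sys/class/net/<iface>/queues/tx-<n>/xps_cpus maps a set of CPUs to a
 * transmit queue so packets generated on those CPUs prefer that queue,
 * improving cache locality and reducing queue lock contention.
 */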

#ifdef CONFIG_XPS
static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
{
        struct net_device *dev = queue->dev;
        unsigned int i;

        i = queue - dev->_tx;
        BUG_ON(i >= dev->num_tx_queues);

        return i;
}

static ssize_t show_xps_map(struct netdev_queue *queue,
                            struct netdev_queue_attribute *attribute, char *buf)
{
        struct net_device *dev = queue->dev;
        struct xps_dev_maps *dev_maps;
        cpumask_var_t mask;
        unsigned long index;
        size_t len = 0;
        int i;

        if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        index = get_netdev_queue_index(queue);

        rcu_read_lock();
        dev_maps = rcu_dereference(dev->xps_maps);
        if (dev_maps) {
                for_each_possible_cpu(i) {
                        struct xps_map *map =
                            rcu_dereference(dev_maps->cpu_map[i]);
                        if (map) {
                                int j;
                                for (j = 0; j < map->len; j++) {
                                        if (map->queues[j] == index) {
                                                cpumask_set_cpu(i, mask);
                                                break;
                                        }
                                }
                        }
                }
        }
        rcu_read_unlock();

        len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
        if (PAGE_SIZE - len < 3) {
                free_cpumask_var(mask);
                return -EINVAL;
        }

        free_cpumask_var(mask);
        len += sprintf(buf + len, "\n");
        return len;
}

static ssize_t store_xps_map(struct netdev_queue *queue,
                             struct netdev_queue_attribute *attribute,
                             const char *buf, size_t len)
{
        struct net_device *dev = queue->dev;
        unsigned long index;
        cpumask_var_t mask;
        int err;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        index = get_netdev_queue_index(queue);

        err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
        if (err) {
                free_cpumask_var(mask);
                return err;
        }

        err = netif_set_xps_queue(dev, mask, index);

        free_cpumask_var(mask);

        return err ? : len;
}

static struct netdev_queue_attribute xps_cpus_attribute =
        __ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map);
#endif /* CONFIG_XPS */

static struct attribute *netdev_queue_default_attrs[] = {
        &queue_trans_timeout.attr,
#ifdef CONFIG_XPS
        &xps_cpus_attribute.attr,
#endif
        NULL
};

static void netdev_queue_release(struct kobject *kobj)
{
        struct netdev_queue *queue = to_netdev_queue(kobj);

        memset(kobj, 0, sizeof(*kobj));
        dev_put(queue->dev);
}

static const void *netdev_queue_namespace(struct kobject *kobj)
{
        struct netdev_queue *queue = to_netdev_queue(kobj);
        struct device *dev = &queue->dev->dev;
        const void *ns = NULL;

        if (dev->class && dev->class->ns_type)
                ns = dev->class->namespace(dev);

        return ns;
}

static struct kobj_type netdev_queue_ktype = {
        .sysfs_ops = &netdev_queue_sysfs_ops,
        .release = netdev_queue_release,
        .default_attrs = netdev_queue_default_attrs,
        .namespace = netdev_queue_namespace,
};

static int netdev_queue_add_kobject(struct net_device *net, int index)
{
        struct netdev_queue *queue = net->_tx + index;
        struct kobject *kobj = &queue->kobj;
        int error = 0;

        kobj->kset = net->queues_kset;
        error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
            "tx-%u", index);
        if (error)
                goto exit;

#ifdef CONFIG_BQL
        error = sysfs_create_group(kobj, &dql_group);
        if (error)
                goto exit;
#endif

        kobject_uevent(kobj, KOBJ_ADD);
        dev_hold(queue->dev);

        return 0;
exit:
        kobject_put(kobj);
        return error;
}
#endif /* CONFIG_SYSFS */

int
netdev_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
        int i;
        int error = 0;

        for (i = old_num; i < new_num; i++) {
                error = netdev_queue_add_kobject(net, i);
                if (error) {
                        new_num = old_num;
                        break;
                }
        }

        while (--i >= new_num) {
                struct netdev_queue *queue = net->_tx + i;

#ifdef CONFIG_BQL
                sysfs_remove_group(&queue->kobj, &dql_group);
#endif
                kobject_put(&queue->kobj);
        }

        return error;
#else
        return 0;
#endif /* CONFIG_SYSFS */
}

static int register_queue_kobjects(struct net_device *net)
{
        int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
        net->queues_kset = kset_create_and_add("queues",
            NULL, &net->dev.kobj);
        if (!net->queues_kset)
                return -ENOMEM;
        real_rx = net->real_num_rx_queues;
#endif
        real_tx = net->real_num_tx_queues;

        error = net_rx_queue_update_kobjects(net, 0, real_rx);
        if (error)
                goto error;
        rxq = real_rx;

        error = netdev_queue_update_kobjects(net, 0, real_tx);
        if (error)
                goto error;
        txq = real_tx;

        return 0;

error:
        netdev_queue_update_kobjects(net, txq, 0);
        net_rx_queue_update_kobjects(net, rxq, 0);
        return error;
}

static void remove_queue_kobjects(struct net_device *net)
{
        int real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
        real_rx = net->real_num_rx_queues;
#endif
        real_tx = net->real_num_tx_queues;

        net_rx_queue_update_kobjects(net, real_rx, 0);
        netdev_queue_update_kobjects(net, real_tx, 0);
#ifdef CONFIG_SYSFS
        kset_unregister(net->queues_kset);
#endif
}

static bool net_current_may_mount(void)
{
        struct net *net = current->nsproxy->net_ns;

        return ns_capable(net->user_ns, CAP_SYS_ADMIN);
}

static void *net_grab_current_ns(void)
{
        struct net *ns = current->nsproxy->net_ns;
#ifdef CONFIG_NET_NS
        if (ns)
                atomic_inc(&ns->passive);
#endif
        return ns;
}

static const void *net_initial_ns(void)
{
        return &init_net;
}

static const void *net_netlink_ns(struct sock *sk)
{
        return sock_net(sk);
}

struct kobj_ns_type_operations net_ns_type_operations = {
        .type = KOBJ_NS_TYPE_NET,
        .current_may_mount = net_current_may_mount,
        .grab_current_ns = net_grab_current_ns,
        .netlink_ns = net_netlink_ns,
        .initial_ns = net_initial_ns,
        .drop_ns = net_drop_ns,
};
EXPORT_SYMBOL_GPL(net_ns_type_operations);
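
/* These ops let sysfs tag every "net" class directory with the network
 * namespace it belongs to, so each namespace sees only its own devices
 * under /sys/class/net even though the underlying tree is shared.
 */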

static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
{
        struct net_device *dev = to_net_dev(d);
        int retval;

        /* pass interface to uevent. */
        retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
        if (retval)
                goto exit;

        /* pass ifindex to uevent.
         * ifindex is useful as it won't change (interface name may change)
         * and is what RtNetlink uses natively. */
        retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex);

exit:
        return retval;
}

/*
 *      netdev_release -- destroy and free a dead device.
 *      Called when last reference to device kobject is gone.
 */
static void netdev_release(struct device *d)
{
        struct net_device *dev = to_net_dev(d);

        BUG_ON(dev->reg_state != NETREG_RELEASED);

        kfree(dev->ifalias);
        netdev_freemem(dev);
}

static const void *net_namespace(struct device *d)
{
        struct net_device *dev;

        dev = container_of(d, struct net_device, dev);
        return dev_net(dev);
}

static struct class net_class = {
        .name = "net",
        .dev_release = netdev_release,
        .dev_groups = net_class_groups,
        .dev_uevent = netdev_uevent,
        .ns_type = &net_ns_type_operations,
        .namespace = net_namespace,
};

/* Delete sysfs entries but hold kobject reference until after all
 * netdev references are gone.
 */
void netdev_unregister_kobject(struct net_device *net)
{
        struct device *dev = &(net->dev);

        kobject_get(&dev->kobj);

        remove_queue_kobjects(net);

        pm_runtime_set_memalloc_noio(dev, false);

        device_del(dev);
}

/* Create sysfs entries for network device. */
int netdev_register_kobject(struct net_device *net)
{
        struct device *dev = &(net->dev);
        const struct attribute_group **groups = net->sysfs_groups;
        int error = 0;

        device_initialize(dev);
        dev->class = &net_class;
        dev->platform_data = net;
        dev->groups = groups;

        dev_set_name(dev, "%s", net->name);

#ifdef CONFIG_SYSFS
        /* Allow for a device specific group */
        if (*groups)
                groups++;

        *groups++ = &netstat_group;

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
        if (net->ieee80211_ptr)
                *groups++ = &wireless_group;
#if IS_ENABLED(CONFIG_WIRELESS_EXT)
        else if (net->wireless_handlers)
                *groups++ = &wireless_group;
#endif
#endif
#endif /* CONFIG_SYSFS */

        error = device_add(dev);
        if (error)
                return error;

        error = register_queue_kobjects(net);
        if (error) {
                device_del(dev);
                return error;
        }

        pm_runtime_set_memalloc_noio(dev, true);

        return error;
}

int netdev_class_create_file_ns(struct class_attribute *class_attr,
                                const void *ns)
{
        return class_create_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_create_file_ns);

void netdev_class_remove_file_ns(struct class_attribute *class_attr,
                                 const void *ns)
{
        class_remove_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_remove_file_ns);

int __init netdev_kobject_init(void)
{
        kobj_ns_type_register(&net_ns_type_operations);
        return class_register(&net_class);
}