net-sysfs.c

/*
 * net-sysfs.c - network device class and attributes
 *
 * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <net/switchdev.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/nsproxy.h>
#include <net/sock.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/cpu.h>

#include "net-sysfs.h"

#ifdef CONFIG_SYSFS
static const char fmt_hex[] = "%#x\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";

static inline int dev_isalive(const struct net_device *dev)
{
	return dev->reg_state <= NETREG_REGISTERED;
}

/* use same locking rules as GIF* ioctl's */
static ssize_t netdev_show(const struct device *dev,
			   struct device_attribute *attr, char *buf,
			   ssize_t (*format)(const struct net_device *, char *))
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(ndev))
		ret = (*format)(ndev, buf);
	read_unlock(&dev_base_lock);

	return ret;
}

/* generate a show function for simple field */
#define NETDEVICE_SHOW(field, format_string)				\
static ssize_t format_##field(const struct net_device *dev, char *buf)	\
{									\
	return sprintf(buf, format_string, dev->field);			\
}									\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)	\
{									\
	return netdev_show(dev, attr, buf, format_##field);		\
}									\

#define NETDEVICE_SHOW_RO(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RO(field)

#define NETDEVICE_SHOW_RW(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RW(field)

/* use same locking and permission rules as SIF* ioctl's */
static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len,
			    int (*set)(struct net_device *, unsigned long))
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	unsigned long new;
	int ret = -EINVAL;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtoul(buf, 0, &new);
	if (ret)
		goto err;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		ret = (*set)(netdev, new);
		if (ret == 0)
			ret = len;
	}
	rtnl_unlock();
 err:
	return ret;
}
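
/*
 * Example (illustrative only; interface name is hypothetical): attributes
 * built on netdev_show()/netdev_store() appear as files under
 * /sys/class/net/<iface>/ and can be read or written from userspace:
 *
 *   $ cat /sys/class/net/eth0/mtu
 *   1500
 *   # echo 9000 > /sys/class/net/eth0/mtu
 *
 * Writes require CAP_NET_ADMIN in the device's network namespace and are
 * serialized under the rtnl lock, mirroring the SIF* ioctl rules above.
 */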
NETDEVICE_SHOW_RO(dev_id, fmt_hex);
NETDEVICE_SHOW_RO(dev_port, fmt_dec);
NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec);
NETDEVICE_SHOW_RO(addr_len, fmt_dec);
NETDEVICE_SHOW_RO(ifindex, fmt_dec);
NETDEVICE_SHOW_RO(type, fmt_dec);
NETDEVICE_SHOW_RO(link_mode, fmt_dec);

static ssize_t iflink_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct net_device *ndev = to_net_dev(dev);

	return sprintf(buf, fmt_dec, dev_get_iflink(ndev));
}
static DEVICE_ATTR_RO(iflink);

static ssize_t format_name_assign_type(const struct net_device *dev, char *buf)
{
	return sprintf(buf, fmt_dec, dev->name_assign_type);
}

static ssize_t name_assign_type_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (ndev->name_assign_type != NET_NAME_UNKNOWN)
		ret = netdev_show(dev, attr, buf, format_name_assign_type);

	return ret;
}
static DEVICE_ATTR_RO(name_assign_type);

/* use same locking rules as GIFHWADDR ioctl's */
static ssize_t address_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(ndev))
		ret = sysfs_format_mac(buf, ndev->dev_addr, ndev->addr_len);
	read_unlock(&dev_base_lock);

	return ret;
}
static DEVICE_ATTR_RO(address);

static ssize_t broadcast_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = to_net_dev(dev);

	if (dev_isalive(ndev))
		return sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len);
	return -EINVAL;
}
static DEVICE_ATTR_RO(broadcast);

static int change_carrier(struct net_device *dev, unsigned long new_carrier)
{
	if (!netif_running(dev))
		return -EINVAL;
	return dev_change_carrier(dev, (bool)new_carrier);
}

static ssize_t carrier_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_carrier);
}

static ssize_t carrier_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RW(carrier);

static ssize_t speed_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_link_ksettings cmd;

		if (!__ethtool_get_link_ksettings(netdev, &cmd))
			ret = sprintf(buf, fmt_dec, cmd.base.speed);
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(speed);

static ssize_t duplex_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_link_ksettings cmd;

		if (!__ethtool_get_link_ksettings(netdev, &cmd)) {
			const char *duplex;

			switch (cmd.base.duplex) {
			case DUPLEX_HALF:
				duplex = "half";
				break;
			case DUPLEX_FULL:
				duplex = "full";
				break;
			default:
				duplex = "unknown";
				break;
			}
			ret = sprintf(buf, "%s\n", duplex);
		}
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(duplex);

static ssize_t dormant_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_dormant(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RO(dormant);

static const char *const operstates[] = {
	"unknown",
	"notpresent", /* currently unused */
	"down",
	"lowerlayerdown",
	"testing", /* currently unused */
	"dormant",
	"up"
};

static ssize_t operstate_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	unsigned char operstate;

	read_lock(&dev_base_lock);
	operstate = netdev->operstate;
	if (!netif_running(netdev))
		operstate = IF_OPER_DOWN;
	read_unlock(&dev_base_lock);

	if (operstate >= ARRAY_SIZE(operstates))
		return -EINVAL; /* should not happen */

	return sprintf(buf, "%s\n", operstates[operstate]);
}
static DEVICE_ATTR_RO(operstate);
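
/*
 * Example (illustrative only; interface name is hypothetical): the RFC 2863
 * operational state reads back as one of the strings in operstates[]:
 *
 *   $ cat /sys/class/net/eth0/operstate
 *   up
 */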
static ssize_t carrier_changes_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sprintf(buf, fmt_dec,
		       atomic_read(&netdev->carrier_up_count) +
		       atomic_read(&netdev->carrier_down_count));
}
static DEVICE_ATTR_RO(carrier_changes);

static ssize_t carrier_up_count_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sprintf(buf, fmt_dec, atomic_read(&netdev->carrier_up_count));
}
static DEVICE_ATTR_RO(carrier_up_count);

static ssize_t carrier_down_count_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sprintf(buf, fmt_dec, atomic_read(&netdev->carrier_down_count));
}
static DEVICE_ATTR_RO(carrier_down_count);

/* read-write attributes */

static int change_mtu(struct net_device *dev, unsigned long new_mtu)
{
	return dev_set_mtu(dev, (int)new_mtu);
}

static ssize_t mtu_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_mtu);
}
NETDEVICE_SHOW_RW(mtu, fmt_dec);

static int change_flags(struct net_device *dev, unsigned long new_flags)
{
	return dev_change_flags(dev, (unsigned int)new_flags);
}

static ssize_t flags_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_flags);
}
NETDEVICE_SHOW_RW(flags, fmt_hex);

static ssize_t tx_queue_len_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, dev_change_tx_queue_len);
}
NETDEVICE_SHOW_RW(tx_queue_len, fmt_dec);

static int change_gro_flush_timeout(struct net_device *dev, unsigned long val)
{
	dev->gro_flush_timeout = val;
	return 0;
}

static ssize_t gro_flush_timeout_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, change_gro_flush_timeout);
}
NETDEVICE_SHOW_RW(gro_flush_timeout, fmt_ulong);
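
/*
 * Example (illustrative only; interface name is hypothetical): these
 * writable attributes require CAP_NET_ADMIN, and values are parsed with
 * kstrtoul(..., 0, ...), so decimal, octal ("0...") and hex ("0x...")
 * input is accepted:
 *
 *   # echo 10000 > /sys/class/net/eth0/tx_queue_len
 *   # echo 20000 > /sys/class/net/eth0/gro_flush_timeout
 */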
static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	size_t count = len;
	ssize_t ret = 0;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	/* ignore trailing newline */
	if (len > 0 && buf[len - 1] == '\n')
		--count;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		ret = dev_set_alias(netdev, buf, count);
		if (ret < 0)
			goto err;
		ret = len;
		netdev_state_change(netdev);
	}
err:
	rtnl_unlock();

	return ret;
}

static ssize_t ifalias_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	char tmp[IFALIASZ];
	ssize_t ret = 0;

	ret = dev_get_alias(netdev, tmp, sizeof(tmp));
	if (ret > 0)
		ret = sprintf(buf, "%s\n", tmp);
	return ret;
}
static DEVICE_ATTR_RW(ifalias);

static int change_group(struct net_device *dev, unsigned long new_group)
{
	dev_set_group(dev, (int)new_group);
	return 0;
}

static ssize_t group_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_group);
}
NETDEVICE_SHOW(group, fmt_dec);
static DEVICE_ATTR(netdev_group, 0644, group_show, group_store);

static int change_proto_down(struct net_device *dev, unsigned long proto_down)
{
	return dev_change_proto_down(dev, (bool)proto_down);
}

static ssize_t proto_down_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_proto_down);
}
NETDEVICE_SHOW_RW(proto_down, fmt_dec);

static ssize_t phys_port_id_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		struct netdev_phys_item_id ppid;

		ret = dev_get_phys_port_id(netdev, &ppid);
		if (!ret)
			ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_port_id);

static ssize_t phys_port_name_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		char name[IFNAMSIZ];

		ret = dev_get_phys_port_name(netdev, name, sizeof(name));
		if (!ret)
			ret = sprintf(buf, "%s\n", name);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_port_name);

static ssize_t phys_switch_id_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		struct switchdev_attr attr = {
			.orig_dev = netdev,
			.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
			.flags = SWITCHDEV_F_NO_RECURSE,
		};

		ret = switchdev_port_attr_get(netdev, &attr);
		if (!ret)
			ret = sprintf(buf, "%*phN\n", attr.u.ppid.id_len,
				      attr.u.ppid.id);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_switch_id);
static struct attribute *net_class_attrs[] __ro_after_init = {
	&dev_attr_netdev_group.attr,
	&dev_attr_type.attr,
	&dev_attr_dev_id.attr,
	&dev_attr_dev_port.attr,
	&dev_attr_iflink.attr,
	&dev_attr_ifindex.attr,
	&dev_attr_name_assign_type.attr,
	&dev_attr_addr_assign_type.attr,
	&dev_attr_addr_len.attr,
	&dev_attr_link_mode.attr,
	&dev_attr_address.attr,
	&dev_attr_broadcast.attr,
	&dev_attr_speed.attr,
	&dev_attr_duplex.attr,
	&dev_attr_dormant.attr,
	&dev_attr_operstate.attr,
	&dev_attr_carrier_changes.attr,
	&dev_attr_ifalias.attr,
	&dev_attr_carrier.attr,
	&dev_attr_mtu.attr,
	&dev_attr_flags.attr,
	&dev_attr_tx_queue_len.attr,
	&dev_attr_gro_flush_timeout.attr,
	&dev_attr_phys_port_id.attr,
	&dev_attr_phys_port_name.attr,
	&dev_attr_phys_switch_id.attr,
	&dev_attr_proto_down.attr,
	&dev_attr_carrier_up_count.attr,
	&dev_attr_carrier_down_count.attr,
	NULL,
};
ATTRIBUTE_GROUPS(net_class);

/* Show a given attribute in the statistics group */
static ssize_t netstat_show(const struct device *d,
			    struct device_attribute *attr, char *buf,
			    unsigned long offset)
{
	struct net_device *dev = to_net_dev(d);
	ssize_t ret = -EINVAL;

	WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
		offset % sizeof(u64) != 0);

	read_lock(&dev_base_lock);
	if (dev_isalive(dev)) {
		struct rtnl_link_stats64 temp;
		const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

		ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *)stats) + offset));
	}
	read_unlock(&dev_base_lock);
	return ret;
}

/* generate a read-only statistics attribute */
#define NETSTAT_ENTRY(name)						\
static ssize_t name##_show(struct device *d,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	return netstat_show(d, attr, buf,				\
			    offsetof(struct rtnl_link_stats64, name));	\
}									\
static DEVICE_ATTR_RO(name)
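
/*
 * For illustration: NETSTAT_ENTRY(rx_packets) expands to a read-only
 * attribute backed by the rx_packets field of struct rtnl_link_stats64,
 * exported as /sys/class/net/<iface>/statistics/rx_packets (the group
 * name comes from netstat_group below).
 */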
NETSTAT_ENTRY(rx_packets);
NETSTAT_ENTRY(tx_packets);
NETSTAT_ENTRY(rx_bytes);
NETSTAT_ENTRY(tx_bytes);
NETSTAT_ENTRY(rx_errors);
NETSTAT_ENTRY(tx_errors);
NETSTAT_ENTRY(rx_dropped);
NETSTAT_ENTRY(tx_dropped);
NETSTAT_ENTRY(multicast);
NETSTAT_ENTRY(collisions);
NETSTAT_ENTRY(rx_length_errors);
NETSTAT_ENTRY(rx_over_errors);
NETSTAT_ENTRY(rx_crc_errors);
NETSTAT_ENTRY(rx_frame_errors);
NETSTAT_ENTRY(rx_fifo_errors);
NETSTAT_ENTRY(rx_missed_errors);
NETSTAT_ENTRY(tx_aborted_errors);
NETSTAT_ENTRY(tx_carrier_errors);
NETSTAT_ENTRY(tx_fifo_errors);
NETSTAT_ENTRY(tx_heartbeat_errors);
NETSTAT_ENTRY(tx_window_errors);
NETSTAT_ENTRY(rx_compressed);
NETSTAT_ENTRY(tx_compressed);
NETSTAT_ENTRY(rx_nohandler);

static struct attribute *netstat_attrs[] __ro_after_init = {
	&dev_attr_rx_packets.attr,
	&dev_attr_tx_packets.attr,
	&dev_attr_rx_bytes.attr,
	&dev_attr_tx_bytes.attr,
	&dev_attr_rx_errors.attr,
	&dev_attr_tx_errors.attr,
	&dev_attr_rx_dropped.attr,
	&dev_attr_tx_dropped.attr,
	&dev_attr_multicast.attr,
	&dev_attr_collisions.attr,
	&dev_attr_rx_length_errors.attr,
	&dev_attr_rx_over_errors.attr,
	&dev_attr_rx_crc_errors.attr,
	&dev_attr_rx_frame_errors.attr,
	&dev_attr_rx_fifo_errors.attr,
	&dev_attr_rx_missed_errors.attr,
	&dev_attr_tx_aborted_errors.attr,
	&dev_attr_tx_carrier_errors.attr,
	&dev_attr_tx_fifo_errors.attr,
	&dev_attr_tx_heartbeat_errors.attr,
	&dev_attr_tx_window_errors.attr,
	&dev_attr_rx_compressed.attr,
	&dev_attr_tx_compressed.attr,
	&dev_attr_rx_nohandler.attr,
	NULL
};

static const struct attribute_group netstat_group = {
	.name = "statistics",
	.attrs = netstat_attrs,
};

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
static struct attribute *wireless_attrs[] = {
	NULL
};

static const struct attribute_group wireless_group = {
	.name = "wireless",
	.attrs = wireless_attrs,
};
#endif

#else /* CONFIG_SYSFS */
#define net_class_groups	NULL
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_SYSFS
#define to_rx_queue_attr(_attr) \
	container_of(_attr, struct rx_queue_attribute, attr)

#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)

static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
				  char *buf)
{
	const struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, buf);
}

static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
				   const char *buf, size_t count)
{
	const struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, buf, count);
}

static const struct sysfs_ops rx_queue_sysfs_ops = {
	.show = rx_queue_attr_show,
	.store = rx_queue_attr_store,
};
#ifdef CONFIG_RPS
static ssize_t show_rps_map(struct netdev_rx_queue *queue, char *buf)
{
	struct rps_map *map;
	cpumask_var_t mask;
	int i, len;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	rcu_read_lock();
	map = rcu_dereference(queue->rps_map);
	if (map)
		for (i = 0; i < map->len; i++)
			cpumask_set_cpu(map->cpus[i], mask);

	len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
	rcu_read_unlock();
	free_cpumask_var(mask);

	return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_rps_map(struct netdev_rx_queue *queue,
			     const char *buf, size_t len)
{
	struct rps_map *old_map, *map;
	cpumask_var_t mask;
	int err, cpu, i;
	static DEFINE_MUTEX(rps_map_mutex);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	map = kzalloc(max_t(unsigned int,
			    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
		      GFP_KERNEL);
	if (!map) {
		free_cpumask_var(mask);
		return -ENOMEM;
	}

	i = 0;
	for_each_cpu_and(cpu, mask, cpu_online_mask)
		map->cpus[i++] = cpu;

	if (i) {
		map->len = i;
	} else {
		kfree(map);
		map = NULL;
	}

	mutex_lock(&rps_map_mutex);
	old_map = rcu_dereference_protected(queue->rps_map,
					    mutex_is_locked(&rps_map_mutex));
	rcu_assign_pointer(queue->rps_map, map);

	if (map)
		static_key_slow_inc(&rps_needed);
	if (old_map)
		static_key_slow_dec(&rps_needed);

	mutex_unlock(&rps_map_mutex);

	if (old_map)
		kfree_rcu(old_map, rcu);

	free_cpumask_var(mask);
	return len;
}
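
/*
 * Example (illustrative only; interface name is hypothetical): steer
 * receive processing for queue 0 onto CPUs 0-3 by writing a hex CPU bitmap:
 *
 *   # echo f > /sys/class/net/eth0/queues/rx-0/rps_cpus
 */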
static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					   char *buf)
{
	struct rps_dev_flow_table *flow_table;
	unsigned long val = 0;

	rcu_read_lock();
	flow_table = rcu_dereference(queue->rps_flow_table);
	if (flow_table)
		val = (unsigned long)flow_table->mask + 1;
	rcu_read_unlock();

	return sprintf(buf, "%lu\n", val);
}

static void rps_dev_flow_table_release(struct rcu_head *rcu)
{
	struct rps_dev_flow_table *table = container_of(rcu,
	    struct rps_dev_flow_table, rcu);
	vfree(table);
}

static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					    const char *buf, size_t len)
{
	unsigned long mask, count;
	struct rps_dev_flow_table *table, *old_table;
	static DEFINE_SPINLOCK(rps_dev_flow_lock);
	int rc;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	rc = kstrtoul(buf, 0, &count);
	if (rc < 0)
		return rc;

	if (count) {
		mask = count - 1;
		/* mask = roundup_pow_of_two(count) - 1;
		 * without overflows...
		 */
		while ((mask | (mask >> 1)) != mask)
			mask |= (mask >> 1);
		/* On 64 bit arches, must check mask fits in table->mask (u32),
		 * and on 32bit arches, must check
		 * RPS_DEV_FLOW_TABLE_SIZE(mask + 1) doesn't overflow.
		 */
#if BITS_PER_LONG > 32
		if (mask > (unsigned long)(u32)mask)
			return -EINVAL;
#else
		if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
				/ sizeof(struct rps_dev_flow)) {
			/* Enforce a limit to prevent overflow */
			return -EINVAL;
		}
#endif
		table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
		if (!table)
			return -ENOMEM;

		table->mask = mask;
		for (count = 0; count <= mask; count++)
			table->flows[count].cpu = RPS_NO_CPU;
	} else {
		table = NULL;
	}

	spin_lock(&rps_dev_flow_lock);
	old_table = rcu_dereference_protected(queue->rps_flow_table,
					      lockdep_is_held(&rps_dev_flow_lock));
	rcu_assign_pointer(queue->rps_flow_table, table);
	spin_unlock(&rps_dev_flow_lock);

	if (old_table)
		call_rcu(&old_table->rcu, rps_dev_flow_table_release);

	return len;
}
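
/*
 * Example (illustrative only; interface name is hypothetical): size the
 * per-queue RFS flow table; the count is rounded up to a power of two,
 * and writing 0 removes the table:
 *
 *   # echo 4096 > /sys/class/net/eth0/queues/rx-0/rps_flow_cnt
 */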
static struct rx_queue_attribute rps_cpus_attribute __ro_after_init
	= __ATTR(rps_cpus, 0644, show_rps_map, store_rps_map);

static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute __ro_after_init
	= __ATTR(rps_flow_cnt, 0644,
		 show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);
#endif /* CONFIG_RPS */

static struct attribute *rx_queue_default_attrs[] __ro_after_init = {
#ifdef CONFIG_RPS
	&rps_cpus_attribute.attr,
	&rps_dev_flow_table_cnt_attribute.attr,
#endif
	NULL
};

static void rx_queue_release(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
#ifdef CONFIG_RPS
	struct rps_map *map;
	struct rps_dev_flow_table *flow_table;

	map = rcu_dereference_protected(queue->rps_map, 1);
	if (map) {
		RCU_INIT_POINTER(queue->rps_map, NULL);
		kfree_rcu(map, rcu);
	}

	flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
	if (flow_table) {
		RCU_INIT_POINTER(queue->rps_flow_table, NULL);
		call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
	}
#endif

	memset(kobj, 0, sizeof(*kobj));
	dev_put(queue->dev);
}

static const void *rx_queue_namespace(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
	struct device *dev = &queue->dev->dev;
	const void *ns = NULL;

	if (dev->class && dev->class->ns_type)
		ns = dev->class->namespace(dev);

	return ns;
}

static void rx_queue_get_ownership(struct kobject *kobj,
				   kuid_t *uid, kgid_t *gid)
{
	const struct net *net = rx_queue_namespace(kobj);

	net_ns_get_ownership(net, uid, gid);
}

static struct kobj_type rx_queue_ktype __ro_after_init = {
	.sysfs_ops = &rx_queue_sysfs_ops,
	.release = rx_queue_release,
	.default_attrs = rx_queue_default_attrs,
	.namespace = rx_queue_namespace,
	.get_ownership = rx_queue_get_ownership,
};

static int rx_queue_add_kobject(struct net_device *dev, int index)
{
	struct netdev_rx_queue *queue = dev->_rx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	kobj->kset = dev->queues_kset;
	error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
				     "rx-%u", index);
	if (error)
		return error;

	if (dev->sysfs_rx_queue_group) {
		error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
		if (error) {
			kobject_put(kobj);
			return error;
		}
	}

	kobject_uevent(kobj, KOBJ_ADD);
	dev_hold(queue->dev);

	return error;
}
#endif /* CONFIG_SYSFS */

int
net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
	int i;
	int error = 0;

#ifndef CONFIG_RPS
	if (!dev->sysfs_rx_queue_group)
		return 0;
#endif
	for (i = old_num; i < new_num; i++) {
		error = rx_queue_add_kobject(dev, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num) {
		struct kobject *kobj = &dev->_rx[i].kobj;

		if (!refcount_read(&dev_net(dev)->count))
			kobj->uevent_suppress = 1;
		if (dev->sysfs_rx_queue_group)
			sysfs_remove_group(kobj, dev->sysfs_rx_queue_group);
		kobject_put(kobj);
	}

	return error;
#else
	return 0;
#endif
}
#ifdef CONFIG_SYSFS
/*
 * netdev_queue sysfs structures and functions.
 */
struct netdev_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_queue *queue, char *buf);
	ssize_t (*store)(struct netdev_queue *queue,
			 const char *buf, size_t len);
};
#define to_netdev_queue_attr(_attr) \
	container_of(_attr, struct netdev_queue_attribute, attr)

#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)

static ssize_t netdev_queue_attr_show(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	const struct netdev_queue_attribute *attribute
		= to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, buf);
}

static ssize_t netdev_queue_attr_store(struct kobject *kobj,
				       struct attribute *attr,
				       const char *buf, size_t count)
{
	const struct netdev_queue_attribute *attribute
		= to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, buf, count);
}

static const struct sysfs_ops netdev_queue_sysfs_ops = {
	.show = netdev_queue_attr_show,
	.store = netdev_queue_attr_store,
};

static ssize_t tx_timeout_show(struct netdev_queue *queue, char *buf)
{
	unsigned long trans_timeout;

	spin_lock_irq(&queue->_xmit_lock);
	trans_timeout = queue->trans_timeout;
	spin_unlock_irq(&queue->_xmit_lock);

	return sprintf(buf, "%lu", trans_timeout);
}

static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
{
	struct net_device *dev = queue->dev;
	unsigned int i;

	i = queue - dev->_tx;
	BUG_ON(i >= dev->num_tx_queues);

	return i;
}
static ssize_t traffic_class_show(struct netdev_queue *queue,
				  char *buf)
{
	struct net_device *dev = queue->dev;
	int index;
	int tc;

	if (!netif_is_multiqueue(dev))
		return -ENOENT;

	index = get_netdev_queue_index(queue);

	/* If queue belongs to subordinate dev use its TC mapping */
	dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;

	tc = netdev_txq_to_tc(dev, index);
	if (tc < 0)
		return -EINVAL;

	/* We can report the traffic class one of two ways:
	 * Subordinate device traffic classes are reported with the traffic
	 * class first, and then the subordinate class so for example TC0 on
	 * subordinate device 2 will be reported as "0-2". If the queue
	 * belongs to the root device it will be reported with just the
	 * traffic class, so just "0" for TC 0 for example.
	 */
	return dev->num_tc < 0 ? sprintf(buf, "%u%d\n", tc, dev->num_tc) :
				 sprintf(buf, "%u\n", tc);
}
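
/*
 * Example (illustrative only; interface name is hypothetical): a queue on
 * the root device reports just its traffic class, while TC 0 on
 * subordinate device 2 would read back as "0-2" per the convention above:
 *
 *   $ cat /sys/class/net/eth0/queues/tx-0/traffic_class
 *   0
 */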
#ifdef CONFIG_XPS
static ssize_t tx_maxrate_show(struct netdev_queue *queue,
			       char *buf)
{
	return sprintf(buf, "%lu\n", queue->tx_maxrate);
}

static ssize_t tx_maxrate_store(struct netdev_queue *queue,
				const char *buf, size_t len)
{
	struct net_device *dev = queue->dev;
	int err, index = get_netdev_queue_index(queue);
	u32 rate = 0;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	err = kstrtou32(buf, 10, &rate);
	if (err < 0)
		return err;

	if (!rtnl_trylock())
		return restart_syscall();

	err = -EOPNOTSUPP;
	if (dev->netdev_ops->ndo_set_tx_maxrate)
		err = dev->netdev_ops->ndo_set_tx_maxrate(dev, index, rate);

	rtnl_unlock();
	if (!err) {
		queue->tx_maxrate = rate;
		return len;
	}
	return err;
}

static struct netdev_queue_attribute queue_tx_maxrate __ro_after_init
	= __ATTR_RW(tx_maxrate);
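
/*
 * Example (illustrative only; interface name is hypothetical): cap a
 * queue's transmit rate in Mbps via the driver's ndo_set_tx_maxrate;
 * a value of zero is conventionally treated by drivers as "no limit":
 *
 *   # echo 1000 > /sys/class/net/eth0/queues/tx-0/tx_maxrate
 */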
#endif

static struct netdev_queue_attribute queue_trans_timeout __ro_after_init
	= __ATTR_RO(tx_timeout);

static struct netdev_queue_attribute queue_traffic_class __ro_after_init
	= __ATTR_RO(traffic_class);

#ifdef CONFIG_BQL
/*
 * Byte queue limits sysfs structures and functions.
 */
static ssize_t bql_show(char *buf, unsigned int value)
{
	return sprintf(buf, "%u\n", value);
}

static ssize_t bql_set(const char *buf, const size_t count,
		       unsigned int *pvalue)
{
	unsigned int value;
	int err;

	if (!strcmp(buf, "max") || !strcmp(buf, "max\n")) {
		value = DQL_MAX_LIMIT;
	} else {
		err = kstrtouint(buf, 10, &value);
		if (err < 0)
			return err;
		if (value > DQL_MAX_LIMIT)
			return -EINVAL;
	}

	*pvalue = value;

	return count;
}

static ssize_t bql_show_hold_time(struct netdev_queue *queue,
				  char *buf)
{
	struct dql *dql = &queue->dql;

	return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time));
}

static ssize_t bql_set_hold_time(struct netdev_queue *queue,
				 const char *buf, size_t len)
{
	struct dql *dql = &queue->dql;
	unsigned int value;
	int err;

	err = kstrtouint(buf, 10, &value);
	if (err < 0)
		return err;

	dql->slack_hold_time = msecs_to_jiffies(value);

	return len;
}

static struct netdev_queue_attribute bql_hold_time_attribute __ro_after_init
	= __ATTR(hold_time, 0644,
		 bql_show_hold_time, bql_set_hold_time);

static ssize_t bql_show_inflight(struct netdev_queue *queue,
				 char *buf)
{
	struct dql *dql = &queue->dql;

	return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed);
}

static struct netdev_queue_attribute bql_inflight_attribute __ro_after_init =
	__ATTR(inflight, 0444, bql_show_inflight, NULL);

#define BQL_ATTR(NAME, FIELD)						\
static ssize_t bql_show_ ## NAME(struct netdev_queue *queue,		\
				 char *buf)				\
{									\
	return bql_show(buf, queue->dql.FIELD);				\
}									\
									\
static ssize_t bql_set_ ## NAME(struct netdev_queue *queue,		\
				const char *buf, size_t len)		\
{									\
	return bql_set(buf, len, &queue->dql.FIELD);			\
}									\
									\
static struct netdev_queue_attribute bql_ ## NAME ## _attribute __ro_after_init \
	= __ATTR(NAME, 0644,						\
		 bql_show_ ## NAME, bql_set_ ## NAME)

BQL_ATTR(limit, limit);
BQL_ATTR(limit_max, max_limit);
BQL_ATTR(limit_min, min_limit);

static struct attribute *dql_attrs[] __ro_after_init = {
	&bql_limit_attribute.attr,
	&bql_limit_max_attribute.attr,
	&bql_limit_min_attribute.attr,
	&bql_hold_time_attribute.attr,
	&bql_inflight_attribute.attr,
	NULL
};

static const struct attribute_group dql_group = {
	.name = "byte_queue_limits",
	.attrs = dql_attrs,
};
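
/*
 * Example (illustrative only; interface name is hypothetical): BQL state is
 * exposed per TX queue under byte_queue_limits/; writing "max" to a limit
 * file selects DQL_MAX_LIMIT, as handled in bql_set() above:
 *
 *   $ cat /sys/class/net/eth0/queues/tx-0/byte_queue_limits/inflight
 *   # echo max > /sys/class/net/eth0/queues/tx-0/byte_queue_limits/limit_max
 */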
#endif /* CONFIG_BQL */

#ifdef CONFIG_XPS
static ssize_t xps_cpus_show(struct netdev_queue *queue,
			     char *buf)
{
	struct net_device *dev = queue->dev;
	int cpu, len, num_tc = 1, tc = 0;
	struct xps_dev_maps *dev_maps;
	cpumask_var_t mask;
	unsigned long index;

	if (!netif_is_multiqueue(dev))
		return -ENOENT;

	index = get_netdev_queue_index(queue);

	if (dev->num_tc) {
		/* Do not allow XPS on subordinate device directly */
		num_tc = dev->num_tc;
		if (num_tc < 0)
			return -EINVAL;

		/* If queue belongs to subordinate dev use its map */
		dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;

		tc = netdev_txq_to_tc(dev, index);
		if (tc < 0)
			return -EINVAL;
	}

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_cpus_map);
	if (dev_maps) {
		for_each_possible_cpu(cpu) {
			int i, tci = cpu * num_tc + tc;
			struct xps_map *map;

			map = rcu_dereference(dev_maps->attr_map[tci]);
			if (!map)
				continue;

			for (i = map->len; i--;) {
				if (map->queues[i] == index) {
					cpumask_set_cpu(cpu, mask);
					break;
				}
			}
		}
	}
	rcu_read_unlock();

	len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
	free_cpumask_var(mask);

	return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t xps_cpus_store(struct netdev_queue *queue,
			      const char *buf, size_t len)
{
	struct net_device *dev = queue->dev;
	unsigned long index;
	cpumask_var_t mask;
	int err;

	if (!netif_is_multiqueue(dev))
		return -ENOENT;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	index = get_netdev_queue_index(queue);

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	err = netif_set_xps_queue(dev, mask, index);

	free_cpumask_var(mask);

	return err ? : len;
}

static struct netdev_queue_attribute xps_cpus_attribute __ro_after_init
	= __ATTR_RW(xps_cpus);

static ssize_t xps_rxqs_show(struct netdev_queue *queue, char *buf)
{
	struct net_device *dev = queue->dev;
	struct xps_dev_maps *dev_maps;
	unsigned long *mask, index;
	int j, len, num_tc = 1, tc = 0;

	index = get_netdev_queue_index(queue);

	if (dev->num_tc) {
		num_tc = dev->num_tc;
		tc = netdev_txq_to_tc(dev, index);
		if (tc < 0)
			return -EINVAL;
	}
	mask = kcalloc(BITS_TO_LONGS(dev->num_rx_queues), sizeof(long),
		       GFP_KERNEL);
	if (!mask)
		return -ENOMEM;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_rxqs_map);
	if (!dev_maps)
		goto out_no_maps;

	for (j = -1; j = netif_attrmask_next(j, NULL, dev->num_rx_queues),
	     j < dev->num_rx_queues;) {
		int i, tci = j * num_tc + tc;
		struct xps_map *map;

		map = rcu_dereference(dev_maps->attr_map[tci]);
		if (!map)
			continue;

		for (i = map->len; i--;) {
			if (map->queues[i] == index) {
				set_bit(j, mask);
				break;
			}
		}
	}
out_no_maps:
	rcu_read_unlock();

	len = bitmap_print_to_pagebuf(false, buf, mask, dev->num_rx_queues);
	kfree(mask);

	return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t xps_rxqs_store(struct netdev_queue *queue, const char *buf,
			      size_t len)
{
	struct net_device *dev = queue->dev;
	struct net *net = dev_net(dev);
	unsigned long *mask, index;
	int err;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	mask = kcalloc(BITS_TO_LONGS(dev->num_rx_queues), sizeof(long),
		       GFP_KERNEL);
	if (!mask)
		return -ENOMEM;

	index = get_netdev_queue_index(queue);

	err = bitmap_parse(buf, len, mask, dev->num_rx_queues);
	if (err) {
		kfree(mask);
		return err;
	}

	cpus_read_lock();
	err = __netif_set_xps_queue(dev, mask, index, true);
	cpus_read_unlock();

	kfree(mask);
	return err ? : len;
}

static struct netdev_queue_attribute xps_rxqs_attribute __ro_after_init
	= __ATTR_RW(xps_rxqs);
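
/*
 * Example (illustrative only; interface name is hypothetical): XPS for a
 * TX queue can be configured either as a CPU mask (xps_cpus) or as an RX
 * queue mask (xps_rxqs), both written as hex bitmaps:
 *
 *   # echo f > /sys/class/net/eth0/queues/tx-0/xps_cpus
 *   # echo 3 > /sys/class/net/eth0/queues/tx-0/xps_rxqs
 */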
#endif /* CONFIG_XPS */

static struct attribute *netdev_queue_default_attrs[] __ro_after_init = {
	&queue_trans_timeout.attr,
	&queue_traffic_class.attr,
#ifdef CONFIG_XPS
	&xps_cpus_attribute.attr,
	&xps_rxqs_attribute.attr,
	&queue_tx_maxrate.attr,
#endif
	NULL
};

static void netdev_queue_release(struct kobject *kobj)
{
	struct netdev_queue *queue = to_netdev_queue(kobj);

	memset(kobj, 0, sizeof(*kobj));
	dev_put(queue->dev);
}

static const void *netdev_queue_namespace(struct kobject *kobj)
{
	struct netdev_queue *queue = to_netdev_queue(kobj);
	struct device *dev = &queue->dev->dev;
	const void *ns = NULL;

	if (dev->class && dev->class->ns_type)
		ns = dev->class->namespace(dev);

	return ns;
}

static void netdev_queue_get_ownership(struct kobject *kobj,
				       kuid_t *uid, kgid_t *gid)
{
	const struct net *net = netdev_queue_namespace(kobj);

	net_ns_get_ownership(net, uid, gid);
}

static struct kobj_type netdev_queue_ktype __ro_after_init = {
	.sysfs_ops = &netdev_queue_sysfs_ops,
	.release = netdev_queue_release,
	.default_attrs = netdev_queue_default_attrs,
	.namespace = netdev_queue_namespace,
	.get_ownership = netdev_queue_get_ownership,
};

static int netdev_queue_add_kobject(struct net_device *dev, int index)
{
	struct netdev_queue *queue = dev->_tx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	kobj->kset = dev->queues_kset;
	error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
				     "tx-%u", index);
	if (error)
		return error;

#ifdef CONFIG_BQL
	error = sysfs_create_group(kobj, &dql_group);
	if (error) {
		kobject_put(kobj);
		return error;
	}
#endif

	kobject_uevent(kobj, KOBJ_ADD);
	dev_hold(queue->dev);

	return 0;
}
#endif /* CONFIG_SYSFS */

int
netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
	int i;
	int error = 0;

	for (i = old_num; i < new_num; i++) {
		error = netdev_queue_add_kobject(dev, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num) {
		struct netdev_queue *queue = dev->_tx + i;

		if (!refcount_read(&dev_net(dev)->count))
			queue->kobj.uevent_suppress = 1;
#ifdef CONFIG_BQL
		sysfs_remove_group(&queue->kobj, &dql_group);
#endif
		kobject_put(&queue->kobj);
	}

	return error;
#else
	return 0;
#endif /* CONFIG_SYSFS */
}
static int register_queue_kobjects(struct net_device *dev)
{
	int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
	dev->queues_kset = kset_create_and_add("queues",
					       NULL, &dev->dev.kobj);
	if (!dev->queues_kset)
		return -ENOMEM;
	real_rx = dev->real_num_rx_queues;
#endif
	real_tx = dev->real_num_tx_queues;

	error = net_rx_queue_update_kobjects(dev, 0, real_rx);
	if (error)
		goto error;
	rxq = real_rx;

	error = netdev_queue_update_kobjects(dev, 0, real_tx);
	if (error)
		goto error;
	txq = real_tx;

	return 0;

error:
	netdev_queue_update_kobjects(dev, txq, 0);
	net_rx_queue_update_kobjects(dev, rxq, 0);
	return error;
}

static void remove_queue_kobjects(struct net_device *dev)
{
	int real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
	real_rx = dev->real_num_rx_queues;
#endif
	real_tx = dev->real_num_tx_queues;

	net_rx_queue_update_kobjects(dev, real_rx, 0);
	netdev_queue_update_kobjects(dev, real_tx, 0);
#ifdef CONFIG_SYSFS
	kset_unregister(dev->queues_kset);
#endif
}
static bool net_current_may_mount(void)
{
	struct net *net = current->nsproxy->net_ns;

	return ns_capable(net->user_ns, CAP_SYS_ADMIN);
}

static void *net_grab_current_ns(void)
{
	struct net *ns = current->nsproxy->net_ns;
#ifdef CONFIG_NET_NS
	if (ns)
		refcount_inc(&ns->passive);
#endif
	return ns;
}

static const void *net_initial_ns(void)
{
	return &init_net;
}

static const void *net_netlink_ns(struct sock *sk)
{
	return sock_net(sk);
}

const struct kobj_ns_type_operations net_ns_type_operations = {
	.type = KOBJ_NS_TYPE_NET,
	.current_may_mount = net_current_may_mount,
	.grab_current_ns = net_grab_current_ns,
	.netlink_ns = net_netlink_ns,
	.initial_ns = net_initial_ns,
	.drop_ns = net_drop_ns,
};
EXPORT_SYMBOL_GPL(net_ns_type_operations);
static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
{
	struct net_device *dev = to_net_dev(d);
	int retval;

	/* pass interface to uevent. */
	retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
	if (retval)
		goto exit;

	/* pass ifindex to uevent.
	 * ifindex is useful as it won't change (interface name may change)
	 * and is what RtNetlink uses natively.
	 */
	retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex);

exit:
	return retval;
}

/*
 * netdev_release -- destroy and free a dead device.
 * Called when last reference to device kobject is gone.
 */
static void netdev_release(struct device *d)
{
	struct net_device *dev = to_net_dev(d);

	BUG_ON(dev->reg_state != NETREG_RELEASED);

	/* no need to wait for rcu grace period:
	 * device is dead and about to be freed.
	 */
	kfree(rcu_access_pointer(dev->ifalias));
	netdev_freemem(dev);
}

static const void *net_namespace(struct device *d)
{
	struct net_device *dev = to_net_dev(d);

	return dev_net(dev);
}

static void net_get_ownership(struct device *d, kuid_t *uid, kgid_t *gid)
{
	struct net_device *dev = to_net_dev(d);
	const struct net *net = dev_net(dev);

	net_ns_get_ownership(net, uid, gid);
}

static struct class net_class __ro_after_init = {
	.name = "net",
	.dev_release = netdev_release,
	.dev_groups = net_class_groups,
	.dev_uevent = netdev_uevent,
	.ns_type = &net_ns_type_operations,
	.namespace = net_namespace,
	.get_ownership = net_get_ownership,
};
#ifdef CONFIG_OF_NET
static int of_dev_node_match(struct device *dev, const void *data)
{
	int ret = 0;

	if (dev->parent)
		ret = dev->parent->of_node == data;

	return ret == 0 ? dev->of_node == data : ret;
}

/*
 * of_find_net_device_by_node - lookup the net device for the device node
 * @np: OF device node
 *
 * Looks up the net_device structure corresponding with the device node.
 * If successful, returns a pointer to the net_device with the embedded
 * struct device refcount incremented by one, or NULL on failure. The
 * refcount must be dropped when done with the net_device.
 */
struct net_device *of_find_net_device_by_node(struct device_node *np)
{
	struct device *dev;

	dev = class_find_device(&net_class, NULL, np, of_dev_node_match);
	if (!dev)
		return NULL;

	return to_net_dev(dev);
}
EXPORT_SYMBOL(of_find_net_device_by_node);
#endif
/* Delete sysfs entries but hold kobject reference until after all
 * netdev references are gone.
 */
void netdev_unregister_kobject(struct net_device *ndev)
{
	struct device *dev = &ndev->dev;

	if (!refcount_read(&dev_net(ndev)->count))
		dev_set_uevent_suppress(dev, 1);

	kobject_get(&dev->kobj);

	remove_queue_kobjects(ndev);

	pm_runtime_set_memalloc_noio(dev, false);

	device_del(dev);
}

/* Create sysfs entries for network device. */
int netdev_register_kobject(struct net_device *ndev)
{
	struct device *dev = &ndev->dev;
	const struct attribute_group **groups = ndev->sysfs_groups;
	int error = 0;

	device_initialize(dev);
	dev->class = &net_class;
	dev->platform_data = ndev;
	dev->groups = groups;

	dev_set_name(dev, "%s", ndev->name);

#ifdef CONFIG_SYSFS
	/* Allow for a device specific group */
	if (*groups)
		groups++;

	*groups++ = &netstat_group;

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
	if (ndev->ieee80211_ptr)
		*groups++ = &wireless_group;
#if IS_ENABLED(CONFIG_WIRELESS_EXT)
	else if (ndev->wireless_handlers)
		*groups++ = &wireless_group;
#endif
#endif
#endif /* CONFIG_SYSFS */

	error = device_add(dev);
	if (error)
		return error;

	error = register_queue_kobjects(ndev);
	if (error) {
		device_del(dev);
		return error;
	}

	pm_runtime_set_memalloc_noio(dev, true);

	return error;
}

int netdev_class_create_file_ns(const struct class_attribute *class_attr,
				const void *ns)
{
	return class_create_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_create_file_ns);

void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
				 const void *ns)
{
	class_remove_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_remove_file_ns);

int __init netdev_kobject_init(void)
{
	kobj_ns_type_register(&net_ns_type_operations);
	return class_register(&net_class);
}