/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Routing netlink socket interface: protocol independent part.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Vitaly E. Lavrov		RTA_OK arithmetics was wrong.
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/mutex.h>
#include <linux/if_addr.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>

#include <asm/uaccess.h>

#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/switchdev.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/udp.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/fib_rules.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>

struct rtnl_link {
	rtnl_doit_func		doit;
	rtnl_dumpit_func	dumpit;
	rtnl_calcit_func	calcit;
};

static DEFINE_MUTEX(rtnl_mutex);

void rtnl_lock(void)
{
	mutex_lock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock);

static struct sk_buff *defer_kfree_skb_list;

void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail)
{
	if (head && tail) {
		tail->next = defer_kfree_skb_list;
		defer_kfree_skb_list = head;
	}
}
EXPORT_SYMBOL(rtnl_kfree_skbs);

void __rtnl_unlock(void)
{
	struct sk_buff *head = defer_kfree_skb_list;

	defer_kfree_skb_list = NULL;
	mutex_unlock(&rtnl_mutex);

	while (head) {
		struct sk_buff *next = head->next;

		kfree_skb(head);
		cond_resched();
		head = next;
	}
}

void rtnl_unlock(void)
{
	/* This fellow will unlock it for us. */
	netdev_run_todo();
}
EXPORT_SYMBOL(rtnl_unlock);

int rtnl_trylock(void)
{
	return mutex_trylock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_trylock);

int rtnl_is_locked(void)
{
	return mutex_is_locked(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_is_locked);
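
/* Illustrative sketch, not part of this file: the usual calling pattern
 * for the locking API above. Anything that reconfigures a net_device runs
 * under the RTNL mutex, and rtnl_unlock() additionally drains the netdev
 * todo list via netdev_run_todo(). The function below is hypothetical;
 * dev_change_name() is a real RTNL-protected helper.
 */
#if 0
static void example_rename_device(struct net_device *dev, const char *name)
{
	rtnl_lock();
	dev_change_name(dev, name);	/* must be called under RTNL */
	rtnl_unlock();			/* unlocks and runs pending todo */
}
#endif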

#ifdef CONFIG_PROVE_LOCKING
bool lockdep_rtnl_is_held(void)
{
	return lockdep_is_held(&rtnl_mutex);
}
EXPORT_SYMBOL(lockdep_rtnl_is_held);
#endif /* #ifdef CONFIG_PROVE_LOCKING */

static struct rtnl_link *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];

static inline int rtm_msgindex(int msgtype)
{
	int msgindex = msgtype - RTM_BASE;

	/*
	 * msgindex < 0 implies someone tried to register a netlink
	 * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
	 * the message type has not been added to linux/rtnetlink.h
	 */
	BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);

	return msgindex;
}

static rtnl_doit_func rtnl_get_doit(int protocol, int msgindex)
{
	struct rtnl_link *tab;

	if (protocol <= RTNL_FAMILY_MAX)
		tab = rtnl_msg_handlers[protocol];
	else
		tab = NULL;

	if (tab == NULL || tab[msgindex].doit == NULL)
		tab = rtnl_msg_handlers[PF_UNSPEC];

	return tab[msgindex].doit;
}

static rtnl_dumpit_func rtnl_get_dumpit(int protocol, int msgindex)
{
	struct rtnl_link *tab;

	if (protocol <= RTNL_FAMILY_MAX)
		tab = rtnl_msg_handlers[protocol];
	else
		tab = NULL;

	if (tab == NULL || tab[msgindex].dumpit == NULL)
		tab = rtnl_msg_handlers[PF_UNSPEC];

	return tab[msgindex].dumpit;
}

static rtnl_calcit_func rtnl_get_calcit(int protocol, int msgindex)
{
	struct rtnl_link *tab;

	if (protocol <= RTNL_FAMILY_MAX)
		tab = rtnl_msg_handlers[protocol];
	else
		tab = NULL;

	if (tab == NULL || tab[msgindex].calcit == NULL)
		tab = rtnl_msg_handlers[PF_UNSPEC];

	return tab[msgindex].calcit;
}

/**
 * __rtnl_register - Register a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @calcit: Function pointer to calc size of dump message
 *
 * Registers the specified function pointers (at least one of them has
 * to be non-NULL) to be called whenever a request message for the
 * specified protocol family and message type is received.
 *
 * The special protocol family PF_UNSPEC may be used to define fallback
 * function pointers for the case when no entry for the specific protocol
 * family exists.
 *
 * Returns 0 on success or a negative error code.
 */
int __rtnl_register(int protocol, int msgtype,
		    rtnl_doit_func doit, rtnl_dumpit_func dumpit,
		    rtnl_calcit_func calcit)
{
	struct rtnl_link *tab;
	int msgindex;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	tab = rtnl_msg_handlers[protocol];
	if (tab == NULL) {
		tab = kcalloc(RTM_NR_MSGTYPES, sizeof(*tab), GFP_KERNEL);
		if (tab == NULL)
			return -ENOBUFS;

		rtnl_msg_handlers[protocol] = tab;
	}

	if (doit)
		tab[msgindex].doit = doit;
	if (dumpit)
		tab[msgindex].dumpit = dumpit;
	if (calcit)
		tab[msgindex].calcit = calcit;

	return 0;
}
EXPORT_SYMBOL_GPL(__rtnl_register);

/**
 * rtnl_register - Register a rtnetlink message type
 *
 * Identical to __rtnl_register() but panics on failure. This is useful
 * as failure of this function is very unlikely, it can only happen due
 * to lack of memory when allocating the chain to store all message
 * handlers for a protocol. Meant for use in init functions where lack
 * of memory implies no sense in continuing.
 */
void rtnl_register(int protocol, int msgtype,
		   rtnl_doit_func doit, rtnl_dumpit_func dumpit,
		   rtnl_calcit_func calcit)
{
	if (__rtnl_register(protocol, msgtype, doit, dumpit, calcit) < 0)
		panic("Unable to register rtnetlink message handler, "
		      "protocol = %d, message type = %d\n",
		      protocol, msgtype);
}
EXPORT_SYMBOL_GPL(rtnl_register);
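
/* Illustrative sketch, not part of this file: how a protocol module
 * typically registers its handlers from an __init function. The
 * example_* callbacks are hypothetical; the real protocol families
 * (e.g. IPv4's RTM_GETROUTE handlers) are wired up the same way.
 */
#if 0
static int example_doit(struct sk_buff *skb, struct nlmsghdr *nlh);
static int example_dumpit(struct sk_buff *skb, struct netlink_callback *cb);

static int __init example_rtnl_init(void)
{
	/* Panics on failure, which is acceptable at boot time. */
	rtnl_register(PF_INET, RTM_GETROUTE, example_doit, example_dumpit,
		      NULL);
	return 0;
}
#endif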

/**
 * rtnl_unregister - Unregister a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_unregister(int protocol, int msgtype)
{
	int msgindex;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	if (rtnl_msg_handlers[protocol] == NULL)
		return -ENOENT;

	rtnl_msg_handlers[protocol][msgindex].doit = NULL;
	rtnl_msg_handlers[protocol][msgindex].dumpit = NULL;
	rtnl_msg_handlers[protocol][msgindex].calcit = NULL;

	return 0;
}
EXPORT_SYMBOL_GPL(rtnl_unregister);

/**
 * rtnl_unregister_all - Unregister all rtnetlink message types of a protocol
 * @protocol: Protocol family or PF_UNSPEC
 *
 * Identical to calling rtnl_unregister() for all registered message types
 * of a certain protocol family.
 */
void rtnl_unregister_all(int protocol)
{
	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);

	kfree(rtnl_msg_handlers[protocol]);
	rtnl_msg_handlers[protocol] = NULL;
}
EXPORT_SYMBOL_GPL(rtnl_unregister_all);

static LIST_HEAD(link_ops);

static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
{
	const struct rtnl_link_ops *ops;

	list_for_each_entry(ops, &link_ops, list) {
		if (!strcmp(ops->kind, kind))
			return ops;
	}
	return NULL;
}

/**
 * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * The caller must hold the rtnl_mutex. This function should be used
 * by drivers that create devices during module initialization. It
 * must be called before registering the devices.
 *
 * Returns 0 on success or a negative error code.
 */
int __rtnl_link_register(struct rtnl_link_ops *ops)
{
	if (rtnl_link_ops_get(ops->kind))
		return -EEXIST;

	/* The check for setup is here because if ops
	 * does not have that filled up, it is not possible
	 * to use the ops for creating device. So do not
	 * fill up dellink as well. That disables rtnl_dellink.
	 */
	if (ops->setup && !ops->dellink)
		ops->dellink = unregister_netdevice_queue;

	list_add_tail(&ops->list, &link_ops);
	return 0;
}
EXPORT_SYMBOL_GPL(__rtnl_link_register);

/**
 * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_link_register(struct rtnl_link_ops *ops)
{
	int err;

	rtnl_lock();
	err = __rtnl_link_register(ops);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL_GPL(rtnl_link_register);
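
/* Illustrative sketch, not part of this file: a minimal rtnl_link_ops
 * registration of the kind done by virtual device drivers. All example_*
 * symbols are hypothetical.
 */
#if 0
static void example_setup(struct net_device *dev);

static struct rtnl_link_ops example_link_ops __read_mostly = {
	.kind	= "example",
	.setup	= example_setup,
	/* .dellink is defaulted by __rtnl_link_register() when unset */
};

static int __init example_module_init(void)
{
	return rtnl_link_register(&example_link_ops);
}

static void __exit example_module_exit(void)
{
	rtnl_link_unregister(&example_link_ops);
}
#endif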

static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
{
	struct net_device *dev;
	LIST_HEAD(list_kill);

	for_each_netdev(net, dev) {
		if (dev->rtnl_link_ops == ops)
			ops->dellink(dev, &list_kill);
	}
	unregister_netdevice_many(&list_kill);
}

/**
 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 *
 * The caller must hold the rtnl_mutex.
 */
void __rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	struct net *net;

	for_each_net(net) {
		__rtnl_kill_links(net, ops);
	}
	list_del(&ops->list);
}
EXPORT_SYMBOL_GPL(__rtnl_link_unregister);

/* Return with the rtnl_lock held when there are no network
 * devices unregistering in any network namespace.
 */
static void rtnl_lock_unregistering_all(void)
{
	struct net *net;
	bool unregistering;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&netdev_unregistering_wq, &wait);
	for (;;) {
		unregistering = false;
		rtnl_lock();
		for_each_net(net) {
			if (net->dev_unreg_count > 0) {
				unregistering = true;
				break;
			}
		}
		if (!unregistering)
			break;
		__rtnl_unlock();

		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&netdev_unregistering_wq, &wait);
}

/**
 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 */
void rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	/* Close the race with cleanup_net() */
	mutex_lock(&net_mutex);
	rtnl_lock_unregistering_all();
	__rtnl_link_unregister(ops);
	rtnl_unlock();
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(rtnl_link_unregister);

static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
{
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;

	master_dev = netdev_master_upper_dev_get((struct net_device *) dev);
	if (!master_dev)
		return 0;
	ops = master_dev->rtnl_link_ops;
	if (!ops || !ops->get_slave_size)
		return 0;
	/* IFLA_INFO_SLAVE_DATA + nested data */
	return nla_total_size(sizeof(struct nlattr)) +
	       ops->get_slave_size(master_dev, dev);
}

static size_t rtnl_link_get_size(const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	size_t size;

	if (!ops)
		return 0;

	size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
	       nla_total_size(strlen(ops->kind) + 1);  /* IFLA_INFO_KIND */

	if (ops->get_size)
		/* IFLA_INFO_DATA + nested data */
		size += nla_total_size(sizeof(struct nlattr)) +
			ops->get_size(dev);

	if (ops->get_xstats_size)
		/* IFLA_INFO_XSTATS */
		size += nla_total_size(ops->get_xstats_size(dev));

	size += rtnl_link_get_slave_info_data_size(dev);

	return size;
}

static LIST_HEAD(rtnl_af_ops);

static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
{
	const struct rtnl_af_ops *ops;

	list_for_each_entry(ops, &rtnl_af_ops, list) {
		if (ops->family == family)
			return ops;
	}

	return NULL;
}

/**
 * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
 * @ops: struct rtnl_af_ops * to register
 */
void rtnl_af_register(struct rtnl_af_ops *ops)
{
	rtnl_lock();
	list_add_tail(&ops->list, &rtnl_af_ops);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(rtnl_af_register);
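
/* Illustrative sketch, not part of this file: an address-family handler
 * of the kind the IPv4/IPv6 code registers so its per-device state shows
 * up under IFLA_AF_SPEC. The example_* callbacks are hypothetical.
 */
#if 0
static int example_fill_link_af(struct sk_buff *skb,
				const struct net_device *dev,
				u32 ext_filter_mask);
static size_t example_get_link_af_size(const struct net_device *dev,
				       u32 ext_filter_mask);

static struct rtnl_af_ops example_af_ops __read_mostly = {
	.family		  = AF_INET,
	.fill_link_af	  = example_fill_link_af,
	.get_link_af_size = example_get_link_af_size,
};

static int __init example_af_init(void)
{
	rtnl_af_register(&example_af_ops);
	return 0;
}
#endif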

/**
 * __rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
 * @ops: struct rtnl_af_ops * to unregister
 *
 * The caller must hold the rtnl_mutex.
 */
void __rtnl_af_unregister(struct rtnl_af_ops *ops)
{
	list_del(&ops->list);
}
EXPORT_SYMBOL_GPL(__rtnl_af_unregister);

/**
 * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
 * @ops: struct rtnl_af_ops * to unregister
 */
void rtnl_af_unregister(struct rtnl_af_ops *ops)
{
	rtnl_lock();
	__rtnl_af_unregister(ops);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(rtnl_af_unregister);

static size_t rtnl_link_get_af_size(const struct net_device *dev,
				    u32 ext_filter_mask)
{
	struct rtnl_af_ops *af_ops;
	size_t size;

	/* IFLA_AF_SPEC */
	size = nla_total_size(sizeof(struct nlattr));

	list_for_each_entry(af_ops, &rtnl_af_ops, list) {
		if (af_ops->get_link_af_size) {
			/* AF_* + nested data */
			size += nla_total_size(sizeof(struct nlattr)) +
				af_ops->get_link_af_size(dev, ext_filter_mask);
		}
	}

	return size;
}

static bool rtnl_have_link_slave_info(const struct net_device *dev)
{
	struct net_device *master_dev;

	master_dev = netdev_master_upper_dev_get((struct net_device *) dev);
	if (master_dev && master_dev->rtnl_link_ops)
		return true;
	return false;
}

static int rtnl_link_slave_info_fill(struct sk_buff *skb,
				     const struct net_device *dev)
{
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;
	struct nlattr *slave_data;
	int err;

	master_dev = netdev_master_upper_dev_get((struct net_device *) dev);
	if (!master_dev)
		return 0;
	ops = master_dev->rtnl_link_ops;
	if (!ops)
		return 0;
	if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0)
		return -EMSGSIZE;
	if (ops->fill_slave_info) {
		slave_data = nla_nest_start(skb, IFLA_INFO_SLAVE_DATA);
		if (!slave_data)
			return -EMSGSIZE;
		err = ops->fill_slave_info(skb, master_dev, dev);
		if (err < 0)
			goto err_cancel_slave_data;
		nla_nest_end(skb, slave_data);
	}
	return 0;

err_cancel_slave_data:
	nla_nest_cancel(skb, slave_data);
	return err;
}

static int rtnl_link_info_fill(struct sk_buff *skb,
			       const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	struct nlattr *data;
	int err;

	if (!ops)
		return 0;
	if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
		return -EMSGSIZE;
	if (ops->fill_xstats) {
		err = ops->fill_xstats(skb, dev);
		if (err < 0)
			return err;
	}
	if (ops->fill_info) {
		data = nla_nest_start(skb, IFLA_INFO_DATA);
		if (data == NULL)
			return -EMSGSIZE;
		err = ops->fill_info(skb, dev);
		if (err < 0)
			goto err_cancel_data;
		nla_nest_end(skb, data);
	}
	return 0;

err_cancel_data:
	nla_nest_cancel(skb, data);
	return err;
}

static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
{
	struct nlattr *linkinfo;
	int err = -EMSGSIZE;

	linkinfo = nla_nest_start(skb, IFLA_LINKINFO);
	if (linkinfo == NULL)
		goto out;

	err = rtnl_link_info_fill(skb, dev);
	if (err < 0)
		goto err_cancel_link;

	err = rtnl_link_slave_info_fill(skb, dev);
	if (err < 0)
		goto err_cancel_link;

	nla_nest_end(skb, linkinfo);
	return 0;

err_cancel_link:
	nla_nest_cancel(skb, linkinfo);
out:
	return err;
}
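
/* Illustrative sketch, not part of this file: the nla_nest_start() /
 * nla_nest_end() / nla_nest_cancel() pattern used throughout the fill
 * helpers above. EXAMPLE_ATTR_* and the function name are made up.
 */
#if 0
static int example_fill_nested(struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, EXAMPLE_ATTR_NEST);
	if (!nest)
		return -EMSGSIZE;
	if (nla_put_u32(skb, EXAMPLE_ATTR_VALUE, 42))
		goto nla_put_failure;
	nla_nest_end(skb, nest);	/* patches in the nest length */
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);	/* trims the partial attribute */
	return -EMSGSIZE;
}
#endif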

int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
{
	struct sock *rtnl = net->rtnl;
	int err = 0;

	NETLINK_CB(skb).dst_group = group;
	if (echo)
		atomic_inc(&skb->users);
	netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL);
	if (echo)
		err = netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);
	return err;
}

int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
{
	struct sock *rtnl = net->rtnl;

	return nlmsg_unicast(rtnl, skb, pid);
}
EXPORT_SYMBOL(rtnl_unicast);

void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
		 struct nlmsghdr *nlh, gfp_t flags)
{
	struct sock *rtnl = net->rtnl;
	int report = 0;

	if (nlh)
		report = nlmsg_report(nlh);

	nlmsg_notify(rtnl, skb, pid, group, report, flags);
}
EXPORT_SYMBOL(rtnl_notify);

void rtnl_set_sk_err(struct net *net, u32 group, int error)
{
	struct sock *rtnl = net->rtnl;

	netlink_set_err(rtnl, 0, group, error);
}
EXPORT_SYMBOL(rtnl_set_sk_err);

int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
{
	struct nlattr *mx;
	int i, valid = 0;

	mx = nla_nest_start(skb, RTA_METRICS);
	if (mx == NULL)
		return -ENOBUFS;

	for (i = 0; i < RTAX_MAX; i++) {
		if (metrics[i]) {
			if (i == RTAX_CC_ALGO - 1) {
				char tmp[TCP_CA_NAME_MAX], *name;

				name = tcp_ca_get_name_by_key(metrics[i], tmp);
				if (!name)
					continue;
				if (nla_put_string(skb, i + 1, name))
					goto nla_put_failure;
			} else if (i == RTAX_FEATURES - 1) {
				u32 user_features = metrics[i] & RTAX_FEATURE_MASK;

				if (!user_features)
					continue;
				BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK);
				if (nla_put_u32(skb, i + 1, user_features))
					goto nla_put_failure;
			} else {
				if (nla_put_u32(skb, i + 1, metrics[i]))
					goto nla_put_failure;
			}
			valid++;
		}
	}

	if (!valid) {
		nla_nest_cancel(skb, mx);
		return 0;
	}

	return nla_nest_end(skb, mx);

nla_put_failure:
	nla_nest_cancel(skb, mx);
	return -EMSGSIZE;
}
EXPORT_SYMBOL(rtnetlink_put_metrics);

int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
		       long expires, u32 error)
{
	struct rta_cacheinfo ci = {
		.rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse),
		.rta_used = dst->__use,
		.rta_clntref = atomic_read(&(dst->__refcnt)),
		.rta_error = error,
		.rta_id = id,
	};

	if (expires) {
		unsigned long clock;

		clock = jiffies_to_clock_t(abs(expires));
		clock = min_t(unsigned long, clock, INT_MAX);
		ci.rta_expires = (expires > 0) ? clock : -clock;
	}
	return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
}
EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);

static void set_operstate(struct net_device *dev, unsigned char transition)
{
	unsigned char operstate = dev->operstate;

	switch (transition) {
	case IF_OPER_UP:
		if ((operstate == IF_OPER_DORMANT ||
		     operstate == IF_OPER_UNKNOWN) &&
		    !netif_dormant(dev))
			operstate = IF_OPER_UP;
		break;

	case IF_OPER_DORMANT:
		if (operstate == IF_OPER_UP ||
		    operstate == IF_OPER_UNKNOWN)
			operstate = IF_OPER_DORMANT;
		break;
	}

	if (dev->operstate != operstate) {
		write_lock_bh(&dev_base_lock);
		dev->operstate = operstate;
		write_unlock_bh(&dev_base_lock);
		netdev_state_change(dev);
	}
}

static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
{
	return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
	       (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
}

static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
					   const struct ifinfomsg *ifm)
{
	unsigned int flags = ifm->ifi_flags;

	/* bugwards compatibility: ifi_change == 0 is treated as ~0 */
	if (ifm->ifi_change)
		flags = (flags & ifm->ifi_change) |
			(rtnl_dev_get_flags(dev) & ~ifm->ifi_change);

	return flags;
}

static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
				 const struct rtnl_link_stats64 *b)
{
	a->rx_packets = b->rx_packets;
	a->tx_packets = b->tx_packets;
	a->rx_bytes = b->rx_bytes;
	a->tx_bytes = b->tx_bytes;
	a->rx_errors = b->rx_errors;
	a->tx_errors = b->tx_errors;
	a->rx_dropped = b->rx_dropped;
	a->tx_dropped = b->tx_dropped;

	a->multicast = b->multicast;
	a->collisions = b->collisions;

	a->rx_length_errors = b->rx_length_errors;
	a->rx_over_errors = b->rx_over_errors;
	a->rx_crc_errors = b->rx_crc_errors;
	a->rx_frame_errors = b->rx_frame_errors;
	a->rx_fifo_errors = b->rx_fifo_errors;
	a->rx_missed_errors = b->rx_missed_errors;

	a->tx_aborted_errors = b->tx_aborted_errors;
	a->tx_carrier_errors = b->tx_carrier_errors;
	a->tx_fifo_errors = b->tx_fifo_errors;
	a->tx_heartbeat_errors = b->tx_heartbeat_errors;
	a->tx_window_errors = b->tx_window_errors;

	a->rx_compressed = b->rx_compressed;
	a->tx_compressed = b->tx_compressed;

	a->rx_nohandler = b->rx_nohandler;
}

/* All VF info */
static inline int rtnl_vfinfo_size(const struct net_device *dev,
				   u32 ext_filter_mask)
{
	if (dev->dev.parent && dev_is_pci(dev->dev.parent) &&
	    (ext_filter_mask & RTEXT_FILTER_VF)) {
		int num_vfs = dev_num_vf(dev->dev.parent);
		size_t size = nla_total_size(sizeof(struct nlattr));
		size += nla_total_size(num_vfs * sizeof(struct nlattr));
		size += num_vfs *
			(nla_total_size(sizeof(struct ifla_vf_mac)) +
			 nla_total_size(MAX_VLAN_LIST_LEN *
					sizeof(struct nlattr)) +
			 nla_total_size(MAX_VLAN_LIST_LEN *
					sizeof(struct ifla_vf_vlan_info)) +
			 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
			 nla_total_size(sizeof(struct ifla_vf_rate)) +
			 nla_total_size(sizeof(struct ifla_vf_link_state)) +
			 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
			 /* IFLA_VF_STATS_RX_PACKETS */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_TX_PACKETS */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_RX_BYTES */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_TX_BYTES */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_BROADCAST */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_MULTICAST */
			 nla_total_size_64bit(sizeof(__u64)) +
			 nla_total_size(sizeof(struct ifla_vf_trust)));
		return size;
	} else
		return 0;
}

static size_t rtnl_port_size(const struct net_device *dev,
			     u32 ext_filter_mask)
{
	size_t port_size = nla_total_size(4)		/* PORT_VF */
		+ nla_total_size(PORT_PROFILE_MAX)	/* PORT_PROFILE */
		+ nla_total_size(sizeof(struct ifla_port_vsi))
							/* PORT_VSI_TYPE */
		+ nla_total_size(PORT_UUID_MAX)		/* PORT_INSTANCE_UUID */
		+ nla_total_size(PORT_UUID_MAX)		/* PORT_HOST_UUID */
		+ nla_total_size(1)			/* PORT_VDP_REQUEST */
		+ nla_total_size(2);			/* PORT_VDP_RESPONSE */
	size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
	size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
		+ port_size;
	size_t port_self_size = nla_total_size(sizeof(struct nlattr))
		+ port_size;

	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
	    !(ext_filter_mask & RTEXT_FILTER_VF))
		return 0;
	if (dev_num_vf(dev->dev.parent))
		return port_self_size + vf_ports_size +
			vf_port_size * dev_num_vf(dev->dev.parent);
	else
		return port_self_size;
}

static size_t rtnl_xdp_size(const struct net_device *dev)
{
	size_t xdp_size = nla_total_size(1);	/* XDP_ATTACHED */

	if (!dev->netdev_ops->ndo_xdp)
		return 0;
	else
		return xdp_size;
}

static noinline size_t if_nlmsg_size(const struct net_device *dev,
				     u32 ext_filter_mask)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
	       + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
	       + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
	       + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap))
	       + nla_total_size(sizeof(struct rtnl_link_stats))
	       + nla_total_size_64bit(sizeof(struct rtnl_link_stats64))
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
	       + nla_total_size(4) /* IFLA_TXQLEN */
	       + nla_total_size(4) /* IFLA_WEIGHT */
	       + nla_total_size(4) /* IFLA_MTU */
	       + nla_total_size(4) /* IFLA_LINK */
	       + nla_total_size(4) /* IFLA_MASTER */
	       + nla_total_size(1) /* IFLA_CARRIER */
	       + nla_total_size(4) /* IFLA_PROMISCUITY */
	       + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
	       + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
	       + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
	       + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
	       + nla_total_size(1) /* IFLA_OPERSTATE */
	       + nla_total_size(1) /* IFLA_LINKMODE */
	       + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
	       + nla_total_size(4) /* IFLA_LINK_NETNSID */
	       + nla_total_size(ext_filter_mask
				& RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
	       + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
	       + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
	       + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
	       + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
	       + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
	       + rtnl_xdp_size(dev) /* IFLA_XDP */
	       + nla_total_size(1); /* IFLA_PROTO_DOWN */
}

static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *vf_ports;
	struct nlattr *vf_port;
	int vf;
	int err;

	vf_ports = nla_nest_start(skb, IFLA_VF_PORTS);
	if (!vf_ports)
		return -EMSGSIZE;

	for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
		vf_port = nla_nest_start(skb, IFLA_VF_PORT);
		if (!vf_port)
			goto nla_put_failure;
		if (nla_put_u32(skb, IFLA_PORT_VF, vf))
			goto nla_put_failure;
		err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
		if (err == -EMSGSIZE)
			goto nla_put_failure;
		if (err) {
			nla_nest_cancel(skb, vf_port);
			continue;
		}
		nla_nest_end(skb, vf_port);
	}

	nla_nest_end(skb, vf_ports);

	return 0;

nla_put_failure:
	nla_nest_cancel(skb, vf_ports);
	return -EMSGSIZE;
}

static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *port_self;
	int err;

	port_self = nla_nest_start(skb, IFLA_PORT_SELF);
	if (!port_self)
		return -EMSGSIZE;

	err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
	if (err) {
		nla_nest_cancel(skb, port_self);
		return (err == -EMSGSIZE) ? err : 0;
	}

	nla_nest_end(skb, port_self);

	return 0;
}

static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
			  u32 ext_filter_mask)
{
	int err;

	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
	    !(ext_filter_mask & RTEXT_FILTER_VF))
		return 0;

	err = rtnl_port_self_fill(skb, dev);
	if (err)
		return err;

	if (dev_num_vf(dev->dev.parent)) {
		err = rtnl_vf_ports_fill(skb, dev);
		if (err)
			return err;
	}

	return 0;
}

static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
{
	int err;
	struct netdev_phys_item_id ppid;

	err = dev_get_phys_port_id(dev, &ppid);
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
		return -EMSGSIZE;

	return 0;
}

static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
{
	char name[IFNAMSIZ];
	int err;

	err = dev_get_phys_port_name(dev, name, sizeof(name));
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put(skb, IFLA_PHYS_PORT_NAME, strlen(name), name))
		return -EMSGSIZE;

	return 0;
}

static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
{
	int err;
	struct switchdev_attr attr = {
		.orig_dev = dev,
		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
		.flags = SWITCHDEV_F_NO_RECURSE,
	};

	err = switchdev_port_attr_get(dev, &attr);
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put(skb, IFLA_PHYS_SWITCH_ID, attr.u.ppid.id_len,
		    attr.u.ppid.id))
		return -EMSGSIZE;

	return 0;
}

static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
					      struct net_device *dev)
{
	struct rtnl_link_stats64 *sp;
	struct nlattr *attr;

	attr = nla_reserve_64bit(skb, IFLA_STATS64,
				 sizeof(struct rtnl_link_stats64), IFLA_PAD);
	if (!attr)
		return -EMSGSIZE;

	sp = nla_data(attr);
	dev_get_stats(dev, sp);

	attr = nla_reserve(skb, IFLA_STATS,
			   sizeof(struct rtnl_link_stats));
	if (!attr)
		return -EMSGSIZE;

	copy_rtnl_link_stats(nla_data(attr), sp);

	return 0;
}

static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
					       struct net_device *dev,
					       int vfs_num,
					       struct nlattr *vfinfo)
{
	struct ifla_vf_rss_query_en vf_rss_query_en;
	struct nlattr *vf, *vfstats, *vfvlanlist;
	struct ifla_vf_link_state vf_linkstate;
	struct ifla_vf_vlan_info vf_vlan_info;
	struct ifla_vf_spoofchk vf_spoofchk;
	struct ifla_vf_tx_rate vf_tx_rate;
	struct ifla_vf_stats vf_stats;
	struct ifla_vf_trust vf_trust;
	struct ifla_vf_vlan vf_vlan;
	struct ifla_vf_rate vf_rate;
	struct ifla_vf_mac vf_mac;
	struct ifla_vf_info ivi;

	/* Not all SR-IOV capable drivers support the
	 * spoofcheck and "RSS query enable" query. Preset to
	 * -1 so the user space tool can detect that the driver
	 * didn't report anything.
	 */
	ivi.spoofchk = -1;
	ivi.rss_query_en = -1;
	ivi.trusted = -1;
	memset(ivi.mac, 0, sizeof(ivi.mac));
	/* The default value for VF link state is "auto"
	 * IFLA_VF_LINK_STATE_AUTO which equals zero
	 */
	ivi.linkstate = 0;
	/* VLAN Protocol by default is 802.1Q */
	ivi.vlan_proto = htons(ETH_P_8021Q);
	if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
		return 0;

	memset(&vf_vlan_info, 0, sizeof(vf_vlan_info));

	vf_mac.vf =
		vf_vlan.vf =
		vf_vlan_info.vf =
		vf_rate.vf =
		vf_tx_rate.vf =
		vf_spoofchk.vf =
		vf_linkstate.vf =
		vf_rss_query_en.vf =
		vf_trust.vf = ivi.vf;

	memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
	vf_vlan.vlan = ivi.vlan;
	vf_vlan.qos = ivi.qos;
	vf_vlan_info.vlan = ivi.vlan;
	vf_vlan_info.qos = ivi.qos;
	vf_vlan_info.vlan_proto = ivi.vlan_proto;
	vf_tx_rate.rate = ivi.max_tx_rate;
	vf_rate.min_tx_rate = ivi.min_tx_rate;
	vf_rate.max_tx_rate = ivi.max_tx_rate;
	vf_spoofchk.setting = ivi.spoofchk;
	vf_linkstate.link_state = ivi.linkstate;
	vf_rss_query_en.setting = ivi.rss_query_en;
	vf_trust.setting = ivi.trusted;
	vf = nla_nest_start(skb, IFLA_VF_INFO);
	if (!vf)
		goto nla_put_vfinfo_failure;
	if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
	    nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
	    nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
		    &vf_rate) ||
	    nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
		    &vf_tx_rate) ||
	    nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
		    &vf_spoofchk) ||
	    nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
		    &vf_linkstate) ||
	    nla_put(skb, IFLA_VF_RSS_QUERY_EN,
		    sizeof(vf_rss_query_en),
		    &vf_rss_query_en) ||
	    nla_put(skb, IFLA_VF_TRUST,
		    sizeof(vf_trust), &vf_trust))
		goto nla_put_vf_failure;
	vfvlanlist = nla_nest_start(skb, IFLA_VF_VLAN_LIST);
	if (!vfvlanlist)
		goto nla_put_vf_failure;
	if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info),
		    &vf_vlan_info)) {
		nla_nest_cancel(skb, vfvlanlist);
		goto nla_put_vf_failure;
	}
	nla_nest_end(skb, vfvlanlist);
	memset(&vf_stats, 0, sizeof(vf_stats));
	if (dev->netdev_ops->ndo_get_vf_stats)
		dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
						  &vf_stats);
	vfstats = nla_nest_start(skb, IFLA_VF_STATS);
	if (!vfstats)
		goto nla_put_vf_failure;
	if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
			      vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
			      vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
			      vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
			      vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
			      vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
			      vf_stats.multicast, IFLA_VF_STATS_PAD)) {
		nla_nest_cancel(skb, vfstats);
		goto nla_put_vf_failure;
	}
	nla_nest_end(skb, vfstats);
	nla_nest_end(skb, vf);
	return 0;

nla_put_vf_failure:
	nla_nest_cancel(skb, vf);
nla_put_vfinfo_failure:
	nla_nest_cancel(skb, vfinfo);
	return -EMSGSIZE;
}

static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
{
	struct rtnl_link_ifmap map;

	memset(&map, 0, sizeof(map));
	map.mem_start = dev->mem_start;
	map.mem_end = dev->mem_end;
	map.base_addr = dev->base_addr;
	map.irq = dev->irq;
	map.dma = dev->dma;
	map.port = dev->if_port;

	if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
		return -EMSGSIZE;

	return 0;
}

static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_xdp xdp_op = {};
	struct nlattr *xdp;
	int err;

	if (!dev->netdev_ops->ndo_xdp)
		return 0;
	xdp = nla_nest_start(skb, IFLA_XDP);
	if (!xdp)
		return -EMSGSIZE;
	xdp_op.command = XDP_QUERY_PROG;
	err = dev->netdev_ops->ndo_xdp(dev, &xdp_op);
	if (err)
		goto err_cancel;
	err = nla_put_u8(skb, IFLA_XDP_ATTACHED, xdp_op.prog_attached);
	if (err)
		goto err_cancel;

	nla_nest_end(skb, xdp);
	return 0;

err_cancel:
	nla_nest_cancel(skb, xdp);
	return err;
}

static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
			    int type, u32 pid, u32 seq, u32 change,
			    unsigned int flags, u32 ext_filter_mask)
{
	struct ifinfomsg *ifm;
	struct nlmsghdr *nlh;
	struct nlattr *af_spec;
	struct rtnl_af_ops *af_ops;
	struct net_device *upper_dev = netdev_master_upper_dev_get(dev);

	ASSERT_RTNL();
	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ifm = nlmsg_data(nlh);
	ifm->ifi_family = AF_UNSPEC;
	ifm->__ifi_pad = 0;
	ifm->ifi_type = dev->type;
	ifm->ifi_index = dev->ifindex;
	ifm->ifi_flags = dev_get_flags(dev);
	ifm->ifi_change = change;

	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
	    nla_put_u8(skb, IFLA_OPERSTATE,
		       netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
	    nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
	    nla_put_u32(skb, IFLA_GROUP, dev->group) ||
	    nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
	    nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
	    nla_put_u32(skb, IFLA_GSO_MAX_SEGS, dev->gso_max_segs) ||
	    nla_put_u32(skb, IFLA_GSO_MAX_SIZE, dev->gso_max_size) ||
#ifdef CONFIG_RPS
	    nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
#endif
	    (dev->ifindex != dev_get_iflink(dev) &&
	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))) ||
	    (upper_dev &&
	     nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex)) ||
	    nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
	    (dev->qdisc &&
	     nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) ||
	    (dev->ifalias &&
	     nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)) ||
	    nla_put_u32(skb, IFLA_CARRIER_CHANGES,
			atomic_read(&dev->carrier_changes)) ||
	    nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
		goto nla_put_failure;

	if (rtnl_fill_link_ifmap(skb, dev))
		goto nla_put_failure;

	if (dev->addr_len) {
		if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
		    nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
			goto nla_put_failure;
	}

	if (rtnl_phys_port_id_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_phys_port_name_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_phys_switch_id_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_fill_stats(skb, dev))
		goto nla_put_failure;

	if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF) &&
	    nla_put_u32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent)))
		goto nla_put_failure;

	if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent &&
	    ext_filter_mask & RTEXT_FILTER_VF) {
		int i;
		struct nlattr *vfinfo;
		int num_vfs = dev_num_vf(dev->dev.parent);

		vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST);
		if (!vfinfo)
			goto nla_put_failure;
		for (i = 0; i < num_vfs; i++) {
			if (rtnl_fill_vfinfo(skb, dev, i, vfinfo))
				goto nla_put_failure;
		}

		nla_nest_end(skb, vfinfo);
	}

	if (rtnl_port_fill(skb, dev, ext_filter_mask))
		goto nla_put_failure;

	if (rtnl_xdp_fill(skb, dev))
		goto nla_put_failure;

	if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
		if (rtnl_link_fill(skb, dev) < 0)
			goto nla_put_failure;
	}

	if (dev->rtnl_link_ops &&
	    dev->rtnl_link_ops->get_link_net) {
		struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);

		if (!net_eq(dev_net(dev), link_net)) {
			int id = peernet2id_alloc(dev_net(dev), link_net);

			if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
				goto nla_put_failure;
		}
	}

	if (!(af_spec = nla_nest_start(skb, IFLA_AF_SPEC)))
		goto nla_put_failure;

	list_for_each_entry(af_ops, &rtnl_af_ops, list) {
		if (af_ops->fill_link_af) {
			struct nlattr *af;
			int err;

			if (!(af = nla_nest_start(skb, af_ops->family)))
				goto nla_put_failure;

			err = af_ops->fill_link_af(skb, dev, ext_filter_mask);

			/*
			 * Caller may return ENODATA to indicate that there
			 * was no data to be dumped. This is not an error, it
			 * means we should trim the attribute header and
			 * continue.
			 */
			if (err == -ENODATA)
				nla_nest_cancel(skb, af);
			else if (err < 0)
				goto nla_put_failure;

			nla_nest_end(skb, af);
		}
	}

	nla_nest_end(skb, af_spec);

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
	[IFLA_IFNAME]		= { .type = NLA_STRING, .len = IFNAMSIZ-1 },
	[IFLA_ADDRESS]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[IFLA_BROADCAST]	= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[IFLA_MAP]		= { .len = sizeof(struct rtnl_link_ifmap) },
	[IFLA_MTU]		= { .type = NLA_U32 },
	[IFLA_LINK]		= { .type = NLA_U32 },
	[IFLA_MASTER]		= { .type = NLA_U32 },
	[IFLA_CARRIER]		= { .type = NLA_U8 },
	[IFLA_TXQLEN]		= { .type = NLA_U32 },
	[IFLA_WEIGHT]		= { .type = NLA_U32 },
	[IFLA_OPERSTATE]	= { .type = NLA_U8 },
	[IFLA_LINKMODE]		= { .type = NLA_U8 },
	[IFLA_LINKINFO]		= { .type = NLA_NESTED },
	[IFLA_NET_NS_PID]	= { .type = NLA_U32 },
	[IFLA_NET_NS_FD]	= { .type = NLA_U32 },
	[IFLA_IFALIAS]		= { .type = NLA_STRING, .len = IFALIASZ-1 },
	[IFLA_VFINFO_LIST]	= { .type = NLA_NESTED },
	[IFLA_VF_PORTS]		= { .type = NLA_NESTED },
	[IFLA_PORT_SELF]	= { .type = NLA_NESTED },
	[IFLA_AF_SPEC]		= { .type = NLA_NESTED },
	[IFLA_EXT_MASK]		= { .type = NLA_U32 },
	[IFLA_PROMISCUITY]	= { .type = NLA_U32 },
	[IFLA_NUM_TX_QUEUES]	= { .type = NLA_U32 },
	[IFLA_NUM_RX_QUEUES]	= { .type = NLA_U32 },
	[IFLA_PHYS_PORT_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
	[IFLA_CARRIER_CHANGES]	= { .type = NLA_U32 },	/* ignored */
	[IFLA_PHYS_SWITCH_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
	[IFLA_LINK_NETNSID]	= { .type = NLA_S32 },
	[IFLA_PROTO_DOWN]	= { .type = NLA_U8 },
	[IFLA_XDP]		= { .type = NLA_NESTED },
};

static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
	[IFLA_INFO_KIND]	= { .type = NLA_STRING },
	[IFLA_INFO_DATA]	= { .type = NLA_NESTED },
	[IFLA_INFO_SLAVE_KIND]	= { .type = NLA_STRING },
	[IFLA_INFO_SLAVE_DATA]	= { .type = NLA_NESTED },
};

static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
	[IFLA_VF_MAC]		= { .len = sizeof(struct ifla_vf_mac) },
	[IFLA_VF_VLAN]		= { .len = sizeof(struct ifla_vf_vlan) },
	[IFLA_VF_VLAN_LIST]	= { .type = NLA_NESTED },
	[IFLA_VF_TX_RATE]	= { .len = sizeof(struct ifla_vf_tx_rate) },
	[IFLA_VF_SPOOFCHK]	= { .len = sizeof(struct ifla_vf_spoofchk) },
	[IFLA_VF_RATE]		= { .len = sizeof(struct ifla_vf_rate) },
	[IFLA_VF_LINK_STATE]	= { .len = sizeof(struct ifla_vf_link_state) },
	[IFLA_VF_RSS_QUERY_EN]	= { .len = sizeof(struct ifla_vf_rss_query_en) },
	[IFLA_VF_STATS]		= { .type = NLA_NESTED },
	[IFLA_VF_TRUST]		= { .len = sizeof(struct ifla_vf_trust) },
	[IFLA_VF_IB_NODE_GUID]	= { .len = sizeof(struct ifla_vf_guid) },
	[IFLA_VF_IB_PORT_GUID]	= { .len = sizeof(struct ifla_vf_guid) },
};

static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
	[IFLA_PORT_VF]		= { .type = NLA_U32 },
	[IFLA_PORT_PROFILE]	= { .type = NLA_STRING,
				    .len = PORT_PROFILE_MAX },
	[IFLA_PORT_VSI_TYPE]	= { .type = NLA_BINARY,
				    .len = sizeof(struct ifla_port_vsi) },
	[IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY,
				      .len = PORT_UUID_MAX },
	[IFLA_PORT_HOST_UUID]	= { .type = NLA_STRING,
				    .len = PORT_UUID_MAX },
	[IFLA_PORT_REQUEST]	= { .type = NLA_U8, },
	[IFLA_PORT_RESPONSE]	= { .type = NLA_U16, },
};

static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = {
	[IFLA_XDP_FD]		= { .type = NLA_S32 },
	[IFLA_XDP_ATTACHED]	= { .type = NLA_U8 },
};

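/* Resolve a nested IFLA_LINKINFO attribute to the rtnl_link_ops that
 * registered the named kind (e.g. "bridge" or "vlan"), or NULL if the
 * attribute does not parse or the kind is unknown.
 */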
static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla)
{
	const struct rtnl_link_ops *ops = NULL;
	struct nlattr *linfo[IFLA_INFO_MAX + 1];

	if (nla_parse_nested(linfo, IFLA_INFO_MAX, nla, ifla_info_policy) < 0)
		return NULL;

	if (linfo[IFLA_INFO_KIND]) {
		char kind[MODULE_NAME_LEN];

		nla_strlcpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind));
		ops = rtnl_link_ops_get(kind);
	}

	return ops;
}

static bool link_master_filtered(struct net_device *dev, int master_idx)
{
	struct net_device *master;

	if (!master_idx)
		return false;

	master = netdev_master_upper_dev_get(dev);
	if (!master || master->ifindex != master_idx)
		return true;

	return false;
}

static bool link_kind_filtered(const struct net_device *dev,
			       const struct rtnl_link_ops *kind_ops)
{
	if (kind_ops && dev->rtnl_link_ops != kind_ops)
		return true;

	return false;
}

static bool link_dump_filtered(struct net_device *dev,
			       int master_idx,
			       const struct rtnl_link_ops *kind_ops)
{
	if (link_master_filtered(dev, master_idx) ||
	    link_kind_filtered(dev, kind_ops))
		return true;

	return false;
}

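/* Dump all links visible in the caller's netns, resuming from the
 * hash-bucket/device cursor saved in cb->args[] by the previous call.
 * Optional IFLA_MASTER and IFLA_LINKINFO attributes in the request
 * restrict the dump to one master device or one link kind.
 */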
static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int h, s_h;
	int idx = 0, s_idx;
	struct net_device *dev;
	struct hlist_head *head;
	struct nlattr *tb[IFLA_MAX+1];
	u32 ext_filter_mask = 0;
	const struct rtnl_link_ops *kind_ops = NULL;
	unsigned int flags = NLM_F_MULTI;
	int master_idx = 0;
	int err;
	int hdrlen;

	s_h = cb->args[0];
	s_idx = cb->args[1];

	cb->seq = net->dev_base_seq;

	/* A hack to preserve kernel<->userspace interface.
	 * The correct header is ifinfomsg. It is consistent with rtnl_getlink.
	 * However, before Linux v3.9 the code here assumed rtgenmsg and that's
	 * what iproute2 < v3.9.0 used.
	 * We can detect the old iproute2. Even including the IFLA_EXT_MASK
	 * attribute, its netlink message is shorter than struct ifinfomsg.
	 */
	hdrlen = nlmsg_len(cb->nlh) < sizeof(struct ifinfomsg) ?
		 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);

	if (nlmsg_parse(cb->nlh, hdrlen, tb, IFLA_MAX, ifla_policy) >= 0) {
		if (tb[IFLA_EXT_MASK])
			ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);

		if (tb[IFLA_MASTER])
			master_idx = nla_get_u32(tb[IFLA_MASTER]);

		if (tb[IFLA_LINKINFO])
			kind_ops = linkinfo_to_kind_ops(tb[IFLA_LINKINFO]);

		if (master_idx || kind_ops)
			flags |= NLM_F_DUMP_FILTERED;
	}

	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
		idx = 0;
		head = &net->dev_index_head[h];
		hlist_for_each_entry(dev, head, index_hlist) {
			if (link_dump_filtered(dev, master_idx, kind_ops))
				continue;
			if (idx < s_idx)
				goto cont;
			err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
					       NETLINK_CB(cb->skb).portid,
					       cb->nlh->nlmsg_seq, 0,
					       flags,
					       ext_filter_mask);
			/* If we ran out of room on the first message,
			 * we're in trouble.
			 */
			WARN_ON((err == -EMSGSIZE) && (skb->len == 0));

			if (err < 0)
				goto out;

			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
			idx++;
		}
	}
out:
	cb->args[1] = idx;
	cb->args[0] = h;

	return skb->len;
}

int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len)
{
	return nla_parse(tb, IFLA_MAX, head, len, ifla_policy);
}
EXPORT_SYMBOL(rtnl_nla_parse_ifla);

struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
{
	struct net *net;
	/* Examine the link attributes and figure out which
	 * network namespace we are talking about.
	 */
	if (tb[IFLA_NET_NS_PID])
		net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
	else if (tb[IFLA_NET_NS_FD])
		net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD]));
	else
		net = get_net(src_net);
	return net;
}
EXPORT_SYMBOL(rtnl_link_get_net);

static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
{
	if (dev) {
		if (tb[IFLA_ADDRESS] &&
		    nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
			return -EINVAL;

		if (tb[IFLA_BROADCAST] &&
		    nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
			return -EINVAL;
	}

	if (tb[IFLA_AF_SPEC]) {
		struct nlattr *af;
		int rem, err;

		nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
			const struct rtnl_af_ops *af_ops;

			if (!(af_ops = rtnl_af_lookup(nla_type(af))))
				return -EAFNOSUPPORT;

			if (!af_ops->set_link_af)
				return -EOPNOTSUPP;

			if (af_ops->validate_link_af) {
				err = af_ops->validate_link_af(dev, af);
				if (err < 0)
					return err;
			}
		}
	}

	return 0;
}

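/* VF GUIDs are only meaningful for InfiniBand devices; the two helpers
 * below route IFLA_VF_IB_{NODE,PORT}_GUID requests to the driver's
 * ndo_set_vf_guid() after checking the hardware type.
 */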
static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt,
				  int guid_type)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type);
}

static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type)
{
	if (dev->type != ARPHRD_INFINIBAND)
		return -EOPNOTSUPP;

	return handle_infiniband_guid(dev, ivt, guid_type);
}

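/* Apply one parsed IFLA_VF_INFO block: each recognized attribute maps
 * to the corresponding ndo_set_vf_* driver hook, and the first hook
 * that fails (or is missing, -EOPNOTSUPP) aborts the whole update.
 */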
static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err = -EINVAL;

	if (tb[IFLA_VF_MAC]) {
		struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);

		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_mac)
			err = ops->ndo_set_vf_mac(dev, ivm->vf,
						  ivm->mac);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_VLAN]) {
		struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);

		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_vlan)
			err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
						   ivv->qos,
						   htons(ETH_P_8021Q));
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_VLAN_LIST]) {
		struct ifla_vf_vlan_info *ivvl[MAX_VLAN_LIST_LEN];
		struct nlattr *attr;
		int rem, len = 0;

		err = -EOPNOTSUPP;
		if (!ops->ndo_set_vf_vlan)
			return err;

		nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) {
			if (nla_type(attr) != IFLA_VF_VLAN_INFO ||
			    nla_len(attr) < NLA_HDRLEN) {
				return -EINVAL;
			}
			if (len >= MAX_VLAN_LIST_LEN)
				return -EOPNOTSUPP;
			ivvl[len] = nla_data(attr);

			len++;
		}
		if (len == 0)
			return -EINVAL;

		err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan,
					   ivvl[0]->qos, ivvl[0]->vlan_proto);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_TX_RATE]) {
		struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
		struct ifla_vf_info ivf;

		err = -EOPNOTSUPP;
		if (ops->ndo_get_vf_config)
			err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
		if (err < 0)
			return err;

		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_rate)
			err = ops->ndo_set_vf_rate(dev, ivt->vf,
						   ivf.min_tx_rate,
						   ivt->rate);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_RATE]) {
		struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);

		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_rate)
			err = ops->ndo_set_vf_rate(dev, ivt->vf,
						   ivt->min_tx_rate,
						   ivt->max_tx_rate);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_SPOOFCHK]) {
		struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);

		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_spoofchk)
			err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
						       ivs->setting);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_LINK_STATE]) {
		struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);

		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_link_state)
			err = ops->ndo_set_vf_link_state(dev, ivl->vf,
							 ivl->link_state);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_RSS_QUERY_EN]) {
		struct ifla_vf_rss_query_en *ivrssq_en;

		err = -EOPNOTSUPP;
		ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
		if (ops->ndo_set_vf_rss_query_en)
			err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
							   ivrssq_en->setting);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_TRUST]) {
		struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]);

		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_trust)
			err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_IB_NODE_GUID]) {
		struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]);

		if (!ops->ndo_set_vf_guid)
			return -EOPNOTSUPP;

		return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID);
	}

	if (tb[IFLA_VF_IB_PORT_GUID]) {
		struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]);

		if (!ops->ndo_set_vf_guid)
			return -EOPNOTSUPP;

		return handle_vf_guid(dev, ivt, IFLA_VF_IB_PORT_GUID);
	}

	return err;
}

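/* (Re)enslave @dev: detach it from its current master via
 * ndo_del_slave(), then, if @ifindex is non-zero, attach it to the
 * device with that index via ndo_add_slave(). An ifindex of zero thus
 * simply releases the device from its master.
 */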
static int do_set_master(struct net_device *dev, int ifindex)
{
	struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
	const struct net_device_ops *ops;
	int err;

	if (upper_dev) {
		if (upper_dev->ifindex == ifindex)
			return 0;
		ops = upper_dev->netdev_ops;
		if (ops->ndo_del_slave) {
			err = ops->ndo_del_slave(upper_dev, dev);
			if (err)
				return err;
		} else {
			return -EOPNOTSUPP;
		}
	}

	if (ifindex) {
		upper_dev = __dev_get_by_index(dev_net(dev), ifindex);
		if (!upper_dev)
			return -EINVAL;
		ops = upper_dev->netdev_ops;
		if (ops->ndo_add_slave) {
			err = ops->ndo_add_slave(upper_dev, dev);
			if (err)
				return err;
		} else {
			return -EOPNOTSUPP;
		}
	}
	return 0;
}

#define DO_SETLINK_MODIFIED	0x01
/* notify flag means notify + modified. */
#define DO_SETLINK_NOTIFY	0x03

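/* Core of RTM_SETLINK: apply every attribute in @tb to @dev, in order.
 * @status accumulates DO_SETLINK_MODIFIED/NOTIFY so that a partial
 * failure still triggers the warning and state-change notification for
 * whatever was already committed.
 */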
static int do_setlink(const struct sk_buff *skb,
		      struct net_device *dev, struct ifinfomsg *ifm,
		      struct nlattr **tb, char *ifname, int status)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]) {
		struct net *net = rtnl_link_get_net(dev_net(dev), tb);
		if (IS_ERR(net)) {
			err = PTR_ERR(net);
			goto errout;
		}
		if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
			put_net(net);
			err = -EPERM;
			goto errout;
		}
		err = dev_change_net_namespace(dev, net, ifname);
		put_net(net);
		if (err)
			goto errout;
		status |= DO_SETLINK_MODIFIED;
	}

	if (tb[IFLA_MAP]) {
		struct rtnl_link_ifmap *u_map;
		struct ifmap k_map;

		if (!ops->ndo_set_config) {
			err = -EOPNOTSUPP;
			goto errout;
		}

		if (!netif_device_present(dev)) {
			err = -ENODEV;
			goto errout;
		}

		u_map = nla_data(tb[IFLA_MAP]);
		k_map.mem_start = (unsigned long) u_map->mem_start;
		k_map.mem_end = (unsigned long) u_map->mem_end;
		k_map.base_addr = (unsigned short) u_map->base_addr;
		k_map.irq = (unsigned char) u_map->irq;
		k_map.dma = (unsigned char) u_map->dma;
		k_map.port = (unsigned char) u_map->port;

		err = ops->ndo_set_config(dev, &k_map);
		if (err < 0)
			goto errout;

		status |= DO_SETLINK_NOTIFY;
	}

	if (tb[IFLA_ADDRESS]) {
		struct sockaddr *sa;
		int len;

		len = sizeof(sa_family_t) + dev->addr_len;
		sa = kmalloc(len, GFP_KERNEL);
		if (!sa) {
			err = -ENOMEM;
			goto errout;
		}
		sa->sa_family = dev->type;
		memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
		       dev->addr_len);
		err = dev_set_mac_address(dev, sa);
		kfree(sa);
		if (err)
			goto errout;
		status |= DO_SETLINK_MODIFIED;
	}

	if (tb[IFLA_MTU]) {
		err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
		if (err < 0)
			goto errout;
		status |= DO_SETLINK_MODIFIED;
	}

	if (tb[IFLA_GROUP]) {
		dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
		status |= DO_SETLINK_NOTIFY;
	}

	/*
	 * Interface selected by interface index but interface
	 * name provided implies that a name change has been
	 * requested.
	 */
	if (ifm->ifi_index > 0 && ifname[0]) {
		err = dev_change_name(dev, ifname);
		if (err < 0)
			goto errout;
		status |= DO_SETLINK_MODIFIED;
	}

	if (tb[IFLA_IFALIAS]) {
		err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
				    nla_len(tb[IFLA_IFALIAS]));
		if (err < 0)
			goto errout;
		status |= DO_SETLINK_NOTIFY;
	}

	if (tb[IFLA_BROADCAST]) {
		nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	}

	if (ifm->ifi_flags || ifm->ifi_change) {
		err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm));
		if (err < 0)
			goto errout;
	}

	if (tb[IFLA_MASTER]) {
		err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]));
		if (err)
			goto errout;
		status |= DO_SETLINK_MODIFIED;
	}

	if (tb[IFLA_CARRIER]) {
		err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
		if (err)
			goto errout;
		status |= DO_SETLINK_MODIFIED;
	}

	if (tb[IFLA_TXQLEN]) {
		unsigned long value = nla_get_u32(tb[IFLA_TXQLEN]);
		unsigned long orig_len = dev->tx_queue_len;

		if (dev->tx_queue_len ^ value) {
			dev->tx_queue_len = value;
			err = call_netdevice_notifiers(
			      NETDEV_CHANGE_TX_QUEUE_LEN, dev);
			err = notifier_to_errno(err);
			if (err) {
				dev->tx_queue_len = orig_len;
				goto errout;
			}
			status |= DO_SETLINK_NOTIFY;
		}
	}

	if (tb[IFLA_OPERSTATE])
		set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));

	if (tb[IFLA_LINKMODE]) {
		unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]);

		write_lock_bh(&dev_base_lock);
		if (dev->link_mode ^ value)
			status |= DO_SETLINK_NOTIFY;
		dev->link_mode = value;
		write_unlock_bh(&dev_base_lock);
	}

	if (tb[IFLA_VFINFO_LIST]) {
		struct nlattr *vfinfo[IFLA_VF_MAX + 1];
		struct nlattr *attr;
		int rem;

		nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
			if (nla_type(attr) != IFLA_VF_INFO ||
			    nla_len(attr) < NLA_HDRLEN) {
				err = -EINVAL;
				goto errout;
			}
			err = nla_parse_nested(vfinfo, IFLA_VF_MAX, attr,
					       ifla_vf_policy);
			if (err < 0)
				goto errout;
			err = do_setvfinfo(dev, vfinfo);
			if (err < 0)
				goto errout;
			status |= DO_SETLINK_NOTIFY;
		}
	}
	err = 0;

	if (tb[IFLA_VF_PORTS]) {
		struct nlattr *port[IFLA_PORT_MAX+1];
		struct nlattr *attr;
		int vf;
		int rem;

		err = -EOPNOTSUPP;
		if (!ops->ndo_set_vf_port)
			goto errout;

		nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
			if (nla_type(attr) != IFLA_VF_PORT ||
			    nla_len(attr) < NLA_HDRLEN) {
				err = -EINVAL;
				goto errout;
			}
			err = nla_parse_nested(port, IFLA_PORT_MAX, attr,
					       ifla_port_policy);
			if (err < 0)
				goto errout;
			if (!port[IFLA_PORT_VF]) {
				err = -EOPNOTSUPP;
				goto errout;
			}
			vf = nla_get_u32(port[IFLA_PORT_VF]);
			err = ops->ndo_set_vf_port(dev, vf, port);
			if (err < 0)
				goto errout;
			status |= DO_SETLINK_NOTIFY;
		}
	}
	err = 0;

	if (tb[IFLA_PORT_SELF]) {
		struct nlattr *port[IFLA_PORT_MAX+1];

		err = nla_parse_nested(port, IFLA_PORT_MAX,
				       tb[IFLA_PORT_SELF], ifla_port_policy);
		if (err < 0)
			goto errout;

		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_port)
			err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port);
		if (err < 0)
			goto errout;
		status |= DO_SETLINK_NOTIFY;
	}

	if (tb[IFLA_AF_SPEC]) {
		struct nlattr *af;
		int rem;

		nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
			const struct rtnl_af_ops *af_ops;

			if (!(af_ops = rtnl_af_lookup(nla_type(af))))
				BUG();

			err = af_ops->set_link_af(dev, af);
			if (err < 0)
				goto errout;

			status |= DO_SETLINK_NOTIFY;
		}
	}
	err = 0;

	if (tb[IFLA_PROTO_DOWN]) {
		err = dev_change_proto_down(dev,
					    nla_get_u8(tb[IFLA_PROTO_DOWN]));
		if (err)
			goto errout;
		status |= DO_SETLINK_NOTIFY;
	}

	if (tb[IFLA_XDP]) {
		struct nlattr *xdp[IFLA_XDP_MAX + 1];

		err = nla_parse_nested(xdp, IFLA_XDP_MAX, tb[IFLA_XDP],
				       ifla_xdp_policy);
		if (err < 0)
			goto errout;

		if (xdp[IFLA_XDP_ATTACHED]) {
			err = -EINVAL;
			goto errout;
		}
		if (xdp[IFLA_XDP_FD]) {
			err = dev_change_xdp_fd(dev,
						nla_get_s32(xdp[IFLA_XDP_FD]));
			if (err)
				goto errout;
			status |= DO_SETLINK_NOTIFY;
		}
	}

errout:
	if (status & DO_SETLINK_MODIFIED) {
		if (status & DO_SETLINK_NOTIFY)
			netdev_state_change(dev);

		if (err < 0)
			net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
					     dev->name);
	}

	return err;
}

static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct ifinfomsg *ifm;
	struct net_device *dev;
	int err;
	struct nlattr *tb[IFLA_MAX+1];
	char ifname[IFNAMSIZ];

	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
	if (err < 0)
		goto errout;

	if (tb[IFLA_IFNAME])
		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		ifname[0] = '\0';

	err = -EINVAL;
	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index > 0)
		dev = __dev_get_by_index(net, ifm->ifi_index);
	else if (tb[IFLA_IFNAME])
		dev = __dev_get_by_name(net, ifname);
	else
		goto errout;

	if (dev == NULL) {
		err = -ENODEV;
		goto errout;
	}

	err = validate_linkmsg(dev, tb);
	if (err < 0)
		goto errout;

	err = do_setlink(skb, dev, ifm, tb, ifname, 0);
errout:
	return err;
}

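/* Delete every device in @group. The first pass only checks that all
 * members have a dellink operation, so the request either applies to
 * the whole group or fails without side effects.
 */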
static int rtnl_group_dellink(const struct net *net, int group)
{
	struct net_device *dev, *aux;
	LIST_HEAD(list_kill);
	bool found = false;

	if (!group)
		return -EPERM;

	for_each_netdev(net, dev) {
		if (dev->group == group) {
			const struct rtnl_link_ops *ops;

			found = true;
			ops = dev->rtnl_link_ops;
			if (!ops || !ops->dellink)
				return -EOPNOTSUPP;
		}
	}

	if (!found)
		return -ENODEV;

	for_each_netdev_safe(net, dev, aux) {
		if (dev->group == group) {
			const struct rtnl_link_ops *ops;

			ops = dev->rtnl_link_ops;
			ops->dellink(dev, &list_kill);
		}
	}
	unregister_netdevice_many(&list_kill);

	return 0;
}

int rtnl_delete_link(struct net_device *dev)
{
	const struct rtnl_link_ops *ops;
	LIST_HEAD(list_kill);

	ops = dev->rtnl_link_ops;
	if (!ops || !ops->dellink)
		return -EOPNOTSUPP;

	ops->dellink(dev, &list_kill);
	unregister_netdevice_many(&list_kill);

	return 0;
}
EXPORT_SYMBOL_GPL(rtnl_delete_link);

static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	struct ifinfomsg *ifm;
	char ifname[IFNAMSIZ];
	struct nlattr *tb[IFLA_MAX+1];
	int err;

	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
	if (err < 0)
		return err;

	if (tb[IFLA_IFNAME])
		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);

	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index > 0)
		dev = __dev_get_by_index(net, ifm->ifi_index);
	else if (tb[IFLA_IFNAME])
		dev = __dev_get_by_name(net, ifname);
	else if (tb[IFLA_GROUP])
		return rtnl_group_dellink(net, nla_get_u32(tb[IFLA_GROUP]));
	else
		return -EINVAL;

	if (!dev)
		return -ENODEV;

	return rtnl_delete_link(dev);
}

int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
{
	unsigned int old_flags;
	int err;

	old_flags = dev->flags;
	if (ifm && (ifm->ifi_flags || ifm->ifi_change)) {
		err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm));
		if (err < 0)
			return err;
	}

	dev->rtnl_link_state = RTNL_LINK_INITIALIZED;

	__dev_notify_flags(dev, old_flags, ~0U);
	return 0;
}
EXPORT_SYMBOL(rtnl_configure_link);

struct net_device *rtnl_create_link(struct net *net,
	const char *ifname, unsigned char name_assign_type,
	const struct rtnl_link_ops *ops, struct nlattr *tb[])
{
	int err;
	struct net_device *dev;
	unsigned int num_tx_queues = 1;
	unsigned int num_rx_queues = 1;

	if (tb[IFLA_NUM_TX_QUEUES])
		num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]);
	else if (ops->get_num_tx_queues)
		num_tx_queues = ops->get_num_tx_queues();

	if (tb[IFLA_NUM_RX_QUEUES])
		num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]);
	else if (ops->get_num_rx_queues)
		num_rx_queues = ops->get_num_rx_queues();

	err = -ENOMEM;
	dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type,
			       ops->setup, num_tx_queues, num_rx_queues);
	if (!dev)
		goto err;

	dev_net_set(dev, net);
	dev->rtnl_link_ops = ops;
	dev->rtnl_link_state = RTNL_LINK_INITIALIZING;

	if (tb[IFLA_MTU])
		dev->mtu = nla_get_u32(tb[IFLA_MTU]);
	if (tb[IFLA_ADDRESS]) {
		memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]),
		       nla_len(tb[IFLA_ADDRESS]));
		dev->addr_assign_type = NET_ADDR_SET;
	}
	if (tb[IFLA_BROADCAST])
		memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
		       nla_len(tb[IFLA_BROADCAST]));
	if (tb[IFLA_TXQLEN])
		dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
	if (tb[IFLA_OPERSTATE])
		set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
	if (tb[IFLA_LINKMODE])
		dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
	if (tb[IFLA_GROUP])
		dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));

	return dev;

err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL(rtnl_create_link);

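/* Apply one RTM_NEWLINK request to every device in @group by calling
 * do_setlink() for each member in turn.
 */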
static int rtnl_group_changelink(const struct sk_buff *skb,
		struct net *net, int group,
		struct ifinfomsg *ifm,
		struct nlattr **tb)
{
	struct net_device *dev, *aux;
	int err;

	for_each_netdev_safe(net, dev, aux) {
		if (dev->group == group) {
			err = do_setlink(skb, dev, ifm, tb, NULL, 0);
			if (err < 0)
				return err;
		}
	}

	return 0;
}

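/* RTM_NEWLINK handler. Depending on the flags and on whether the
 * target already exists, this modifies an existing link, changes a
 * whole device group, or creates a new device of the requested kind,
 * loading the rtnl-link-<kind> module on demand and replaying the
 * request once its ops are registered.
 */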
static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	const struct rtnl_link_ops *ops;
	const struct rtnl_link_ops *m_ops = NULL;
	struct net_device *dev;
	struct net_device *master_dev = NULL;
	struct ifinfomsg *ifm;
	char kind[MODULE_NAME_LEN];
	char ifname[IFNAMSIZ];
	struct nlattr *tb[IFLA_MAX+1];
	struct nlattr *linkinfo[IFLA_INFO_MAX+1];
	unsigned char name_assign_type = NET_NAME_USER;
	int err;

#ifdef CONFIG_MODULES
replay:
#endif
	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
	if (err < 0)
		return err;

	if (tb[IFLA_IFNAME])
		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		ifname[0] = '\0';

	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index > 0)
		dev = __dev_get_by_index(net, ifm->ifi_index);
	else {
		if (ifname[0])
			dev = __dev_get_by_name(net, ifname);
		else
			dev = NULL;
	}

	if (dev) {
		master_dev = netdev_master_upper_dev_get(dev);
		if (master_dev)
			m_ops = master_dev->rtnl_link_ops;
	}

	err = validate_linkmsg(dev, tb);
	if (err < 0)
		return err;

	if (tb[IFLA_LINKINFO]) {
		err = nla_parse_nested(linkinfo, IFLA_INFO_MAX,
				       tb[IFLA_LINKINFO], ifla_info_policy);
		if (err < 0)
			return err;
	} else
		memset(linkinfo, 0, sizeof(linkinfo));

	if (linkinfo[IFLA_INFO_KIND]) {
		nla_strlcpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind));
		ops = rtnl_link_ops_get(kind);
	} else {
		kind[0] = '\0';
		ops = NULL;
	}

	if (1) {
		struct nlattr *attr[ops ? ops->maxtype + 1 : 1];
		struct nlattr *slave_attr[m_ops ? m_ops->slave_maxtype + 1 : 1];
		struct nlattr **data = NULL;
		struct nlattr **slave_data = NULL;
		struct net *dest_net, *link_net = NULL;

		if (ops) {
			if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
				err = nla_parse_nested(attr, ops->maxtype,
						       linkinfo[IFLA_INFO_DATA],
						       ops->policy);
				if (err < 0)
					return err;
				data = attr;
			}
			if (ops->validate) {
				err = ops->validate(tb, data);
				if (err < 0)
					return err;
			}
		}

		if (m_ops) {
			if (m_ops->slave_maxtype &&
			    linkinfo[IFLA_INFO_SLAVE_DATA]) {
				err = nla_parse_nested(slave_attr,
						       m_ops->slave_maxtype,
						       linkinfo[IFLA_INFO_SLAVE_DATA],
						       m_ops->slave_policy);
				if (err < 0)
					return err;
				slave_data = slave_attr;
			}
			if (m_ops->slave_validate) {
				err = m_ops->slave_validate(tb, slave_data);
				if (err < 0)
					return err;
			}
		}

		if (dev) {
			int status = 0;

			if (nlh->nlmsg_flags & NLM_F_EXCL)
				return -EEXIST;
			if (nlh->nlmsg_flags & NLM_F_REPLACE)
				return -EOPNOTSUPP;

			if (linkinfo[IFLA_INFO_DATA]) {
				if (!ops || ops != dev->rtnl_link_ops ||
				    !ops->changelink)
					return -EOPNOTSUPP;

				err = ops->changelink(dev, tb, data);
				if (err < 0)
					return err;
				status |= DO_SETLINK_NOTIFY;
			}

			if (linkinfo[IFLA_INFO_SLAVE_DATA]) {
				if (!m_ops || !m_ops->slave_changelink)
					return -EOPNOTSUPP;

				err = m_ops->slave_changelink(master_dev, dev,
							      tb, slave_data);
				if (err < 0)
					return err;
				status |= DO_SETLINK_NOTIFY;
			}

			return do_setlink(skb, dev, ifm, tb, ifname, status);
		}

		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
			if (ifm->ifi_index == 0 && tb[IFLA_GROUP])
				return rtnl_group_changelink(skb, net,
						nla_get_u32(tb[IFLA_GROUP]),
						ifm, tb);
			return -ENODEV;
		}

		if (tb[IFLA_MAP] || tb[IFLA_MASTER] || tb[IFLA_PROTINFO])
			return -EOPNOTSUPP;

		if (!ops) {
#ifdef CONFIG_MODULES
			if (kind[0]) {
				__rtnl_unlock();
				request_module("rtnl-link-%s", kind);
				rtnl_lock();
				ops = rtnl_link_ops_get(kind);
				if (ops)
					goto replay;
			}
#endif
			return -EOPNOTSUPP;
		}

		if (!ops->setup)
			return -EOPNOTSUPP;

		if (!ifname[0]) {
			snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
			name_assign_type = NET_NAME_ENUM;
		}

		dest_net = rtnl_link_get_net(net, tb);
		if (IS_ERR(dest_net))
			return PTR_ERR(dest_net);

		err = -EPERM;
		if (!netlink_ns_capable(skb, dest_net->user_ns, CAP_NET_ADMIN))
			goto out;

		if (tb[IFLA_LINK_NETNSID]) {
			int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);

			link_net = get_net_ns_by_id(dest_net, id);
			if (!link_net) {
				err = -EINVAL;
				goto out;
			}
			err = -EPERM;
			if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN))
				goto out;
		}

		dev = rtnl_create_link(link_net ? : dest_net, ifname,
				       name_assign_type, ops, tb);
		if (IS_ERR(dev)) {
			err = PTR_ERR(dev);
			goto out;
		}

		dev->ifindex = ifm->ifi_index;

		if (ops->newlink) {
			err = ops->newlink(link_net ? : net, dev, tb, data);
			/* Drivers should call free_netdev() in ->destructor
			 * and unregister it on failure after registration
			 * so that device could be finally freed in rtnl_unlock.
			 */
			if (err < 0) {
				/* If device is not registered at all, free it now */
				if (dev->reg_state == NETREG_UNINITIALIZED)
					free_netdev(dev);
				goto out;
			}
		} else {
			err = register_netdevice(dev);
			if (err < 0) {
				free_netdev(dev);
				goto out;
			}
		}
		err = rtnl_configure_link(dev, ifm);
		if (err < 0)
			goto out_unregister;
		if (link_net) {
			err = dev_change_net_namespace(dev, dest_net, ifname);
			if (err < 0)
				goto out_unregister;
		}
out:
		if (link_net)
			put_net(link_net);
		put_net(dest_net);
		return err;
out_unregister:
		if (ops->newlink) {
			LIST_HEAD(list_kill);

			ops->dellink(dev, &list_kill);
			unregister_netdevice_many(&list_kill);
		} else {
			unregister_netdevice(dev);
		}
		goto out;
	}
}

static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct ifinfomsg *ifm;
	char ifname[IFNAMSIZ];
	struct nlattr *tb[IFLA_MAX+1];
	struct net_device *dev = NULL;
	struct sk_buff *nskb;
	int err;
	u32 ext_filter_mask = 0;

	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
	if (err < 0)
		return err;

	if (tb[IFLA_IFNAME])
		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);

	if (tb[IFLA_EXT_MASK])
		ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);

	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index > 0)
		dev = __dev_get_by_index(net, ifm->ifi_index);
	else if (tb[IFLA_IFNAME])
		dev = __dev_get_by_name(net, ifname);
	else
		return -EINVAL;

	if (dev == NULL)
		return -ENODEV;

	nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL);
	if (nskb == NULL)
		return -ENOBUFS;

	err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).portid,
			       nlh->nlmsg_seq, 0, 0, ext_filter_mask);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in if_nlmsg_size */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(nskb);
	} else
		err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);

	return err;
}

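/* Estimate the per-message buffer size a following dump will need.
 * Without an ext filter mask the default NLMSG_GOODSIZE suffices;
 * otherwise the largest if_nlmsg_size() over all devices is used.
 */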
static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	struct nlattr *tb[IFLA_MAX+1];
	u32 ext_filter_mask = 0;
	u16 min_ifinfo_dump_size = 0;
	int hdrlen;

	/* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */
	hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
		 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);

	if (nlmsg_parse(nlh, hdrlen, tb, IFLA_MAX, ifla_policy) >= 0) {
		if (tb[IFLA_EXT_MASK])
			ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
	}

	if (!ext_filter_mask)
		return NLMSG_GOODSIZE;
	/*
	 * traverse the list of net devices and compute the minimum
	 * buffer size based upon the filter mask.
	 */
	list_for_each_entry(dev, &net->dev_base_head, dev_list) {
		min_ifinfo_dump_size = max_t(u16, min_ifinfo_dump_size,
					     if_nlmsg_size(dev,
							   ext_filter_mask));
	}

	return min_ifinfo_dump_size;
}

static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
{
	int idx;
	int s_idx = cb->family;

	if (s_idx == 0)
		s_idx = 1;
	for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
		int type = cb->nlh->nlmsg_type-RTM_BASE;
		if (idx < s_idx || idx == PF_PACKET)
			continue;
		if (rtnl_msg_handlers[idx] == NULL ||
		    rtnl_msg_handlers[idx][type].dumpit == NULL)
			continue;
		if (idx > s_idx) {
			memset(&cb->args[0], 0, sizeof(cb->args));
			cb->prev_seq = 0;
			cb->seq = 0;
		}
		if (rtnl_msg_handlers[idx][type].dumpit(skb, cb))
			break;
	}
	cb->family = idx;

	return skb->len;
}

struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
				       unsigned int change, gfp_t flags)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;
	size_t if_info_size;

	skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), flags);
	if (skb == NULL)
		goto errout;

	err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in if_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	return skb;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_LINK, err);
	return NULL;
}

void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags)
{
	struct net *net = dev_net(dev);

	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, flags);
}

void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
		  gfp_t flags)
{
	struct sk_buff *skb;

	if (dev->reg_state != NETREG_REGISTERED)
		return;

	skb = rtmsg_ifinfo_build_skb(type, dev, change, flags);
	if (skb)
		rtmsg_ifinfo_send(skb, dev, flags);
}
EXPORT_SYMBOL(rtmsg_ifinfo);

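/* Fill one RTM_{NEW,DEL}NEIGH message describing a single FDB entry:
 * an AF_BRIDGE ndmsg header plus NDA_LLADDR, and NDA_VLAN when a
 * VLAN id is set.
 */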
static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   u8 *addr, u16 vid, u32 pid, u32 seq,
				   int type, unsigned int flags,
				   int nlflags, u16 ndm_state)
{
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family = AF_BRIDGE;
	ndm->ndm_pad1 = 0;
	ndm->ndm_pad2 = 0;
	ndm->ndm_flags = flags;
	ndm->ndm_type = 0;
	ndm->ndm_ifindex = dev->ifindex;
	ndm->ndm_state = ndm_state;

	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
		goto nla_put_failure;
	if (vid)
		if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid))
			goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t rtnl_fdb_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg)) + nla_total_size(ETH_ALEN);
}

static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
			    u16 ndm_state)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_fdb_fill(skb, dev, addr, vid,
				      0, 0, type, NTF_SELF, 0, ndm_state);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}

/**
 * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry
 * @ndm: neighbour discovery message header
 * @tb: parsed NDA_* attributes
 * @dev: target net_device
 * @addr: link-layer address to add
 * @vid: VLAN id (must be 0, VLANs are not supported here)
 * @flags: netlink message flags (NLM_F_EXCL honoured for duplicates)
 */
int ndo_dflt_fdb_add(struct ndmsg *ndm,
		     struct nlattr *tb[],
		     struct net_device *dev,
		     const unsigned char *addr, u16 vid,
		     u16 flags)
{
	int err = -EINVAL;

	/* If aging addresses are supported device will need to
	 * implement its own handler for this.
	 */
	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
		pr_info("%s: FDB only supports static addresses\n", dev->name);
		return err;
	}

	if (vid) {
		pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
		return err;
	}

	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
		err = dev_uc_add_excl(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_add_excl(dev, addr);

	/* Only return duplicate errors if NLM_F_EXCL is set */
	if (err == -EEXIST && !(flags & NLM_F_EXCL))
		err = 0;

	return err;
}
EXPORT_SYMBOL(ndo_dflt_fdb_add);

static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid)
{
	u16 vid = 0;

	if (vlan_attr) {
		if (nla_len(vlan_attr) != sizeof(u16)) {
			pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid vlan\n");
			return -EINVAL;
		}

		vid = nla_get_u16(vlan_attr);
		if (!vid || vid >= VLAN_VID_MASK) {
			pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid vlan id %d\n",
				vid);
			return -EINVAL;
		}
	}
	*p_vid = vid;
	return 0;
}

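/* RTM_NEWNEIGH handler for PF_BRIDGE: validate ifindex, MAC address
 * and VLAN id, then offer the entry to the master device (NTF_MASTER)
 * and/or the port device itself (NTF_SELF), falling back to
 * ndo_dflt_fdb_add() for devices without their own ndo_fdb_add.
 */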
static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *tb[NDA_MAX+1];
	struct net_device *dev;
	u8 *addr;
	u16 vid;
	int err;

	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
	if (err < 0)
		return err;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex == 0) {
		pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid ifindex\n");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, ndm->ndm_ifindex);
	if (dev == NULL) {
		pr_info("PF_BRIDGE: RTM_NEWNEIGH with unknown ifindex\n");
		return -ENODEV;
	}

	if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
		pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid address\n");
		return -EINVAL;
	}

	addr = nla_data(tb[NDA_LLADDR]);

	err = fdb_vid_parse(tb[NDA_VLAN], &vid);
	if (err)
		return err;

	err = -EOPNOTSUPP;

	/* Support fdb on master device the net/bridge default case */
	if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
	    (dev->priv_flags & IFF_BRIDGE_PORT)) {
		struct net_device *br_dev = netdev_master_upper_dev_get(dev);
		const struct net_device_ops *ops = br_dev->netdev_ops;

		err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid,
				       nlh->nlmsg_flags);
		if (err)
			goto out;
		else
			ndm->ndm_flags &= ~NTF_MASTER;
	}

	/* Embedded bridge, macvlan, and any other device support */
	if ((ndm->ndm_flags & NTF_SELF)) {
		if (dev->netdev_ops->ndo_fdb_add)
			err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
							   vid,
							   nlh->nlmsg_flags);
		else
			err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid,
					       nlh->nlmsg_flags);

		if (!err) {
			rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH,
					ndm->ndm_state);
			ndm->ndm_flags &= ~NTF_SELF;
		}
	}
out:
	return err;
}

/**
 * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry
 * @ndm: neighbour discovery message header
 * @tb: parsed NDA_* attributes
 * @dev: target net_device
 * @addr: link-layer address to delete
 * @vid: VLAN id (unused here)
 */
int ndo_dflt_fdb_del(struct ndmsg *ndm,
		     struct nlattr *tb[],
		     struct net_device *dev,
		     const unsigned char *addr, u16 vid)
{
	int err = -EINVAL;

	/* If aging addresses are supported device will need to
	 * implement its own handler for this.
	 */
	if (!(ndm->ndm_state & NUD_PERMANENT)) {
		pr_info("%s: FDB only supports static addresses\n", dev->name);
		return err;
	}

	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
		err = dev_uc_del(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_del(dev, addr);

	return err;
}
EXPORT_SYMBOL(ndo_dflt_fdb_del);

static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *tb[NDA_MAX+1];
	struct net_device *dev;
	int err = -EINVAL;
	__u8 *addr;
	u16 vid;

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
	if (err < 0)
		return err;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex == 0) {
		pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid ifindex\n");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, ndm->ndm_ifindex);
	if (dev == NULL) {
		pr_info("PF_BRIDGE: RTM_DELNEIGH with unknown ifindex\n");
		return -ENODEV;
	}

	if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
		pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid address\n");
		return -EINVAL;
	}

	addr = nla_data(tb[NDA_LLADDR]);

	err = fdb_vid_parse(tb[NDA_VLAN], &vid);
	if (err)
		return err;

	err = -EOPNOTSUPP;

	/* Support fdb on master device the net/bridge default case */
	if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
	    (dev->priv_flags & IFF_BRIDGE_PORT)) {
		struct net_device *br_dev = netdev_master_upper_dev_get(dev);
		const struct net_device_ops *ops = br_dev->netdev_ops;

		if (ops->ndo_fdb_del)
			err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid);

		if (err)
			goto out;
		else
			ndm->ndm_flags &= ~NTF_MASTER;
	}

	/* Embedded bridge, macvlan, and any other device support */
	if (ndm->ndm_flags & NTF_SELF) {
		if (dev->netdev_ops->ndo_fdb_del)
			err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr,
							   vid);
		else
			err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);

		if (!err) {
			rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH,
					ndm->ndm_state);
			ndm->ndm_flags &= ~NTF_SELF;
		}
	}
out:
	return err;
}

static int nlmsg_populate_fdb(struct sk_buff *skb,
			      struct netlink_callback *cb,
			      struct net_device *dev,
			      int *idx,
			      struct netdev_hw_addr_list *list)
{
	struct netdev_hw_addr *ha;
	int err;
	u32 portid, seq;

	portid = NETLINK_CB(cb->skb).portid;
	seq = cb->nlh->nlmsg_seq;

	list_for_each_entry(ha, &list->list, list) {
		if (*idx < cb->args[2])
			goto skip;

		err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0,
					      portid, seq,
					      RTM_NEWNEIGH, NTF_SELF,
					      NLM_F_MULTI, NUD_PERMANENT);
		if (err < 0)
			return err;
skip:
		*idx += 1;
	}
	return 0;
}

/**
 * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table.
 * @skb: socket buffer to store message
 * @cb: netlink callback
 * @dev: netdevice
 * @filter_dev: ignored
 * @idx: dump cursor; incremented for every entry visited
 *
 * Default netdevice operation to dump the existing unicast address list.
 * Returns number of addresses from list put in skb.
 */
int ndo_dflt_fdb_dump(struct sk_buff *skb,
		      struct netlink_callback *cb,
		      struct net_device *dev,
		      struct net_device *filter_dev,
		      int *idx)
{
	int err;

	netif_addr_lock_bh(dev);
	err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
	if (err)
		goto out;
	nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc);
out:
	netif_addr_unlock_bh(dev);
	return err;
}
EXPORT_SYMBOL(ndo_dflt_fdb_dump);

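/* RTM_GETNEIGH dump for PF_BRIDGE: walk every device, optionally
 * restricted to one bridge or one port via IFLA_MASTER/ifi_index, and
 * let the bridge's and the device's ndo_fdb_dump fill in entries. A
 * three-level cursor (hash bucket, device index, fdb offset) lives in
 * cb->args[].
 */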
static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net_device *dev;
	struct nlattr *tb[IFLA_MAX+1];
	struct net_device *br_dev = NULL;
	const struct net_device_ops *ops = NULL;
	const struct net_device_ops *cops = NULL;
	struct ifinfomsg *ifm = nlmsg_data(cb->nlh);
	struct net *net = sock_net(skb->sk);
	struct hlist_head *head;
	int brport_idx = 0;
	int br_idx = 0;
	int h, s_h;
	int idx = 0, s_idx;
	int err = 0;
	int fidx = 0;

	if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
			ifla_policy) == 0) {
		if (tb[IFLA_MASTER])
			br_idx = nla_get_u32(tb[IFLA_MASTER]);
	}

	brport_idx = ifm->ifi_index;

	if (br_idx) {
		br_dev = __dev_get_by_index(net, br_idx);
		if (!br_dev)
			return -ENODEV;

		ops = br_dev->netdev_ops;
	}

	s_h = cb->args[0];
	s_idx = cb->args[1];

	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
		idx = 0;
		head = &net->dev_index_head[h];
		hlist_for_each_entry(dev, head, index_hlist) {

			if (brport_idx && (dev->ifindex != brport_idx))
				continue;

			if (!br_idx) { /* user did not specify a specific bridge */
				if (dev->priv_flags & IFF_BRIDGE_PORT) {
					br_dev = netdev_master_upper_dev_get(dev);
					cops = br_dev->netdev_ops;
				}
			} else {
				if (dev != br_dev &&
				    !(dev->priv_flags & IFF_BRIDGE_PORT))
					continue;

				if (br_dev != netdev_master_upper_dev_get(dev) &&
				    !(dev->priv_flags & IFF_EBRIDGE))
					continue;
				cops = ops;
			}

			if (idx < s_idx)
				goto cont;

			if (dev->priv_flags & IFF_BRIDGE_PORT) {
				if (cops && cops->ndo_fdb_dump) {
					err = cops->ndo_fdb_dump(skb, cb,
								 br_dev, dev,
								 &fidx);
					if (err == -EMSGSIZE)
						goto out;
				}
			}

			if (dev->netdev_ops->ndo_fdb_dump)
				err = dev->netdev_ops->ndo_fdb_dump(skb, cb,
								    dev, NULL,
								    &fidx);
			else
				err = ndo_dflt_fdb_dump(skb, cb, dev, NULL,
							&fidx);
			if (err == -EMSGSIZE)
				goto out;

			cops = NULL;

			/* reset fdb offset to 0 for rest of the interfaces */
			cb->args[2] = 0;
			fidx = 0;
cont:
			idx++;
		}
	}

out:
	cb->args[0] = h;
	cb->args[1] = idx;
	cb->args[2] = fidx;

	return skb->len;
}

static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask,
			       unsigned int attrnum, unsigned int flag)
{
	if (mask & flag)
		return nla_put_u8(skb, attrnum, !!(flags & flag));
	return 0;
}

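/* Default ndo_bridge_getlink implementation: emit an AF_BRIDGE
 * RTM_NEWLINK message carrying an IFLA_AF_SPEC nest (bridge flags,
 * mode, optional VLAN info via @vlan_fill) and an IFLA_PROTINFO nest
 * of bridge port flags filtered by @mask.
 */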
int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
			    struct net_device *dev, u16 mode,
			    u32 flags, u32 mask, int nlflags,
			    u32 filter_mask,
			    int (*vlan_fill)(struct sk_buff *skb,
					     struct net_device *dev,
					     u32 filter_mask))
{
	struct nlmsghdr *nlh;
	struct ifinfomsg *ifm;
	struct nlattr *br_afspec;
	struct nlattr *protinfo;
	u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
	struct net_device *br_dev = netdev_master_upper_dev_get(dev);
	int err = 0;

	nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ifm = nlmsg_data(nlh);
	ifm->ifi_family = AF_BRIDGE;
	ifm->__ifi_pad = 0;
	ifm->ifi_type = dev->type;
	ifm->ifi_index = dev->ifindex;
	ifm->ifi_flags = dev_get_flags(dev);
	ifm->ifi_change = 0;

	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
	    nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
	    (br_dev &&
	     nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) ||
	    (dev->addr_len &&
	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
	    (dev->ifindex != dev_get_iflink(dev) &&
	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
		goto nla_put_failure;

	br_afspec = nla_nest_start(skb, IFLA_AF_SPEC);
	if (!br_afspec)
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) {
		nla_nest_cancel(skb, br_afspec);
		goto nla_put_failure;
	}

	if (mode != BRIDGE_MODE_UNDEF) {
		if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) {
			nla_nest_cancel(skb, br_afspec);
			goto nla_put_failure;
		}
	}
	if (vlan_fill) {
		err = vlan_fill(skb, dev, filter_mask);
		if (err) {
			nla_nest_cancel(skb, br_afspec);
			goto nla_put_failure;
		}
	}
	nla_nest_end(skb, br_afspec);

	protinfo = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);
	if (!protinfo)
		goto nla_put_failure;

	if (brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_GUARD, BR_BPDU_GUARD) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_FAST_LEAVE,
				BR_MULTICAST_FAST_LEAVE) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_LEARNING, BR_LEARNING) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_PROXYARP, BR_PROXYARP)) {
		nla_nest_cancel(skb, protinfo);
		goto nla_put_failure;
	}

	nla_nest_end(skb, protinfo);

	nlmsg_end(skb, nlh);
	return 0;
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return err ? err : -EMSGSIZE;
}
EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink);
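
/* PF_BRIDGE RTM_GETLINK dump handler: walk all devices in the namespace
 * and let the bridge master and/or the device itself contribute an
 * entry; cb->args[0] holds the index to resume from on the next call.
 */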
static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	int idx = 0;
	u32 portid = NETLINK_CB(cb->skb).portid;
	u32 seq = cb->nlh->nlmsg_seq;
	u32 filter_mask = 0;
	int err;

	if (nlmsg_len(cb->nlh) > sizeof(struct ifinfomsg)) {
		struct nlattr *extfilt;

		extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
					  IFLA_EXT_MASK);
		if (extfilt) {
			if (nla_len(extfilt) < sizeof(filter_mask))
				return -EINVAL;

			filter_mask = nla_get_u32(extfilt);
		}
	}

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		struct net_device *br_dev = netdev_master_upper_dev_get(dev);

		if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
			if (idx >= cb->args[0]) {
				err = br_dev->netdev_ops->ndo_bridge_getlink(
						skb, portid, seq, dev,
						filter_mask, NLM_F_MULTI);
				if (err < 0 && err != -EOPNOTSUPP)
					break;
			}
			idx++;
		}

		if (ops->ndo_bridge_getlink) {
			if (idx >= cb->args[0]) {
				err = ops->ndo_bridge_getlink(skb, portid,
							      seq, dev,
							      filter_mask,
							      NLM_F_MULTI);
				if (err < 0 && err != -EOPNOTSUPP)
					break;
			}
			idx++;
		}
	}
	rcu_read_unlock();
	cb->args[0] = idx;

	return skb->len;
}
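
/* Worst-case size of a single AF_BRIDGE link message; used to size the
 * skb allocated by rtnl_bridge_notify().
 */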
static inline size_t bridge_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
		+ nla_total_size(IFNAMSIZ)	/* IFLA_IFNAME */
		+ nla_total_size(MAX_ADDR_LEN)	/* IFLA_ADDRESS */
		+ nla_total_size(sizeof(u32))	/* IFLA_MASTER */
		+ nla_total_size(sizeof(u32))	/* IFLA_MTU */
		+ nla_total_size(sizeof(u32))	/* IFLA_LINK */
		+ nla_total_size(sizeof(u32))	/* IFLA_OPERSTATE */
		+ nla_total_size(sizeof(u8))	/* IFLA_PROTINFO */
		+ nla_total_size(sizeof(struct nlattr))	/* IFLA_AF_SPEC */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRIDGE_FLAGS */
		+ nla_total_size(sizeof(u16));	/* IFLA_BRIDGE_MODE */
}
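
/* Ask the device to refill its bridge state and multicast the result
 * to RTNLGRP_LINK listeners.
 */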
static int rtnl_bridge_notify(struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -EOPNOTSUPP;

	if (!dev->netdev_ops->ndo_bridge_getlink)
		return 0;

	skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC);
	if (!skb) {
		err = -ENOMEM;
		goto errout;
	}

	err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0);
	if (err < 0)
		goto errout;

	if (!skb->len)
		goto errout;

	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
	return 0;
errout:
	WARN_ON(err == -EMSGSIZE);
	kfree_skb(skb);
	if (err)
		rtnl_set_sk_err(net, RTNLGRP_LINK, err);
	return err;
}
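
/* PF_BRIDGE RTM_SETLINK handler.  Per IFLA_BRIDGE_FLAGS the request is
 * passed to the bridge master (BRIDGE_FLAGS_MASTER, also the default)
 * and/or to the port device itself (BRIDGE_FLAGS_SELF); flags that were
 * handled are cleared from the attribute in the request.
 */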
static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct ifinfomsg *ifm;
	struct net_device *dev;
	struct nlattr *br_spec, *attr = NULL;
	int rem, err = -EOPNOTSUPP;
	u16 flags = 0;
	bool have_flags = false;

	if (nlmsg_len(nlh) < sizeof(*ifm))
		return -EINVAL;

	ifm = nlmsg_data(nlh);
	if (ifm->ifi_family != AF_BRIDGE)
		return -EPFNOSUPPORT;

	dev = __dev_get_by_index(net, ifm->ifi_index);
	if (!dev) {
		pr_info("PF_BRIDGE: RTM_SETLINK with unknown ifindex\n");
		return -ENODEV;
	}

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (br_spec) {
		nla_for_each_nested(attr, br_spec, rem) {
			if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
				if (nla_len(attr) < sizeof(flags))
					return -EINVAL;

				have_flags = true;
				flags = nla_get_u16(attr);
				break;
			}
		}
	}

	if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
		struct net_device *br_dev = netdev_master_upper_dev_get(dev);

		if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) {
			err = -EOPNOTSUPP;
			goto out;
		}

		err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags);
		if (err)
			goto out;

		flags &= ~BRIDGE_FLAGS_MASTER;
	}

	if ((flags & BRIDGE_FLAGS_SELF)) {
		if (!dev->netdev_ops->ndo_bridge_setlink)
			err = -EOPNOTSUPP;
		else
			err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh,
								  flags);
		if (!err) {
			flags &= ~BRIDGE_FLAGS_SELF;

			/* Generate event to notify upper layer of bridge
			 * change
			 */
			err = rtnl_bridge_notify(dev);
		}
	}

	if (have_flags)
		memcpy(nla_data(attr), &flags, sizeof(flags));
out:
	return err;
}
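
/* PF_BRIDGE RTM_DELLINK handler; mirrors rtnl_bridge_setlink() but
 * invokes the ndo_bridge_dellink operations instead.
 */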
static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct ifinfomsg *ifm;
	struct net_device *dev;
	struct nlattr *br_spec, *attr = NULL;
	int rem, err = -EOPNOTSUPP;
	u16 flags = 0;
	bool have_flags = false;

	if (nlmsg_len(nlh) < sizeof(*ifm))
		return -EINVAL;

	ifm = nlmsg_data(nlh);
	if (ifm->ifi_family != AF_BRIDGE)
		return -EPFNOSUPPORT;

	dev = __dev_get_by_index(net, ifm->ifi_index);
	if (!dev) {
		pr_info("PF_BRIDGE: RTM_DELLINK with unknown ifindex\n");
		return -ENODEV;
	}

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (br_spec) {
		nla_for_each_nested(attr, br_spec, rem) {
			if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
				if (nla_len(attr) < sizeof(flags))
					return -EINVAL;

				have_flags = true;
				flags = nla_get_u16(attr);
				break;
			}
		}
	}

	if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
		struct net_device *br_dev = netdev_master_upper_dev_get(dev);

		if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) {
			err = -EOPNOTSUPP;
			goto out;
		}

		err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags);
		if (err)
			goto out;

		flags &= ~BRIDGE_FLAGS_MASTER;
	}

	if ((flags & BRIDGE_FLAGS_SELF)) {
		if (!dev->netdev_ops->ndo_bridge_dellink)
			err = -EOPNOTSUPP;
		else
			err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh,
								  flags);
		if (!err) {
			flags &= ~BRIDGE_FLAGS_SELF;

			/* Generate event to notify upper layer of bridge
			 * change
			 */
			err = rtnl_bridge_notify(dev);
		}
	}

	if (have_flags)
		memcpy(nla_data(attr), &flags, sizeof(flags));
out:
	return err;
}
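
/* An attribute may be dumped if the filter mask selects it and we are
 * either starting fresh (idxattr == 0) or resuming at that attribute.
 */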
static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr)
{
	return (mask & IFLA_STATS_FILTER_BIT(attrid)) &&
	       (!idxattr || idxattr == attrid);
}

#define IFLA_OFFLOAD_XSTATS_FIRST (IFLA_OFFLOAD_XSTATS_UNSPEC + 1)

static int rtnl_get_offload_stats_attr_size(int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return sizeof(struct rtnl_link_stats64);
	}

	return 0;
}
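
/* Emit one IFLA_OFFLOAD_XSTATS_* attribute per statistic the driver
 * advertises.  On failure *prividx records the attribute to resume
 * from; -ENODATA means the device has nothing to report.
 */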
static int rtnl_get_offload_stats(struct sk_buff *skb, struct net_device *dev,
				  int *prividx)
{
	struct nlattr *attr = NULL;
	int attr_id, size;
	void *attr_data;
	int err;

	if (!(dev->netdev_ops && dev->netdev_ops->ndo_has_offload_stats &&
	      dev->netdev_ops->ndo_get_offload_stats))
		return -ENODATA;

	for (attr_id = IFLA_OFFLOAD_XSTATS_FIRST;
	     attr_id <= IFLA_OFFLOAD_XSTATS_MAX; attr_id++) {
		if (attr_id < *prividx)
			continue;

		size = rtnl_get_offload_stats_attr_size(attr_id);
		if (!size)
			continue;

		if (!dev->netdev_ops->ndo_has_offload_stats(attr_id))
			continue;

		attr = nla_reserve_64bit(skb, attr_id, size,
					 IFLA_OFFLOAD_XSTATS_UNSPEC);
		if (!attr)
			goto nla_put_failure;

		attr_data = nla_data(attr);
		memset(attr_data, 0, size);
		err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev,
							     attr_data);
		if (err)
			goto get_offload_stats_failure;
	}

	if (!attr)
		return -ENODATA;

	*prividx = 0;
	return 0;

nla_put_failure:
	err = -EMSGSIZE;
get_offload_stats_failure:
	*prividx = attr_id;
	return err;
}
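
/* Space needed for the IFLA_STATS_LINK_OFFLOAD_XSTATS nest, or 0 if
 * the device exposes no offload statistics.
 */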
static int rtnl_get_offload_stats_size(const struct net_device *dev)
{
	int nla_size = 0;
	int attr_id;
	int size;

	if (!(dev->netdev_ops && dev->netdev_ops->ndo_has_offload_stats &&
	      dev->netdev_ops->ndo_get_offload_stats))
		return 0;

	for (attr_id = IFLA_OFFLOAD_XSTATS_FIRST;
	     attr_id <= IFLA_OFFLOAD_XSTATS_MAX; attr_id++) {
		if (!dev->netdev_ops->ndo_has_offload_stats(attr_id))
			continue;
		size = rtnl_get_offload_stats_attr_size(attr_id);
		nla_size += nla_total_size_64bit(size);
	}

	if (nla_size != 0)
		nla_size += nla_total_size(0);

	return nla_size;
}
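
/* Build one RTM_NEWSTATS message.  *idxattr and *prividx carry the
 * attribute (and the position within it) at which a previous, partially
 * filled message stopped, so a dump can resume mid-device.
 */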
static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
			       int type, u32 pid, u32 seq, u32 change,
			       unsigned int flags, unsigned int filter_mask,
			       int *idxattr, int *prividx)
{
	struct if_stats_msg *ifsm;
	struct nlmsghdr *nlh;
	struct nlattr *attr;
	int s_prividx = *prividx;
	int err;

	ASSERT_RTNL();

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags);
	if (!nlh)
		return -EMSGSIZE;

	ifsm = nlmsg_data(nlh);
	ifsm->ifindex = dev->ifindex;
	ifsm->filter_mask = filter_mask;

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) {
		struct rtnl_link_stats64 *sp;

		attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64,
					 sizeof(struct rtnl_link_stats64),
					 IFLA_STATS_UNSPEC);
		if (!attr)
			goto nla_put_failure;

		sp = nla_data(attr);
		dev_get_stats(dev, sp);
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) {
		const struct rtnl_link_ops *ops = dev->rtnl_link_ops;

		if (ops && ops->fill_linkxstats) {
			*idxattr = IFLA_STATS_LINK_XSTATS;
			attr = nla_nest_start(skb,
					      IFLA_STATS_LINK_XSTATS);
			if (!attr)
				goto nla_put_failure;

			err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
			nla_nest_end(skb, attr);
			if (err)
				goto nla_put_failure;
			*idxattr = 0;
		}
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE,
			     *idxattr)) {
		const struct rtnl_link_ops *ops = NULL;
		const struct net_device *master;

		master = netdev_master_upper_dev_get(dev);
		if (master)
			ops = master->rtnl_link_ops;
		if (ops && ops->fill_linkxstats) {
			*idxattr = IFLA_STATS_LINK_XSTATS_SLAVE;
			attr = nla_nest_start(skb,
					      IFLA_STATS_LINK_XSTATS_SLAVE);
			if (!attr)
				goto nla_put_failure;

			err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
			nla_nest_end(skb, attr);
			if (err)
				goto nla_put_failure;
			*idxattr = 0;
		}
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS,
			     *idxattr)) {
		*idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS;
		attr = nla_nest_start(skb, IFLA_STATS_LINK_OFFLOAD_XSTATS);
		if (!attr)
			goto nla_put_failure;

		err = rtnl_get_offload_stats(skb, dev, prividx);
		if (err == -ENODATA)
			nla_nest_cancel(skb, attr);
		else
			nla_nest_end(skb, attr);

		if (err && err != -ENODATA)
			goto nla_put_failure;
		*idxattr = 0;
	}

	nlmsg_end(skb, nlh);

	return 0;

nla_put_failure:
	/* not a multi message or no progress mean a real error */
	if (!(flags & NLM_F_MULTI) || s_prividx == *prividx)
		nlmsg_cancel(skb, nlh);
	else
		nlmsg_end(skb, nlh);

	return -EMSGSIZE;
}
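
/* Upper bound on the size of one RTM_NEWSTATS message for @dev under
 * the given @filter_mask.
 */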
static size_t if_nlmsg_stats_size(const struct net_device *dev,
				  u32 filter_mask)
{
	size_t size = 0;

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0))
		size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64));

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) {
		const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
		int attr = IFLA_STATS_LINK_XSTATS;

		if (ops && ops->get_linkxstats_size) {
			size += nla_total_size(ops->get_linkxstats_size(dev,
									attr));
			/* for IFLA_STATS_LINK_XSTATS */
			size += nla_total_size(0);
		}
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 0)) {
		struct net_device *_dev = (struct net_device *)dev;
		const struct rtnl_link_ops *ops = NULL;
		const struct net_device *master;

		/* netdev_master_upper_dev_get can't take const */
		master = netdev_master_upper_dev_get(_dev);
		if (master)
			ops = master->rtnl_link_ops;
		if (ops && ops->get_linkxstats_size) {
			int attr = IFLA_STATS_LINK_XSTATS_SLAVE;

			size += nla_total_size(ops->get_linkxstats_size(dev,
									attr));
			/* for IFLA_STATS_LINK_XSTATS_SLAVE */
			size += nla_total_size(0);
		}
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0))
		size += rtnl_get_offload_stats_size(dev);

	return size;
}
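
/* RTM_GETSTATS doit handler: look the device up by ifindex and unicast
 * a single statistics message back to the requester.
 */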
static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *dev = NULL;
	int idxattr = 0, prividx = 0;
	struct if_stats_msg *ifsm;
	struct sk_buff *nskb;
	u32 filter_mask;
	int err;

	ifsm = nlmsg_data(nlh);
	if (ifsm->ifindex > 0)
		dev = __dev_get_by_index(net, ifsm->ifindex);
	else
		return -EINVAL;

	if (!dev)
		return -ENODEV;

	filter_mask = ifsm->filter_mask;
	if (!filter_mask)
		return -EINVAL;

	nskb = nlmsg_new(if_nlmsg_stats_size(dev, filter_mask), GFP_KERNEL);
	if (!nskb)
		return -ENOBUFS;

	err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS,
				  NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
				  0, filter_mask, &idxattr, &prividx);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in if_nlmsg_stats_size */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(nskb);
	} else {
		err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
	}

	return err;
}
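
/* RTM_GETSTATS dump handler: walk the per-namespace device hash table,
 * resuming from the hash bucket, device index, attribute and private
 * index saved in cb->args[0..3].
 */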
static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int h, s_h, err, s_idx, s_idxattr, s_prividx;
	struct net *net = sock_net(skb->sk);
	unsigned int flags = NLM_F_MULTI;
	struct if_stats_msg *ifsm;
	struct hlist_head *head;
	struct net_device *dev;
	u32 filter_mask = 0;
	int idx = 0;

	s_h = cb->args[0];
	s_idx = cb->args[1];
	s_idxattr = cb->args[2];
	s_prividx = cb->args[3];

	cb->seq = net->dev_base_seq;

	ifsm = nlmsg_data(cb->nlh);
	filter_mask = ifsm->filter_mask;
	if (!filter_mask)
		return -EINVAL;

	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
		idx = 0;
		head = &net->dev_index_head[h];
		hlist_for_each_entry(dev, head, index_hlist) {
			if (idx < s_idx)
				goto cont;
			err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS,
						  NETLINK_CB(cb->skb).portid,
						  cb->nlh->nlmsg_seq, 0,
						  flags, filter_mask,
						  &s_idxattr, &s_prividx);
			/* If we ran out of room on the first message,
			 * we're in trouble
			 */
			WARN_ON((err == -EMSGSIZE) && (skb->len == 0));

			if (err < 0)
				goto out;
			s_prividx = 0;
			s_idxattr = 0;
			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
			idx++;
		}
	}
out:
	cb->args[3] = s_prividx;
	cb->args[2] = s_idxattr;
	cb->args[1] = idx;
	cb->args[0] = h;

	return skb->len;
}

/* Process one rtnetlink message. */

static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	rtnl_doit_func doit;
	int kind;
	int family;
	int type;
	int err;

	type = nlh->nlmsg_type;
	if (type > RTM_MAX)
		return -EOPNOTSUPP;

	type -= RTM_BASE;

	/* All the messages must have at least 1 byte length */
	if (nlmsg_len(nlh) < sizeof(struct rtgenmsg))
		return 0;

	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
	kind = type & 3;

	if (kind != 2 && !netlink_net_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (kind == 2 && nlh->nlmsg_flags & NLM_F_DUMP) {
		struct sock *rtnl;
		rtnl_dumpit_func dumpit;
		rtnl_calcit_func calcit;
		u16 min_dump_alloc = 0;

		dumpit = rtnl_get_dumpit(family, type);
		if (dumpit == NULL)
			return -EOPNOTSUPP;
		calcit = rtnl_get_calcit(family, type);
		if (calcit)
			min_dump_alloc = calcit(skb, nlh);

		__rtnl_unlock();
		rtnl = net->rtnl;
		{
			struct netlink_dump_control c = {
				.dump = dumpit,
				.min_dump_alloc = min_dump_alloc,
			};
			err = netlink_dump_start(rtnl, skb, nlh, &c);
		}
		rtnl_lock();
		return err;
	}

	doit = rtnl_get_doit(family, type);
	if (doit == NULL)
		return -EOPNOTSUPP;

	return doit(skb, nlh);
}

static void rtnetlink_rcv(struct sk_buff *skb)
{
	rtnl_lock();
	netlink_rcv_skb(skb, &rtnetlink_rcv_msg);
	rtnl_unlock();
}
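
/* Events listed in the switch below are reported through other rtnetlink
 * paths, so the notifier stays quiet for them; everything else triggers
 * an RTM_NEWLINK notification.
 */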
static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_UP:
	case NETDEV_DOWN:
	case NETDEV_PRE_UP:
	case NETDEV_POST_INIT:
	case NETDEV_REGISTER:
	case NETDEV_CHANGE:
	case NETDEV_PRE_TYPE_CHANGE:
	case NETDEV_GOING_DOWN:
	case NETDEV_UNREGISTER:
	case NETDEV_UNREGISTER_FINAL:
	case NETDEV_RELEASE:
	case NETDEV_JOIN:
	case NETDEV_BONDING_INFO:
		break;
	default:
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block rtnetlink_dev_notifier = {
	.notifier_call	= rtnetlink_event,
};

static int __net_init rtnetlink_net_init(struct net *net)
{
	struct sock *sk;
	struct netlink_kernel_cfg cfg = {
		.groups		= RTNLGRP_MAX,
		.input		= rtnetlink_rcv,
		.cb_mutex	= &rtnl_mutex,
		.flags		= NL_CFG_F_NONROOT_RECV,
	};

	sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg);
	if (!sk)
		return -ENOMEM;
	net->rtnl = sk;
	return 0;
}

static void __net_exit rtnetlink_net_exit(struct net *net)
{
	netlink_kernel_release(net->rtnl);
	net->rtnl = NULL;
}

static struct pernet_operations rtnetlink_net_ops = {
	.init = rtnetlink_net_init,
	.exit = rtnetlink_net_exit,
};
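
/* Wire up the pernet subsystem, the netdevice notifier and the core
 * PF_UNSPEC/PF_BRIDGE message handlers; address- and route-family
 * handlers are registered later by their own subsystems.
 */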
void __init rtnetlink_init(void)
{
	if (register_pernet_subsys(&rtnetlink_net_ops))
		panic("rtnetlink_init: cannot initialize rtnetlink\n");

	register_netdevice_notifier(&rtnetlink_dev_notifier);

	rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
		      rtnl_dump_ifinfo, rtnl_calcit);
	rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, NULL);

	rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, NULL);

	rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, NULL);
	rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, NULL);
	rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, rtnl_fdb_dump, NULL);

	rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, NULL);
	rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, NULL);
	rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, NULL);

	rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump,
		      NULL);
}