/*
 * NET3	Protocol independent device support routines.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain :	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_ingress.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
#include <net/udp_tunnel.h>
#include <linux/net_namespace.h>

#include "net-sysfs.h"
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
					 struct netdev_notifier_info *info);
static struct napi_struct *napi_by_id(unsigned int napi_id);

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates. This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);
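
/*
 * Editor's note: an illustrative sketch (not part of the original file) of
 * the locking rules described above. A pure reader can walk the device list
 * under rcu_read_lock() alone; a writer must hold the rtnl semaphore and
 * take dev_base_lock for writing, as list_netdevice() below does. The
 * helper name print_devices_sketch() is hypothetical.
 */
#if 0
static void print_devices_sketch(struct net *net)
{
	struct net_device *dev;

	rcu_read_lock();		/* pure reader: RCU alone is sufficient */
	for_each_netdev_rcu(net, dev)
		pr_info("%s\n", dev->name);
	rcu_read_unlock();
}
#endif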
static DEFINE_MUTEX(ifalias_mutex);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);

static seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0)
		;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */
static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */
DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] = {
	ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] = {
	"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	"_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	"_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	"_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************
 *
 *		Protocol management and registration routines
 *
 *******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
	else
		return pt->dev ? &pt->dev->ptype_specific :
				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee all CPU's that are in middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */
void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
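
/*
 * Editor's note: an illustrative sketch (not part of the original file) of a
 * protocol handler registered with dev_add_pack() and later removed with
 * dev_remove_pack(). The handler sample_rcv() and the ETH_P_ALL tap are
 * hypothetical; the function signature matches packet_type->func.
 */
#if 0
static int sample_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
{
	/* a tap is handed its own reference to the skb and must consume it */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type sample_pt __read_mostly = {
	.type = cpu_to_be16(ETH_P_ALL),	/* wildcard: lands on the ptype_all list */
	.dev  = NULL,			/* NULL means "all devices" */
	.func = sample_rcv,
};

/* dev_add_pack(&sample_pt); ... dev_remove_pack(&sample_pt); */
#endif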
/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);

/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&proto_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee all CPU's that are in middle of receiving packets
 *	will see the new offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);
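
/*
 * Editor's note: an illustrative sketch (not part of the original file) of a
 * GRO/GSO offload registration. The callback names my_gso_segment(),
 * my_gro_receive() and my_gro_complete() are hypothetical stand-ins for a
 * protocol's real offload routines. dev_add_offload() above keeps the list
 * sorted by ->priority, lower values first.
 */
#if 0
static struct packet_offload sample_offload __read_mostly = {
	.type      = cpu_to_be16(ETH_P_IP),
	.callbacks = {
		.gso_segment  = my_gso_segment,
		.gro_receive  = my_gro_receive,
		.gro_complete = my_gro_complete,
	},
};

/* dev_add_offload(&sample_offload); ... dev_remove_offload(&sample_offload); */
#endif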
  443. /**
  444. * __dev_remove_offload - remove offload handler
  445. * @po: packet offload declaration
  446. *
  447. * Remove a protocol offload handler that was previously added to the
  448. * kernel offload handlers by dev_add_offload(). The passed &offload_type
  449. * is removed from the kernel lists and can be freed or reused once this
  450. * function returns.
  451. *
  452. * The packet type might still be in use by receivers
  453. * and must not be freed until after all the CPU's have gone
  454. * through a quiescent state.
  455. */
  456. static void __dev_remove_offload(struct packet_offload *po)
  457. {
  458. struct list_head *head = &offload_base;
  459. struct packet_offload *po1;
  460. spin_lock(&offload_lock);
  461. list_for_each_entry(po1, head, list) {
  462. if (po == po1) {
  463. list_del_rcu(&po->list);
  464. goto out;
  465. }
  466. }
  467. pr_warn("dev_remove_offload: %p not found\n", po);
  468. out:
  469. spin_unlock(&offload_lock);
  470. }
  471. /**
  472. * dev_remove_offload - remove packet offload handler
  473. * @po: packet offload declaration
  474. *
  475. * Remove a packet offload handler that was previously added to the kernel
  476. * offload handlers by dev_add_offload(). The passed &offload_type is
  477. * removed from the kernel lists and can be freed or reused once this
  478. * function returns.
  479. *
  480. * This call sleeps to guarantee that no CPU is looking at the packet
  481. * type after return.
  482. */
  483. void dev_remove_offload(struct packet_offload *po)
  484. {
  485. __dev_remove_offload(po);
  486. synchronize_net();
  487. }
  488. EXPORT_SYMBOL(dev_remove_offload);
  489. /******************************************************************************
  490. *
  491. * Device Boot-time Settings Routines
  492. *
  493. ******************************************************************************/
  494. /* Boot time configuration table */
  495. static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
  496. /**
  497. * netdev_boot_setup_add - add new setup entry
  498. * @name: name of the device
  499. * @map: configured settings for the device
  500. *
501. * Adds a new setup entry to the dev_boot_setup list. The function
502. * returns 0 on error and 1 on success. This is a generic routine for
503. * all netdevices.
  504. */
  505. static int netdev_boot_setup_add(char *name, struct ifmap *map)
  506. {
  507. struct netdev_boot_setup *s;
  508. int i;
  509. s = dev_boot_setup;
  510. for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
  511. if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
  512. memset(s[i].name, 0, sizeof(s[i].name));
  513. strlcpy(s[i].name, name, IFNAMSIZ);
  514. memcpy(&s[i].map, map, sizeof(s[i].map));
  515. break;
  516. }
  517. }
  518. return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
  519. }
  520. /**
  521. * netdev_boot_setup_check - check boot time settings
  522. * @dev: the netdevice
  523. *
  524. * Check boot time settings for the device.
  525. * The found settings are set for the device to be used
  526. * later in the device probing.
527. * Returns 0 if no settings are found, 1 if they are.
  528. */
  529. int netdev_boot_setup_check(struct net_device *dev)
  530. {
  531. struct netdev_boot_setup *s = dev_boot_setup;
  532. int i;
  533. for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
  534. if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
  535. !strcmp(dev->name, s[i].name)) {
  536. dev->irq = s[i].map.irq;
  537. dev->base_addr = s[i].map.base_addr;
  538. dev->mem_start = s[i].map.mem_start;
  539. dev->mem_end = s[i].map.mem_end;
  540. return 1;
  541. }
  542. }
  543. return 0;
  544. }
  545. EXPORT_SYMBOL(netdev_boot_setup_check);
  546. /**
  547. * netdev_boot_base - get address from boot time settings
  548. * @prefix: prefix for network device
  549. * @unit: id for network device
  550. *
551. * Check boot time settings for the base address of the device.
  552. * The found settings are set for the device to be used
  553. * later in the device probing.
554. * Returns 0 if no settings are found.
  555. */
  556. unsigned long netdev_boot_base(const char *prefix, int unit)
  557. {
  558. const struct netdev_boot_setup *s = dev_boot_setup;
  559. char name[IFNAMSIZ];
  560. int i;
  561. sprintf(name, "%s%d", prefix, unit);
  562. /*
563. * If the device is already registered then return a base of 1
564. * to indicate not to probe for this interface
  565. */
  566. if (__dev_get_by_name(&init_net, name))
  567. return 1;
  568. for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
  569. if (!strcmp(name, s[i].name))
  570. return s[i].map.base_addr;
  571. return 0;
  572. }
  573. /*
  574. * Saves at boot time configured settings for any netdevice.
  575. */
  576. int __init netdev_boot_setup(char *str)
  577. {
  578. int ints[5];
  579. struct ifmap map;
  580. str = get_options(str, ARRAY_SIZE(ints), ints);
  581. if (!str || !*str)
  582. return 0;
  583. /* Save settings */
  584. memset(&map, 0, sizeof(map));
  585. if (ints[0] > 0)
  586. map.irq = ints[1];
  587. if (ints[0] > 1)
  588. map.base_addr = ints[2];
  589. if (ints[0] > 2)
  590. map.mem_start = ints[3];
  591. if (ints[0] > 3)
  592. map.mem_end = ints[4];
  593. /* Add new entry to the list */
  594. return netdev_boot_setup_add(str, &map);
  595. }
  596. __setup("netdev=", netdev_boot_setup);
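/*
 * Editor's note: given the parsing above, the boot-time parameter takes the
 * form "netdev=<irq>,<base_addr>,<mem_start>,<mem_end>,<name>", with unused
 * numeric fields left as 0. A purely illustrative example:
 *
 *      netdev=9,0x300,0,0,eth0
 *
 * would record irq 9 and I/O base 0x300 for the device later probed as eth0.
 */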
  597. /*******************************************************************************
  598. *
  599. * Device Interface Subroutines
  600. *
  601. *******************************************************************************/
  602. /**
603. * dev_get_iflink - get 'iflink' value of an interface
  604. * @dev: targeted interface
  605. *
  606. * Indicates the ifindex the interface is linked to.
  607. * Physical interfaces have the same 'ifindex' and 'iflink' values.
  608. */
  609. int dev_get_iflink(const struct net_device *dev)
  610. {
  611. if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
  612. return dev->netdev_ops->ndo_get_iflink(dev);
  613. return dev->ifindex;
  614. }
  615. EXPORT_SYMBOL(dev_get_iflink);
  616. /**
  617. * dev_fill_metadata_dst - Retrieve tunnel egress information.
  618. * @dev: targeted interface
  619. * @skb: The packet.
  620. *
621. * For better visibility of tunnel traffic, OVS needs to retrieve
622. * egress tunnel information for a packet. The following API allows
623. * the user to get this info.
  624. */
  625. int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
  626. {
  627. struct ip_tunnel_info *info;
  628. if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
  629. return -EINVAL;
  630. info = skb_tunnel_info_unclone(skb);
  631. if (!info)
  632. return -ENOMEM;
  633. if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
  634. return -EINVAL;
  635. return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
  636. }
  637. EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
  638. /**
  639. * __dev_get_by_name - find a device by its name
  640. * @net: the applicable net namespace
  641. * @name: name to find
  642. *
  643. * Find an interface by name. Must be called under RTNL semaphore
  644. * or @dev_base_lock. If the name is found a pointer to the device
  645. * is returned. If the name is not found then %NULL is returned. The
  646. * reference counters are not incremented so the caller must be
  647. * careful with locks.
  648. */
  649. struct net_device *__dev_get_by_name(struct net *net, const char *name)
  650. {
  651. struct net_device *dev;
  652. struct hlist_head *head = dev_name_hash(net, name);
  653. hlist_for_each_entry(dev, head, name_hlist)
  654. if (!strncmp(dev->name, name, IFNAMSIZ))
  655. return dev;
  656. return NULL;
  657. }
  658. EXPORT_SYMBOL(__dev_get_by_name);
  659. /**
  660. * dev_get_by_name_rcu - find a device by its name
  661. * @net: the applicable net namespace
  662. * @name: name to find
  663. *
  664. * Find an interface by name.
  665. * If the name is found a pointer to the device is returned.
  666. * If the name is not found then %NULL is returned.
  667. * The reference counters are not incremented so the caller must be
  668. * careful with locks. The caller must hold RCU lock.
  669. */
  670. struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
  671. {
  672. struct net_device *dev;
  673. struct hlist_head *head = dev_name_hash(net, name);
  674. hlist_for_each_entry_rcu(dev, head, name_hlist)
  675. if (!strncmp(dev->name, name, IFNAMSIZ))
  676. return dev;
  677. return NULL;
  678. }
  679. EXPORT_SYMBOL(dev_get_by_name_rcu);
  680. /**
  681. * dev_get_by_name - find a device by its name
  682. * @net: the applicable net namespace
  683. * @name: name to find
  684. *
  685. * Find an interface by name. This can be called from any
  686. * context and does its own locking. The returned handle has
  687. * the usage count incremented and the caller must use dev_put() to
  688. * release it when it is no longer needed. %NULL is returned if no
  689. * matching device is found.
  690. */
  691. struct net_device *dev_get_by_name(struct net *net, const char *name)
  692. {
  693. struct net_device *dev;
  694. rcu_read_lock();
  695. dev = dev_get_by_name_rcu(net, name);
  696. if (dev)
  697. dev_hold(dev);
  698. rcu_read_unlock();
  699. return dev;
  700. }
  701. EXPORT_SYMBOL(dev_get_by_name);
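/*
 * Editor's sketch (not part of dev.c): the two common lookup patterns.
 * The example_* function name is an assumption made for illustration.
 */
static int example_mtu_by_name(struct net *net, const char *ifname)
{
        struct net_device *dev;
        int mtu = -ENODEV;

        /* Lockless variant: dev is only valid inside the RCU read section. */
        rcu_read_lock();
        dev = dev_get_by_name_rcu(net, ifname);
        if (dev)
                mtu = dev->mtu;
        rcu_read_unlock();

        return mtu;
}

/*
 * When the device must outlive the lookup, take a reference instead:
 *      dev = dev_get_by_name(net, ifname);
 *      ...
 *      dev_put(dev);
 */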
  702. /**
  703. * __dev_get_by_index - find a device by its ifindex
  704. * @net: the applicable net namespace
  705. * @ifindex: index of device
  706. *
  707. * Search for an interface by index. Returns %NULL if the device
  708. * is not found or a pointer to the device. The device has not
  709. * had its reference counter increased so the caller must be careful
  710. * about locking. The caller must hold either the RTNL semaphore
  711. * or @dev_base_lock.
  712. */
  713. struct net_device *__dev_get_by_index(struct net *net, int ifindex)
  714. {
  715. struct net_device *dev;
  716. struct hlist_head *head = dev_index_hash(net, ifindex);
  717. hlist_for_each_entry(dev, head, index_hlist)
  718. if (dev->ifindex == ifindex)
  719. return dev;
  720. return NULL;
  721. }
  722. EXPORT_SYMBOL(__dev_get_by_index);
  723. /**
  724. * dev_get_by_index_rcu - find a device by its ifindex
  725. * @net: the applicable net namespace
  726. * @ifindex: index of device
  727. *
  728. * Search for an interface by index. Returns %NULL if the device
  729. * is not found or a pointer to the device. The device has not
  730. * had its reference counter increased so the caller must be careful
  731. * about locking. The caller must hold RCU lock.
  732. */
  733. struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
  734. {
  735. struct net_device *dev;
  736. struct hlist_head *head = dev_index_hash(net, ifindex);
  737. hlist_for_each_entry_rcu(dev, head, index_hlist)
  738. if (dev->ifindex == ifindex)
  739. return dev;
  740. return NULL;
  741. }
  742. EXPORT_SYMBOL(dev_get_by_index_rcu);
  743. /**
  744. * dev_get_by_index - find a device by its ifindex
  745. * @net: the applicable net namespace
  746. * @ifindex: index of device
  747. *
  748. * Search for an interface by index. Returns NULL if the device
  749. * is not found or a pointer to the device. The device returned has
  750. * had a reference added and the pointer is safe until the user calls
  751. * dev_put to indicate they have finished with it.
  752. */
  753. struct net_device *dev_get_by_index(struct net *net, int ifindex)
  754. {
  755. struct net_device *dev;
  756. rcu_read_lock();
  757. dev = dev_get_by_index_rcu(net, ifindex);
  758. if (dev)
  759. dev_hold(dev);
  760. rcu_read_unlock();
  761. return dev;
  762. }
  763. EXPORT_SYMBOL(dev_get_by_index);
  764. /**
  765. * dev_get_by_napi_id - find a device by napi_id
  766. * @napi_id: ID of the NAPI struct
  767. *
  768. * Search for an interface by NAPI ID. Returns %NULL if the device
  769. * is not found or a pointer to the device. The device has not had
  770. * its reference counter increased so the caller must be careful
  771. * about locking. The caller must hold RCU lock.
  772. */
  773. struct net_device *dev_get_by_napi_id(unsigned int napi_id)
  774. {
  775. struct napi_struct *napi;
  776. WARN_ON_ONCE(!rcu_read_lock_held());
  777. if (napi_id < MIN_NAPI_ID)
  778. return NULL;
  779. napi = napi_by_id(napi_id);
  780. return napi ? napi->dev : NULL;
  781. }
  782. EXPORT_SYMBOL(dev_get_by_napi_id);
  783. /**
  784. * netdev_get_name - get a netdevice name, knowing its ifindex.
  785. * @net: network namespace
  786. * @name: a pointer to the buffer where the name will be stored.
  787. * @ifindex: the ifindex of the interface to get the name from.
  788. *
  789. * The use of raw_seqcount_begin() and cond_resched() before
  790. * retrying is required as we want to give the writers a chance
  791. * to complete when CONFIG_PREEMPT is not set.
  792. */
  793. int netdev_get_name(struct net *net, char *name, int ifindex)
  794. {
  795. struct net_device *dev;
  796. unsigned int seq;
  797. retry:
  798. seq = raw_seqcount_begin(&devnet_rename_seq);
  799. rcu_read_lock();
  800. dev = dev_get_by_index_rcu(net, ifindex);
  801. if (!dev) {
  802. rcu_read_unlock();
  803. return -ENODEV;
  804. }
  805. strcpy(name, dev->name);
  806. rcu_read_unlock();
  807. if (read_seqcount_retry(&devnet_rename_seq, seq)) {
  808. cond_resched();
  809. goto retry;
  810. }
  811. return 0;
  812. }
  813. /**
  814. * dev_getbyhwaddr_rcu - find a device by its hardware address
  815. * @net: the applicable net namespace
  816. * @type: media type of device
  817. * @ha: hardware address
  818. *
  819. * Search for an interface by MAC address. Returns NULL if the device
  820. * is not found or a pointer to the device.
  821. * The caller must hold RCU or RTNL.
  822. * The returned device has not had its ref count increased
823. * and the caller must therefore be careful about locking.
  824. *
  825. */
  826. struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
  827. const char *ha)
  828. {
  829. struct net_device *dev;
  830. for_each_netdev_rcu(net, dev)
  831. if (dev->type == type &&
  832. !memcmp(dev->dev_addr, ha, dev->addr_len))
  833. return dev;
  834. return NULL;
  835. }
  836. EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
  837. struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
  838. {
  839. struct net_device *dev;
  840. ASSERT_RTNL();
  841. for_each_netdev(net, dev)
  842. if (dev->type == type)
  843. return dev;
  844. return NULL;
  845. }
  846. EXPORT_SYMBOL(__dev_getfirstbyhwtype);
  847. struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
  848. {
  849. struct net_device *dev, *ret = NULL;
  850. rcu_read_lock();
  851. for_each_netdev_rcu(net, dev)
  852. if (dev->type == type) {
  853. dev_hold(dev);
  854. ret = dev;
  855. break;
  856. }
  857. rcu_read_unlock();
  858. return ret;
  859. }
  860. EXPORT_SYMBOL(dev_getfirstbyhwtype);
  861. /**
  862. * __dev_get_by_flags - find any device with given flags
  863. * @net: the applicable net namespace
  864. * @if_flags: IFF_* values
  865. * @mask: bitmask of bits in if_flags to check
  866. *
  867. * Search for any interface with the given flags. Returns NULL if a device
  868. * is not found or a pointer to the device. Must be called inside
  869. * rtnl_lock(), and result refcount is unchanged.
  870. */
  871. struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
  872. unsigned short mask)
  873. {
  874. struct net_device *dev, *ret;
  875. ASSERT_RTNL();
  876. ret = NULL;
  877. for_each_netdev(net, dev) {
  878. if (((dev->flags ^ if_flags) & mask) == 0) {
  879. ret = dev;
  880. break;
  881. }
  882. }
  883. return ret;
  884. }
  885. EXPORT_SYMBOL(__dev_get_by_flags);
  886. /**
  887. * dev_valid_name - check if name is okay for network device
  888. * @name: name string
  889. *
890. * Network device names need to be valid file names
891. * to allow sysfs to work. We also disallow any kind of
  892. * whitespace.
  893. */
  894. bool dev_valid_name(const char *name)
  895. {
  896. if (*name == '\0')
  897. return false;
  898. if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
  899. return false;
  900. if (!strcmp(name, ".") || !strcmp(name, ".."))
  901. return false;
  902. while (*name) {
  903. if (*name == '/' || *name == ':' || isspace(*name))
  904. return false;
  905. name++;
  906. }
  907. return true;
  908. }
  909. EXPORT_SYMBOL(dev_valid_name);
  910. /**
  911. * __dev_alloc_name - allocate a name for a device
  912. * @net: network namespace to allocate the device name in
  913. * @name: name format string
  914. * @buf: scratch buffer and result name string
  915. *
916. * Passed a format string - eg "lt%d" - it will try and find a suitable
917. * id. It scans the list of devices to build up a free map, then chooses
  918. * the first empty slot. The caller must hold the dev_base or rtnl lock
  919. * while allocating the name and adding the device in order to avoid
  920. * duplicates.
  921. * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
  922. * Returns the number of the unit assigned or a negative errno code.
  923. */
  924. static int __dev_alloc_name(struct net *net, const char *name, char *buf)
  925. {
  926. int i = 0;
  927. const char *p;
  928. const int max_netdevices = 8*PAGE_SIZE;
  929. unsigned long *inuse;
  930. struct net_device *d;
  931. if (!dev_valid_name(name))
  932. return -EINVAL;
  933. p = strchr(name, '%');
  934. if (p) {
  935. /*
  936. * Verify the string as this thing may have come from
937. * the user. There must be exactly one "%d" and no other "%"
  938. * characters.
  939. */
  940. if (p[1] != 'd' || strchr(p + 2, '%'))
  941. return -EINVAL;
  942. /* Use one page as a bit array of possible slots */
  943. inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
  944. if (!inuse)
  945. return -ENOMEM;
  946. for_each_netdev(net, d) {
  947. if (!sscanf(d->name, name, &i))
  948. continue;
  949. if (i < 0 || i >= max_netdevices)
  950. continue;
  951. /* avoid cases where sscanf is not exact inverse of printf */
  952. snprintf(buf, IFNAMSIZ, name, i);
  953. if (!strncmp(buf, d->name, IFNAMSIZ))
  954. set_bit(i, inuse);
  955. }
  956. i = find_first_zero_bit(inuse, max_netdevices);
  957. free_page((unsigned long) inuse);
  958. }
  959. snprintf(buf, IFNAMSIZ, name, i);
  960. if (!__dev_get_by_name(net, buf))
  961. return i;
  962. /* It is possible to run out of possible slots
  963. * when the name is long and there isn't enough space left
  964. * for the digits, or if all bits are used.
  965. */
  966. return -ENFILE;
  967. }
  968. static int dev_alloc_name_ns(struct net *net,
  969. struct net_device *dev,
  970. const char *name)
  971. {
  972. char buf[IFNAMSIZ];
  973. int ret;
  974. BUG_ON(!net);
  975. ret = __dev_alloc_name(net, name, buf);
  976. if (ret >= 0)
  977. strlcpy(dev->name, buf, IFNAMSIZ);
  978. return ret;
  979. }
  980. /**
  981. * dev_alloc_name - allocate a name for a device
  982. * @dev: device
  983. * @name: name format string
  984. *
985. * Passed a format string - eg "lt%d" - it will try and find a suitable
986. * id. It scans the list of devices to build up a free map, then chooses
  987. * the first empty slot. The caller must hold the dev_base or rtnl lock
  988. * while allocating the name and adding the device in order to avoid
  989. * duplicates.
  990. * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
  991. * Returns the number of the unit assigned or a negative errno code.
  992. */
  993. int dev_alloc_name(struct net_device *dev, const char *name)
  994. {
  995. return dev_alloc_name_ns(dev_net(dev), dev, name);
  996. }
  997. EXPORT_SYMBOL(dev_alloc_name);
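/*
 * Editor's sketch (not part of dev.c): a driver requesting the next free
 * unit for a "name%d" pattern before registration. The "example%d" format
 * string is an assumption made for the illustration.
 */
        /* typically between alloc_netdev() and register_netdevice(): */
        err = dev_alloc_name(dev, "example%d");
        if (err < 0)
                return err;             /* invalid format or no free unit */
        /* dev->name now holds e.g. "example0"; err is the assigned unit */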
  998. int dev_get_valid_name(struct net *net, struct net_device *dev,
  999. const char *name)
  1000. {
  1001. BUG_ON(!net);
  1002. if (!dev_valid_name(name))
  1003. return -EINVAL;
  1004. if (strchr(name, '%'))
  1005. return dev_alloc_name_ns(net, dev, name);
  1006. else if (__dev_get_by_name(net, name))
  1007. return -EEXIST;
  1008. else if (dev->name != name)
  1009. strlcpy(dev->name, name, IFNAMSIZ);
  1010. return 0;
  1011. }
  1012. EXPORT_SYMBOL(dev_get_valid_name);
  1013. /**
  1014. * dev_change_name - change name of a device
  1015. * @dev: device
  1016. * @newname: name (or format string) must be at least IFNAMSIZ
  1017. *
1018. * Change the name of a device, can pass format strings "eth%d"
1019. * for wildcarding.
  1020. */
  1021. int dev_change_name(struct net_device *dev, const char *newname)
  1022. {
  1023. unsigned char old_assign_type;
  1024. char oldname[IFNAMSIZ];
  1025. int err = 0;
  1026. int ret;
  1027. struct net *net;
  1028. ASSERT_RTNL();
  1029. BUG_ON(!dev_net(dev));
  1030. net = dev_net(dev);
  1031. if (dev->flags & IFF_UP)
  1032. return -EBUSY;
  1033. write_seqcount_begin(&devnet_rename_seq);
  1034. if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
  1035. write_seqcount_end(&devnet_rename_seq);
  1036. return 0;
  1037. }
  1038. memcpy(oldname, dev->name, IFNAMSIZ);
  1039. err = dev_get_valid_name(net, dev, newname);
  1040. if (err < 0) {
  1041. write_seqcount_end(&devnet_rename_seq);
  1042. return err;
  1043. }
  1044. if (oldname[0] && !strchr(oldname, '%'))
  1045. netdev_info(dev, "renamed from %s\n", oldname);
  1046. old_assign_type = dev->name_assign_type;
  1047. dev->name_assign_type = NET_NAME_RENAMED;
  1048. rollback:
  1049. ret = device_rename(&dev->dev, dev->name);
  1050. if (ret) {
  1051. memcpy(dev->name, oldname, IFNAMSIZ);
  1052. dev->name_assign_type = old_assign_type;
  1053. write_seqcount_end(&devnet_rename_seq);
  1054. return ret;
  1055. }
  1056. write_seqcount_end(&devnet_rename_seq);
  1057. netdev_adjacent_rename_links(dev, oldname);
  1058. write_lock_bh(&dev_base_lock);
  1059. hlist_del_rcu(&dev->name_hlist);
  1060. write_unlock_bh(&dev_base_lock);
  1061. synchronize_rcu();
  1062. write_lock_bh(&dev_base_lock);
  1063. hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
  1064. write_unlock_bh(&dev_base_lock);
  1065. ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
  1066. ret = notifier_to_errno(ret);
  1067. if (ret) {
  1068. /* err >= 0 after dev_alloc_name() or stores the first errno */
  1069. if (err >= 0) {
  1070. err = ret;
  1071. write_seqcount_begin(&devnet_rename_seq);
  1072. memcpy(dev->name, oldname, IFNAMSIZ);
  1073. memcpy(oldname, newname, IFNAMSIZ);
  1074. dev->name_assign_type = old_assign_type;
  1075. old_assign_type = NET_NAME_RENAMED;
  1076. goto rollback;
  1077. } else {
  1078. pr_err("%s: name change rollback failed: %d\n",
  1079. dev->name, ret);
  1080. }
  1081. }
  1082. return err;
  1083. }
  1084. /**
  1085. * dev_set_alias - change ifalias of a device
  1086. * @dev: device
  1087. * @alias: name up to IFALIASZ
  1088. * @len: limit of bytes to copy from info
  1089. *
1090. * Set ifalias for a device.
  1091. */
  1092. int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
  1093. {
  1094. struct dev_ifalias *new_alias = NULL;
  1095. if (len >= IFALIASZ)
  1096. return -EINVAL;
  1097. if (len) {
  1098. new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
  1099. if (!new_alias)
  1100. return -ENOMEM;
  1101. memcpy(new_alias->ifalias, alias, len);
  1102. new_alias->ifalias[len] = 0;
  1103. }
  1104. mutex_lock(&ifalias_mutex);
  1105. rcu_swap_protected(dev->ifalias, new_alias,
  1106. mutex_is_locked(&ifalias_mutex));
  1107. mutex_unlock(&ifalias_mutex);
  1108. if (new_alias)
  1109. kfree_rcu(new_alias, rcuhead);
  1110. return len;
  1111. }
  1112. EXPORT_SYMBOL(dev_set_alias);
  1113. /**
  1114. * dev_get_alias - get ifalias of a device
  1115. * @dev: device
  1116. * @name: buffer to store name of ifalias
  1117. * @len: size of buffer
  1118. *
1119. * Get ifalias for a device. The caller must make sure dev cannot go
1120. * away, e.g. by holding the rcu read lock or owning a reference to the device.
  1121. */
  1122. int dev_get_alias(const struct net_device *dev, char *name, size_t len)
  1123. {
  1124. const struct dev_ifalias *alias;
  1125. int ret = 0;
  1126. rcu_read_lock();
  1127. alias = rcu_dereference(dev->ifalias);
  1128. if (alias)
  1129. ret = snprintf(name, len, "%s", alias->ifalias);
  1130. rcu_read_unlock();
  1131. return ret;
  1132. }
  1133. /**
  1134. * netdev_features_change - device changes features
  1135. * @dev: device to cause notification
  1136. *
  1137. * Called to indicate a device has changed features.
  1138. */
  1139. void netdev_features_change(struct net_device *dev)
  1140. {
  1141. call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
  1142. }
  1143. EXPORT_SYMBOL(netdev_features_change);
  1144. /**
  1145. * netdev_state_change - device changes state
  1146. * @dev: device to cause notification
  1147. *
  1148. * Called to indicate a device has changed state. This function calls
  1149. * the notifier chains for netdev_chain and sends a NEWLINK message
  1150. * to the routing socket.
  1151. */
  1152. void netdev_state_change(struct net_device *dev)
  1153. {
  1154. if (dev->flags & IFF_UP) {
  1155. struct netdev_notifier_change_info change_info = {
  1156. .info.dev = dev,
  1157. };
  1158. call_netdevice_notifiers_info(NETDEV_CHANGE,
  1159. &change_info.info);
  1160. rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
  1161. }
  1162. }
  1163. EXPORT_SYMBOL(netdev_state_change);
  1164. /**
  1165. * netdev_notify_peers - notify network peers about existence of @dev
  1166. * @dev: network device
  1167. *
  1168. * Generate traffic such that interested network peers are aware of
  1169. * @dev, such as by generating a gratuitous ARP. This may be used when
  1170. * a device wants to inform the rest of the network about some sort of
  1171. * reconfiguration such as a failover event or virtual machine
  1172. * migration.
  1173. */
  1174. void netdev_notify_peers(struct net_device *dev)
  1175. {
  1176. rtnl_lock();
  1177. call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
  1178. call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
  1179. rtnl_unlock();
  1180. }
  1181. EXPORT_SYMBOL(netdev_notify_peers);
  1182. static int __dev_open(struct net_device *dev)
  1183. {
  1184. const struct net_device_ops *ops = dev->netdev_ops;
  1185. int ret;
  1186. ASSERT_RTNL();
  1187. if (!netif_device_present(dev))
  1188. return -ENODEV;
  1189. /* Block netpoll from trying to do any rx path servicing.
  1190. * If we don't do this there is a chance ndo_poll_controller
  1191. * or ndo_poll may be running while we open the device
  1192. */
  1193. netpoll_poll_disable(dev);
  1194. ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
  1195. ret = notifier_to_errno(ret);
  1196. if (ret)
  1197. return ret;
  1198. set_bit(__LINK_STATE_START, &dev->state);
  1199. if (ops->ndo_validate_addr)
  1200. ret = ops->ndo_validate_addr(dev);
  1201. if (!ret && ops->ndo_open)
  1202. ret = ops->ndo_open(dev);
  1203. netpoll_poll_enable(dev);
  1204. if (ret)
  1205. clear_bit(__LINK_STATE_START, &dev->state);
  1206. else {
  1207. dev->flags |= IFF_UP;
  1208. dev_set_rx_mode(dev);
  1209. dev_activate(dev);
  1210. add_device_randomness(dev->dev_addr, dev->addr_len);
  1211. }
  1212. return ret;
  1213. }
  1214. /**
  1215. * dev_open - prepare an interface for use.
  1216. * @dev: device to open
  1217. *
  1218. * Takes a device from down to up state. The device's private open
  1219. * function is invoked and then the multicast lists are loaded. Finally
  1220. * the device is moved into the up state and a %NETDEV_UP message is
  1221. * sent to the netdev notifier chain.
  1222. *
  1223. * Calling this function on an active interface is a nop. On a failure
  1224. * a negative errno code is returned.
  1225. */
  1226. int dev_open(struct net_device *dev)
  1227. {
  1228. int ret;
  1229. if (dev->flags & IFF_UP)
  1230. return 0;
  1231. ret = __dev_open(dev);
  1232. if (ret < 0)
  1233. return ret;
  1234. rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
  1235. call_netdevice_notifiers(NETDEV_UP, dev);
  1236. return ret;
  1237. }
  1238. EXPORT_SYMBOL(dev_open);
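/*
 * Editor's sketch (not part of dev.c): bringing an interface up from
 * kernel code. dev_open() must be called with the RTNL lock held.
 */
        rtnl_lock();
        err = dev_open(dev);    /* 0 on success or if the device is already up */
        rtnl_unlock();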
  1239. static void __dev_close_many(struct list_head *head)
  1240. {
  1241. struct net_device *dev;
  1242. ASSERT_RTNL();
  1243. might_sleep();
  1244. list_for_each_entry(dev, head, close_list) {
  1245. /* Temporarily disable netpoll until the interface is down */
  1246. netpoll_poll_disable(dev);
  1247. call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
  1248. clear_bit(__LINK_STATE_START, &dev->state);
1249. /* Synchronize to scheduled poll. We cannot touch the poll list, it
1250. * can even be on a different cpu. So just clear netif_running().
  1251. *
1252. * dev->stop() will invoke napi_disable() on all of its
  1253. * napi_struct instances on this device.
  1254. */
  1255. smp_mb__after_atomic(); /* Commit netif_running(). */
  1256. }
  1257. dev_deactivate_many(head);
  1258. list_for_each_entry(dev, head, close_list) {
  1259. const struct net_device_ops *ops = dev->netdev_ops;
  1260. /*
1261. * Call the device specific close. This cannot fail.
1262. * It is only called if the device is UP.
  1263. *
  1264. * We allow it to be called even after a DETACH hot-plug
  1265. * event.
  1266. */
  1267. if (ops->ndo_stop)
  1268. ops->ndo_stop(dev);
  1269. dev->flags &= ~IFF_UP;
  1270. netpoll_poll_enable(dev);
  1271. }
  1272. }
  1273. static void __dev_close(struct net_device *dev)
  1274. {
  1275. LIST_HEAD(single);
  1276. list_add(&dev->close_list, &single);
  1277. __dev_close_many(&single);
  1278. list_del(&single);
  1279. }
  1280. void dev_close_many(struct list_head *head, bool unlink)
  1281. {
  1282. struct net_device *dev, *tmp;
  1283. /* Remove the devices that don't need to be closed */
  1284. list_for_each_entry_safe(dev, tmp, head, close_list)
  1285. if (!(dev->flags & IFF_UP))
  1286. list_del_init(&dev->close_list);
  1287. __dev_close_many(head);
  1288. list_for_each_entry_safe(dev, tmp, head, close_list) {
  1289. rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
  1290. call_netdevice_notifiers(NETDEV_DOWN, dev);
  1291. if (unlink)
  1292. list_del_init(&dev->close_list);
  1293. }
  1294. }
  1295. EXPORT_SYMBOL(dev_close_many);
  1296. /**
  1297. * dev_close - shutdown an interface.
  1298. * @dev: device to shutdown
  1299. *
  1300. * This function moves an active device into down state. A
  1301. * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
  1302. * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
  1303. * chain.
  1304. */
  1305. void dev_close(struct net_device *dev)
  1306. {
  1307. if (dev->flags & IFF_UP) {
  1308. LIST_HEAD(single);
  1309. list_add(&dev->close_list, &single);
  1310. dev_close_many(&single, true);
  1311. list_del(&single);
  1312. }
  1313. }
  1314. EXPORT_SYMBOL(dev_close);
  1315. /**
  1316. * dev_disable_lro - disable Large Receive Offload on a device
  1317. * @dev: device
  1318. *
  1319. * Disable Large Receive Offload (LRO) on a net device. Must be
  1320. * called under RTNL. This is needed if received packets may be
  1321. * forwarded to another interface.
  1322. */
  1323. void dev_disable_lro(struct net_device *dev)
  1324. {
  1325. struct net_device *lower_dev;
  1326. struct list_head *iter;
  1327. dev->wanted_features &= ~NETIF_F_LRO;
  1328. netdev_update_features(dev);
  1329. if (unlikely(dev->features & NETIF_F_LRO))
  1330. netdev_WARN(dev, "failed to disable LRO!\n");
  1331. netdev_for_each_lower_dev(dev, lower_dev, iter)
  1332. dev_disable_lro(lower_dev);
  1333. }
  1334. EXPORT_SYMBOL(dev_disable_lro);
  1335. /**
  1336. * dev_disable_gro_hw - disable HW Generic Receive Offload on a device
  1337. * @dev: device
  1338. *
  1339. * Disable HW Generic Receive Offload (GRO_HW) on a net device. Must be
  1340. * called under RTNL. This is needed if Generic XDP is installed on
  1341. * the device.
  1342. */
  1343. static void dev_disable_gro_hw(struct net_device *dev)
  1344. {
  1345. dev->wanted_features &= ~NETIF_F_GRO_HW;
  1346. netdev_update_features(dev);
  1347. if (unlikely(dev->features & NETIF_F_GRO_HW))
  1348. netdev_WARN(dev, "failed to disable GRO_HW!\n");
  1349. }
  1350. const char *netdev_cmd_to_name(enum netdev_cmd cmd)
  1351. {
  1352. #define N(val) \
  1353. case NETDEV_##val: \
  1354. return "NETDEV_" __stringify(val);
  1355. switch (cmd) {
  1356. N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
  1357. N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
  1358. N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
  1359. N(POST_INIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN) N(CHANGEUPPER)
  1360. N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA) N(BONDING_INFO)
  1361. N(PRECHANGEUPPER) N(CHANGELOWERSTATE) N(UDP_TUNNEL_PUSH_INFO)
  1362. N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
  1363. N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
  1364. N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
  1365. }
  1366. #undef N
  1367. return "UNKNOWN_NETDEV_EVENT";
  1368. }
  1369. EXPORT_SYMBOL_GPL(netdev_cmd_to_name);
  1370. static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
  1371. struct net_device *dev)
  1372. {
  1373. struct netdev_notifier_info info = {
  1374. .dev = dev,
  1375. };
  1376. return nb->notifier_call(nb, val, &info);
  1377. }
  1378. static int dev_boot_phase = 1;
  1379. /**
  1380. * register_netdevice_notifier - register a network notifier block
  1381. * @nb: notifier
  1382. *
  1383. * Register a notifier to be called when network device events occur.
  1384. * The notifier passed is linked into the kernel structures and must
  1385. * not be reused until it has been unregistered. A negative errno code
  1386. * is returned on a failure.
  1387. *
1388. * When registered, all registration and up events are replayed
1389. * to the new notifier to allow the notifier to have a race-free
1390. * view of the network device list.
  1391. */
  1392. int register_netdevice_notifier(struct notifier_block *nb)
  1393. {
  1394. struct net_device *dev;
  1395. struct net_device *last;
  1396. struct net *net;
  1397. int err;
  1398. /* Close race with setup_net() and cleanup_net() */
  1399. down_write(&pernet_ops_rwsem);
  1400. rtnl_lock();
  1401. err = raw_notifier_chain_register(&netdev_chain, nb);
  1402. if (err)
  1403. goto unlock;
  1404. if (dev_boot_phase)
  1405. goto unlock;
  1406. for_each_net(net) {
  1407. for_each_netdev(net, dev) {
  1408. err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
  1409. err = notifier_to_errno(err);
  1410. if (err)
  1411. goto rollback;
  1412. if (!(dev->flags & IFF_UP))
  1413. continue;
  1414. call_netdevice_notifier(nb, NETDEV_UP, dev);
  1415. }
  1416. }
  1417. unlock:
  1418. rtnl_unlock();
  1419. up_write(&pernet_ops_rwsem);
  1420. return err;
  1421. rollback:
  1422. last = dev;
  1423. for_each_net(net) {
  1424. for_each_netdev(net, dev) {
  1425. if (dev == last)
  1426. goto outroll;
  1427. if (dev->flags & IFF_UP) {
  1428. call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
  1429. dev);
  1430. call_netdevice_notifier(nb, NETDEV_DOWN, dev);
  1431. }
  1432. call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
  1433. }
  1434. }
  1435. outroll:
  1436. raw_notifier_chain_unregister(&netdev_chain, nb);
  1437. goto unlock;
  1438. }
  1439. EXPORT_SYMBOL(register_netdevice_notifier);
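/*
 * Editor's sketch (not part of dev.c): a minimal netdevice notifier.
 * The events handled and the example_* names are assumptions.
 */
static int example_netdev_event(struct notifier_block *nb,
                                unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        switch (event) {
        case NETDEV_UP:
                pr_info("%s: up\n", dev->name);
                break;
        case NETDEV_GOING_DOWN:
                pr_info("%s: going down\n", dev->name);
                break;
        }
        return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
        .notifier_call = example_netdev_event,
};

/* register_netdevice_notifier(&example_nb);   replays REGISTER/UP events    */
/* unregister_netdevice_notifier(&example_nb); synthesizes DOWN/UNREGISTER   */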
  1440. /**
  1441. * unregister_netdevice_notifier - unregister a network notifier block
  1442. * @nb: notifier
  1443. *
  1444. * Unregister a notifier previously registered by
1445. * register_netdevice_notifier(). The notifier is unlinked from the
  1446. * kernel structures and may then be reused. A negative errno code
  1447. * is returned on a failure.
  1448. *
1449. * After unregistering, unregister and down device events are synthesized
1450. * for all devices on the device list and sent to the removed notifier to
1451. * remove the need for special case cleanup code.
  1452. */
  1453. int unregister_netdevice_notifier(struct notifier_block *nb)
  1454. {
  1455. struct net_device *dev;
  1456. struct net *net;
  1457. int err;
  1458. /* Close race with setup_net() and cleanup_net() */
  1459. down_write(&pernet_ops_rwsem);
  1460. rtnl_lock();
  1461. err = raw_notifier_chain_unregister(&netdev_chain, nb);
  1462. if (err)
  1463. goto unlock;
  1464. for_each_net(net) {
  1465. for_each_netdev(net, dev) {
  1466. if (dev->flags & IFF_UP) {
  1467. call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
  1468. dev);
  1469. call_netdevice_notifier(nb, NETDEV_DOWN, dev);
  1470. }
  1471. call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
  1472. }
  1473. }
  1474. unlock:
  1475. rtnl_unlock();
  1476. up_write(&pernet_ops_rwsem);
  1477. return err;
  1478. }
  1479. EXPORT_SYMBOL(unregister_netdevice_notifier);
  1480. /**
  1481. * call_netdevice_notifiers_info - call all network notifier blocks
  1482. * @val: value passed unmodified to notifier function
  1483. * @info: notifier information data
  1484. *
  1485. * Call all network notifier blocks. Parameters and return value
  1486. * are as for raw_notifier_call_chain().
  1487. */
  1488. static int call_netdevice_notifiers_info(unsigned long val,
  1489. struct netdev_notifier_info *info)
  1490. {
  1491. ASSERT_RTNL();
  1492. return raw_notifier_call_chain(&netdev_chain, val, info);
  1493. }
  1494. /**
  1495. * call_netdevice_notifiers - call all network notifier blocks
  1496. * @val: value passed unmodified to notifier function
  1497. * @dev: net_device pointer passed unmodified to notifier function
  1498. *
  1499. * Call all network notifier blocks. Parameters and return value
  1500. * are as for raw_notifier_call_chain().
  1501. */
  1502. int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
  1503. {
  1504. struct netdev_notifier_info info = {
  1505. .dev = dev,
  1506. };
  1507. return call_netdevice_notifiers_info(val, &info);
  1508. }
  1509. EXPORT_SYMBOL(call_netdevice_notifiers);
  1510. #ifdef CONFIG_NET_INGRESS
  1511. static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);
  1512. void net_inc_ingress_queue(void)
  1513. {
  1514. static_branch_inc(&ingress_needed_key);
  1515. }
  1516. EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
  1517. void net_dec_ingress_queue(void)
  1518. {
  1519. static_branch_dec(&ingress_needed_key);
  1520. }
  1521. EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
  1522. #endif
  1523. #ifdef CONFIG_NET_EGRESS
  1524. static DEFINE_STATIC_KEY_FALSE(egress_needed_key);
  1525. void net_inc_egress_queue(void)
  1526. {
  1527. static_branch_inc(&egress_needed_key);
  1528. }
  1529. EXPORT_SYMBOL_GPL(net_inc_egress_queue);
  1530. void net_dec_egress_queue(void)
  1531. {
  1532. static_branch_dec(&egress_needed_key);
  1533. }
  1534. EXPORT_SYMBOL_GPL(net_dec_egress_queue);
  1535. #endif
  1536. static DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
  1537. #ifdef HAVE_JUMP_LABEL
  1538. static atomic_t netstamp_needed_deferred;
  1539. static atomic_t netstamp_wanted;
  1540. static void netstamp_clear(struct work_struct *work)
  1541. {
  1542. int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
  1543. int wanted;
  1544. wanted = atomic_add_return(deferred, &netstamp_wanted);
  1545. if (wanted > 0)
  1546. static_branch_enable(&netstamp_needed_key);
  1547. else
  1548. static_branch_disable(&netstamp_needed_key);
  1549. }
  1550. static DECLARE_WORK(netstamp_work, netstamp_clear);
  1551. #endif
  1552. void net_enable_timestamp(void)
  1553. {
  1554. #ifdef HAVE_JUMP_LABEL
  1555. int wanted;
  1556. while (1) {
  1557. wanted = atomic_read(&netstamp_wanted);
  1558. if (wanted <= 0)
  1559. break;
  1560. if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
  1561. return;
  1562. }
  1563. atomic_inc(&netstamp_needed_deferred);
  1564. schedule_work(&netstamp_work);
  1565. #else
  1566. static_branch_inc(&netstamp_needed_key);
  1567. #endif
  1568. }
  1569. EXPORT_SYMBOL(net_enable_timestamp);
  1570. void net_disable_timestamp(void)
  1571. {
  1572. #ifdef HAVE_JUMP_LABEL
  1573. int wanted;
  1574. while (1) {
  1575. wanted = atomic_read(&netstamp_wanted);
  1576. if (wanted <= 1)
  1577. break;
  1578. if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
  1579. return;
  1580. }
  1581. atomic_dec(&netstamp_needed_deferred);
  1582. schedule_work(&netstamp_work);
  1583. #else
  1584. static_branch_dec(&netstamp_needed_key);
  1585. #endif
  1586. }
  1587. EXPORT_SYMBOL(net_disable_timestamp);
  1588. static inline void net_timestamp_set(struct sk_buff *skb)
  1589. {
  1590. skb->tstamp = 0;
  1591. if (static_branch_unlikely(&netstamp_needed_key))
  1592. __net_timestamp(skb);
  1593. }
  1594. #define net_timestamp_check(COND, SKB) \
  1595. if (static_branch_unlikely(&netstamp_needed_key)) { \
  1596. if ((COND) && !(SKB)->tstamp) \
  1597. __net_timestamp(SKB); \
  1598. } \
  1599. bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
  1600. {
  1601. unsigned int len;
  1602. if (!(dev->flags & IFF_UP))
  1603. return false;
  1604. len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
  1605. if (skb->len <= len)
  1606. return true;
  1607. /* if TSO is enabled, we don't care about the length as the packet
  1608. * could be forwarded without being segmented before
  1609. */
  1610. if (skb_is_gso(skb))
  1611. return true;
  1612. return false;
  1613. }
  1614. EXPORT_SYMBOL_GPL(is_skb_forwardable);
  1615. int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
  1616. {
  1617. int ret = ____dev_forward_skb(dev, skb);
  1618. if (likely(!ret)) {
  1619. skb->protocol = eth_type_trans(skb, dev);
  1620. skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
  1621. }
  1622. return ret;
  1623. }
  1624. EXPORT_SYMBOL_GPL(__dev_forward_skb);
  1625. /**
  1626. * dev_forward_skb - loopback an skb to another netif
  1627. *
  1628. * @dev: destination network device
  1629. * @skb: buffer to forward
  1630. *
  1631. * return values:
  1632. * NET_RX_SUCCESS (no congestion)
  1633. * NET_RX_DROP (packet was dropped, but freed)
  1634. *
  1635. * dev_forward_skb can be used for injecting an skb from the
  1636. * start_xmit function of one device into the receive queue
  1637. * of another device.
  1638. *
  1639. * The receiving device may be in another namespace, so
  1640. * we have to clear all information in the skb that could
  1641. * impact namespace isolation.
  1642. */
  1643. int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
  1644. {
  1645. return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
  1646. }
  1647. EXPORT_SYMBOL_GPL(dev_forward_skb);
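/*
 * Editor's sketch (not part of dev.c): a veth-style transmit routine that
 * hands each packet to a peer device. example_get_peer() is a hypothetical
 * helper and the stats handling is illustrative only.
 */
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct net_device *peer = example_get_peer(dev);   /* assumed helper */

        if (!peer) {
                kfree_skb(skb);
                dev->stats.tx_dropped++;
                return NETDEV_TX_OK;
        }
        /* dev_forward_skb() scrubs the skb and frees it itself on failure */
        if (dev_forward_skb(peer, skb) != NET_RX_SUCCESS)
                dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
}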
  1648. static inline int deliver_skb(struct sk_buff *skb,
  1649. struct packet_type *pt_prev,
  1650. struct net_device *orig_dev)
  1651. {
  1652. if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
  1653. return -ENOMEM;
  1654. refcount_inc(&skb->users);
  1655. return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
  1656. }
  1657. static inline void deliver_ptype_list_skb(struct sk_buff *skb,
  1658. struct packet_type **pt,
  1659. struct net_device *orig_dev,
  1660. __be16 type,
  1661. struct list_head *ptype_list)
  1662. {
  1663. struct packet_type *ptype, *pt_prev = *pt;
  1664. list_for_each_entry_rcu(ptype, ptype_list, list) {
  1665. if (ptype->type != type)
  1666. continue;
  1667. if (pt_prev)
  1668. deliver_skb(skb, pt_prev, orig_dev);
  1669. pt_prev = ptype;
  1670. }
  1671. *pt = pt_prev;
  1672. }
  1673. static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
  1674. {
  1675. if (!ptype->af_packet_priv || !skb->sk)
  1676. return false;
  1677. if (ptype->id_match)
  1678. return ptype->id_match(ptype, skb->sk);
  1679. else if ((struct sock *)ptype->af_packet_priv == skb->sk)
  1680. return true;
  1681. return false;
  1682. }
  1683. /*
  1684. * Support routine. Sends outgoing frames to any network
  1685. * taps currently in use.
  1686. */
  1687. void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
  1688. {
  1689. struct packet_type *ptype;
  1690. struct sk_buff *skb2 = NULL;
  1691. struct packet_type *pt_prev = NULL;
  1692. struct list_head *ptype_list = &ptype_all;
  1693. rcu_read_lock();
  1694. again:
  1695. list_for_each_entry_rcu(ptype, ptype_list, list) {
  1696. /* Never send packets back to the socket
  1697. * they originated from - MvS (miquels@drinkel.ow.org)
  1698. */
  1699. if (skb_loop_sk(ptype, skb))
  1700. continue;
  1701. if (pt_prev) {
  1702. deliver_skb(skb2, pt_prev, skb->dev);
  1703. pt_prev = ptype;
  1704. continue;
  1705. }
  1706. /* need to clone skb, done only once */
  1707. skb2 = skb_clone(skb, GFP_ATOMIC);
  1708. if (!skb2)
  1709. goto out_unlock;
  1710. net_timestamp_set(skb2);
  1711. /* skb->nh should be correctly
  1712. * set by sender, so that the second statement is
  1713. * just protection against buggy protocols.
  1714. */
  1715. skb_reset_mac_header(skb2);
  1716. if (skb_network_header(skb2) < skb2->data ||
  1717. skb_network_header(skb2) > skb_tail_pointer(skb2)) {
  1718. net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
  1719. ntohs(skb2->protocol),
  1720. dev->name);
  1721. skb_reset_network_header(skb2);
  1722. }
  1723. skb2->transport_header = skb2->network_header;
  1724. skb2->pkt_type = PACKET_OUTGOING;
  1725. pt_prev = ptype;
  1726. }
  1727. if (ptype_list == &ptype_all) {
  1728. ptype_list = &dev->ptype_all;
  1729. goto again;
  1730. }
  1731. out_unlock:
  1732. if (pt_prev) {
  1733. if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
  1734. pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
  1735. else
  1736. kfree_skb(skb2);
  1737. }
  1738. rcu_read_unlock();
  1739. }
  1740. EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
  1741. /**
  1742. * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
  1743. * @dev: Network device
  1744. * @txq: number of queues available
  1745. *
  1746. * If real_num_tx_queues is changed the tc mappings may no longer be
  1747. * valid. To resolve this verify the tc mapping remains valid and if
1748. * not, NULL the mapping. With no priorities mapping to this
1749. * offset/count pair it will no longer be used. In the worst case, if TC0
1750. * is invalid nothing can be done, so disable priority mappings. It is
1751. * expected that drivers will fix this mapping if they can before
  1752. * calling netif_set_real_num_tx_queues.
  1753. */
  1754. static void netif_setup_tc(struct net_device *dev, unsigned int txq)
  1755. {
  1756. int i;
  1757. struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
  1758. /* If TC0 is invalidated disable TC mapping */
  1759. if (tc->offset + tc->count > txq) {
  1760. pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
  1761. dev->num_tc = 0;
  1762. return;
  1763. }
  1764. /* Invalidated prio to tc mappings set to TC0 */
  1765. for (i = 1; i < TC_BITMASK + 1; i++) {
  1766. int q = netdev_get_prio_tc_map(dev, i);
  1767. tc = &dev->tc_to_txq[q];
  1768. if (tc->offset + tc->count > txq) {
  1769. pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
  1770. i, q);
  1771. netdev_set_prio_tc_map(dev, i, 0);
  1772. }
  1773. }
  1774. }
  1775. int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
  1776. {
  1777. if (dev->num_tc) {
  1778. struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
  1779. int i;
  1780. /* walk through the TCs and see if it falls into any of them */
  1781. for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
  1782. if ((txq - tc->offset) < tc->count)
  1783. return i;
  1784. }
  1785. /* didn't find it, just return -1 to indicate no match */
  1786. return -1;
  1787. }
  1788. return 0;
  1789. }
  1790. EXPORT_SYMBOL(netdev_txq_to_tc);
  1791. #ifdef CONFIG_XPS
  1792. struct static_key xps_needed __read_mostly;
  1793. EXPORT_SYMBOL(xps_needed);
  1794. struct static_key xps_rxqs_needed __read_mostly;
  1795. EXPORT_SYMBOL(xps_rxqs_needed);
  1796. static DEFINE_MUTEX(xps_map_mutex);
  1797. #define xmap_dereference(P) \
  1798. rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
  1799. static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
  1800. int tci, u16 index)
  1801. {
  1802. struct xps_map *map = NULL;
  1803. int pos;
  1804. if (dev_maps)
  1805. map = xmap_dereference(dev_maps->attr_map[tci]);
  1806. if (!map)
  1807. return false;
  1808. for (pos = map->len; pos--;) {
  1809. if (map->queues[pos] != index)
  1810. continue;
  1811. if (map->len > 1) {
  1812. map->queues[pos] = map->queues[--map->len];
  1813. break;
  1814. }
  1815. RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
  1816. kfree_rcu(map, rcu);
  1817. return false;
  1818. }
  1819. return true;
  1820. }
  1821. static bool remove_xps_queue_cpu(struct net_device *dev,
  1822. struct xps_dev_maps *dev_maps,
  1823. int cpu, u16 offset, u16 count)
  1824. {
  1825. int num_tc = dev->num_tc ? : 1;
  1826. bool active = false;
  1827. int tci;
  1828. for (tci = cpu * num_tc; num_tc--; tci++) {
  1829. int i, j;
  1830. for (i = count, j = offset; i--; j++) {
  1831. if (!remove_xps_queue(dev_maps, tci, j))
  1832. break;
  1833. }
  1834. active |= i < 0;
  1835. }
  1836. return active;
  1837. }
  1838. static void clean_xps_maps(struct net_device *dev, const unsigned long *mask,
  1839. struct xps_dev_maps *dev_maps, unsigned int nr_ids,
  1840. u16 offset, u16 count, bool is_rxqs_map)
  1841. {
  1842. bool active = false;
  1843. int i, j;
  1844. for (j = -1; j = netif_attrmask_next(j, mask, nr_ids),
  1845. j < nr_ids;)
  1846. active |= remove_xps_queue_cpu(dev, dev_maps, j, offset,
  1847. count);
  1848. if (!active) {
  1849. if (is_rxqs_map) {
  1850. RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
  1851. } else {
  1852. RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
  1853. for (i = offset + (count - 1); count--; i--)
  1854. netdev_queue_numa_node_write(
  1855. netdev_get_tx_queue(dev, i),
  1856. NUMA_NO_NODE);
  1857. }
  1858. kfree_rcu(dev_maps, rcu);
  1859. }
  1860. }
  1861. static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
  1862. u16 count)
  1863. {
  1864. const unsigned long *possible_mask = NULL;
  1865. struct xps_dev_maps *dev_maps;
  1866. unsigned int nr_ids;
  1867. if (!static_key_false(&xps_needed))
  1868. return;
  1869. mutex_lock(&xps_map_mutex);
  1870. if (static_key_false(&xps_rxqs_needed)) {
  1871. dev_maps = xmap_dereference(dev->xps_rxqs_map);
  1872. if (dev_maps) {
  1873. nr_ids = dev->num_rx_queues;
  1874. clean_xps_maps(dev, possible_mask, dev_maps, nr_ids,
  1875. offset, count, true);
  1876. }
  1877. }
  1878. dev_maps = xmap_dereference(dev->xps_cpus_map);
  1879. if (!dev_maps)
  1880. goto out_no_maps;
  1881. if (num_possible_cpus() > 1)
  1882. possible_mask = cpumask_bits(cpu_possible_mask);
  1883. nr_ids = nr_cpu_ids;
  1884. clean_xps_maps(dev, possible_mask, dev_maps, nr_ids, offset, count,
  1885. false);
  1886. out_no_maps:
  1887. if (static_key_enabled(&xps_rxqs_needed))
  1888. static_key_slow_dec(&xps_rxqs_needed);
  1889. static_key_slow_dec(&xps_needed);
  1890. mutex_unlock(&xps_map_mutex);
  1891. }
  1892. static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
  1893. {
  1894. netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
  1895. }
  1896. static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index,
  1897. u16 index, bool is_rxqs_map)
  1898. {
  1899. struct xps_map *new_map;
  1900. int alloc_len = XPS_MIN_MAP_ALLOC;
  1901. int i, pos;
  1902. for (pos = 0; map && pos < map->len; pos++) {
  1903. if (map->queues[pos] != index)
  1904. continue;
  1905. return map;
  1906. }
  1907. /* Need to add tx-queue to this CPU's/rx-queue's existing map */
  1908. if (map) {
  1909. if (pos < map->alloc_len)
  1910. return map;
  1911. alloc_len = map->alloc_len * 2;
  1912. }
  1913. /* Need to allocate new map to store tx-queue on this CPU's/rx-queue's
  1914. * map
  1915. */
  1916. if (is_rxqs_map)
  1917. new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL);
  1918. else
  1919. new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
  1920. cpu_to_node(attr_index));
  1921. if (!new_map)
  1922. return NULL;
  1923. for (i = 0; i < pos; i++)
  1924. new_map->queues[i] = map->queues[i];
  1925. new_map->alloc_len = alloc_len;
  1926. new_map->len = pos;
  1927. return new_map;
  1928. }
  1929. int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
  1930. u16 index, bool is_rxqs_map)
  1931. {
  1932. const unsigned long *online_mask = NULL, *possible_mask = NULL;
  1933. struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
  1934. int i, j, tci, numa_node_id = -2;
  1935. int maps_sz, num_tc = 1, tc = 0;
  1936. struct xps_map *map, *new_map;
  1937. bool active = false;
  1938. unsigned int nr_ids;
  1939. if (dev->num_tc) {
  1940. /* Do not allow XPS on subordinate device directly */
  1941. num_tc = dev->num_tc;
  1942. if (num_tc < 0)
  1943. return -EINVAL;
  1944. /* If queue belongs to subordinate dev use its map */
  1945. dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
  1946. tc = netdev_txq_to_tc(dev, index);
  1947. if (tc < 0)
  1948. return -EINVAL;
  1949. }
  1950. mutex_lock(&xps_map_mutex);
  1951. if (is_rxqs_map) {
  1952. maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues);
  1953. dev_maps = xmap_dereference(dev->xps_rxqs_map);
  1954. nr_ids = dev->num_rx_queues;
  1955. } else {
  1956. maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc);
  1957. if (num_possible_cpus() > 1) {
  1958. online_mask = cpumask_bits(cpu_online_mask);
  1959. possible_mask = cpumask_bits(cpu_possible_mask);
  1960. }
  1961. dev_maps = xmap_dereference(dev->xps_cpus_map);
  1962. nr_ids = nr_cpu_ids;
  1963. }
  1964. if (maps_sz < L1_CACHE_BYTES)
  1965. maps_sz = L1_CACHE_BYTES;
  1966. /* allocate memory for queue storage */
  1967. for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids),
  1968. j < nr_ids;) {
  1969. if (!new_dev_maps)
  1970. new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
  1971. if (!new_dev_maps) {
  1972. mutex_unlock(&xps_map_mutex);
  1973. return -ENOMEM;
  1974. }
  1975. tci = j * num_tc + tc;
  1976. map = dev_maps ? xmap_dereference(dev_maps->attr_map[tci]) :
  1977. NULL;
  1978. map = expand_xps_map(map, j, index, is_rxqs_map);
  1979. if (!map)
  1980. goto error;
  1981. RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
  1982. }
  1983. if (!new_dev_maps)
  1984. goto out_no_new_maps;
  1985. static_key_slow_inc(&xps_needed);
  1986. if (is_rxqs_map)
  1987. static_key_slow_inc(&xps_rxqs_needed);
  1988. for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
  1989. j < nr_ids;) {
  1990. /* copy maps belonging to foreign traffic classes */
  1991. for (i = tc, tci = j * num_tc; dev_maps && i--; tci++) {
  1992. /* fill in the new device map from the old device map */
  1993. map = xmap_dereference(dev_maps->attr_map[tci]);
  1994. RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
  1995. }
1996. /* We need to explicitly update tci as the previous loop
  1997. * could break out early if dev_maps is NULL.
  1998. */
  1999. tci = j * num_tc + tc;
  2000. if (netif_attr_test_mask(j, mask, nr_ids) &&
  2001. netif_attr_test_online(j, online_mask, nr_ids)) {
  2002. /* add tx-queue to CPU/rx-queue maps */
  2003. int pos = 0;
  2004. map = xmap_dereference(new_dev_maps->attr_map[tci]);
  2005. while ((pos < map->len) && (map->queues[pos] != index))
  2006. pos++;
  2007. if (pos == map->len)
  2008. map->queues[map->len++] = index;
  2009. #ifdef CONFIG_NUMA
  2010. if (!is_rxqs_map) {
  2011. if (numa_node_id == -2)
  2012. numa_node_id = cpu_to_node(j);
  2013. else if (numa_node_id != cpu_to_node(j))
  2014. numa_node_id = -1;
  2015. }
  2016. #endif
  2017. } else if (dev_maps) {
  2018. /* fill in the new device map from the old device map */
  2019. map = xmap_dereference(dev_maps->attr_map[tci]);
  2020. RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
  2021. }
  2022. /* copy maps belonging to foreign traffic classes */
  2023. for (i = num_tc - tc, tci++; dev_maps && --i; tci++) {
  2024. /* fill in the new device map from the old device map */
  2025. map = xmap_dereference(dev_maps->attr_map[tci]);
  2026. RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
  2027. }
  2028. }
  2029. if (is_rxqs_map)
  2030. rcu_assign_pointer(dev->xps_rxqs_map, new_dev_maps);
  2031. else
  2032. rcu_assign_pointer(dev->xps_cpus_map, new_dev_maps);
  2033. /* Cleanup old maps */
  2034. if (!dev_maps)
  2035. goto out_no_old_maps;
  2036. for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
  2037. j < nr_ids;) {
  2038. for (i = num_tc, tci = j * num_tc; i--; tci++) {
  2039. new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
  2040. map = xmap_dereference(dev_maps->attr_map[tci]);
  2041. if (map && map != new_map)
  2042. kfree_rcu(map, rcu);
  2043. }
  2044. }
  2045. kfree_rcu(dev_maps, rcu);
  2046. out_no_old_maps:
  2047. dev_maps = new_dev_maps;
  2048. active = true;
  2049. out_no_new_maps:
  2050. if (!is_rxqs_map) {
  2051. /* update Tx queue numa node */
  2052. netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
  2053. (numa_node_id >= 0) ?
  2054. numa_node_id : NUMA_NO_NODE);
  2055. }
  2056. if (!dev_maps)
  2057. goto out_no_maps;
  2058. /* removes tx-queue from unused CPUs/rx-queues */
  2059. for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
  2060. j < nr_ids;) {
  2061. for (i = tc, tci = j * num_tc; i--; tci++)
  2062. active |= remove_xps_queue(dev_maps, tci, index);
  2063. if (!netif_attr_test_mask(j, mask, nr_ids) ||
  2064. !netif_attr_test_online(j, online_mask, nr_ids))
  2065. active |= remove_xps_queue(dev_maps, tci, index);
  2066. for (i = num_tc - tc, tci++; --i; tci++)
  2067. active |= remove_xps_queue(dev_maps, tci, index);
  2068. }
  2069. /* free map if not active */
  2070. if (!active) {
  2071. if (is_rxqs_map)
  2072. RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
  2073. else
  2074. RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
  2075. kfree_rcu(dev_maps, rcu);
  2076. }
  2077. out_no_maps:
  2078. mutex_unlock(&xps_map_mutex);
  2079. return 0;
  2080. error:
  2081. /* remove any maps that we added */
  2082. for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
  2083. j < nr_ids;) {
  2084. for (i = num_tc, tci = j * num_tc; i--; tci++) {
  2085. new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
  2086. map = dev_maps ?
  2087. xmap_dereference(dev_maps->attr_map[tci]) :
  2088. NULL;
  2089. if (new_map && new_map != map)
  2090. kfree(new_map);
  2091. }
  2092. }
  2093. mutex_unlock(&xps_map_mutex);
  2094. kfree(new_dev_maps);
  2095. return -ENOMEM;
  2096. }
  2097. int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
  2098. u16 index)
  2099. {
  2100. return __netif_set_xps_queue(dev, cpumask_bits(mask), index, false);
  2101. }
  2102. EXPORT_SYMBOL(netif_set_xps_queue);
  2103. #endif
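/* Illustrative sketch, not part of dev.c: one common way a driver might use
 * the exported XPS helper above, pinning each Tx queue to a single CPU. The
 * device pointer, queue count and CPU assignment policy are hypothetical.
 *
 *	static void example_setup_xps(struct net_device *netdev)
 *	{
 *		cpumask_var_t mask;
 *		int i;
 *
 *		if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 *			return;
 *		for (i = 0; i < netdev->real_num_tx_queues; i++) {
 *			cpumask_clear(mask);
 *			cpumask_set_cpu(i % num_online_cpus(), mask);
 *			netif_set_xps_queue(netdev, mask, i);
 *		}
 *		free_cpumask_var(mask);
 *	}
 */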
  2104. static void netdev_unbind_all_sb_channels(struct net_device *dev)
  2105. {
  2106. struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
  2107. /* Unbind any subordinate channels */
  2108. while (txq-- != &dev->_tx[0]) {
  2109. if (txq->sb_dev)
  2110. netdev_unbind_sb_channel(dev, txq->sb_dev);
  2111. }
  2112. }
  2113. void netdev_reset_tc(struct net_device *dev)
  2114. {
  2115. #ifdef CONFIG_XPS
  2116. netif_reset_xps_queues_gt(dev, 0);
  2117. #endif
  2118. netdev_unbind_all_sb_channels(dev);
  2119. /* Reset TC configuration of device */
  2120. dev->num_tc = 0;
  2121. memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
  2122. memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
  2123. }
  2124. EXPORT_SYMBOL(netdev_reset_tc);
  2125. int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
  2126. {
  2127. if (tc >= dev->num_tc)
  2128. return -EINVAL;
  2129. #ifdef CONFIG_XPS
  2130. netif_reset_xps_queues(dev, offset, count);
  2131. #endif
  2132. dev->tc_to_txq[tc].count = count;
  2133. dev->tc_to_txq[tc].offset = offset;
  2134. return 0;
  2135. }
  2136. EXPORT_SYMBOL(netdev_set_tc_queue);
  2137. int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
  2138. {
  2139. if (num_tc > TC_MAX_QUEUE)
  2140. return -EINVAL;
  2141. #ifdef CONFIG_XPS
  2142. netif_reset_xps_queues_gt(dev, 0);
  2143. #endif
  2144. netdev_unbind_all_sb_channels(dev);
  2145. dev->num_tc = num_tc;
  2146. return 0;
  2147. }
  2148. EXPORT_SYMBOL(netdev_set_num_tc);
  2149. void netdev_unbind_sb_channel(struct net_device *dev,
  2150. struct net_device *sb_dev)
  2151. {
  2152. struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
  2153. #ifdef CONFIG_XPS
  2154. netif_reset_xps_queues_gt(sb_dev, 0);
  2155. #endif
  2156. memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq));
  2157. memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map));
  2158. while (txq-- != &dev->_tx[0]) {
  2159. if (txq->sb_dev == sb_dev)
  2160. txq->sb_dev = NULL;
  2161. }
  2162. }
  2163. EXPORT_SYMBOL(netdev_unbind_sb_channel);
  2164. int netdev_bind_sb_channel_queue(struct net_device *dev,
  2165. struct net_device *sb_dev,
  2166. u8 tc, u16 count, u16 offset)
  2167. {
  2168. /* Make certain the sb_dev and dev are already configured */
  2169. if (sb_dev->num_tc >= 0 || tc >= dev->num_tc)
  2170. return -EINVAL;
  2171. /* We cannot hand out queues we don't have */
  2172. if ((offset + count) > dev->real_num_tx_queues)
  2173. return -EINVAL;
  2174. /* Record the mapping */
  2175. sb_dev->tc_to_txq[tc].count = count;
  2176. sb_dev->tc_to_txq[tc].offset = offset;
  2177. /* Provide a way for Tx queue to find the tc_to_txq map or
  2178. * XPS map for itself.
  2179. */
  2180. while (count--)
  2181. netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev;
  2182. return 0;
  2183. }
  2184. EXPORT_SYMBOL(netdev_bind_sb_channel_queue);
  2185. int netdev_set_sb_channel(struct net_device *dev, u16 channel)
  2186. {
  2187. /* Do not use a multiqueue device to represent a subordinate channel */
  2188. if (netif_is_multiqueue(dev))
  2189. return -ENODEV;
  2190. /* We allow channels 1 - 32767 to be used for subordinate channels.
  2191. * Channel 0 is meant to be "native" mode and used only to represent
  2192. * the main root device. We allow writing 0 to reset the device back
  2193. * to normal mode after being used as a subordinate channel.
  2194. */
  2195. if (channel > S16_MAX)
  2196. return -EINVAL;
  2197. dev->num_tc = -channel;
  2198. return 0;
  2199. }
  2200. EXPORT_SYMBOL(netdev_set_sb_channel);
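/* Hedged sketch (not from dev.c) of the setup order the checks above imply:
 * the subordinate device is first flagged with a non-zero channel (which
 * makes its num_tc negative), and only then may queues of the root device be
 * bound to it. It also assumes the root device already has traffic classes
 * configured. "macvlan_dev", "real_dev" and the tc/count/offset values are
 * hypothetical.
 *
 *	err = netdev_set_sb_channel(macvlan_dev, 1);
 *	if (!err)
 *		err = netdev_bind_sb_channel_queue(real_dev, macvlan_dev,
 *						   0, 4, 8);
 */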
  2201. /*
2202. * Routine to help set real_num_tx_queues. To avoid leaving stale skbs on
2203. * qdiscs mapped to queues beyond the new real_num_tx_queues, those qdiscs must be flushed.
  2204. */
  2205. int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
  2206. {
  2207. bool disabling;
  2208. int rc;
  2209. disabling = txq < dev->real_num_tx_queues;
  2210. if (txq < 1 || txq > dev->num_tx_queues)
  2211. return -EINVAL;
  2212. if (dev->reg_state == NETREG_REGISTERED ||
  2213. dev->reg_state == NETREG_UNREGISTERING) {
  2214. ASSERT_RTNL();
  2215. rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
  2216. txq);
  2217. if (rc)
  2218. return rc;
  2219. if (dev->num_tc)
  2220. netif_setup_tc(dev, txq);
  2221. dev->real_num_tx_queues = txq;
  2222. if (disabling) {
  2223. synchronize_net();
  2224. qdisc_reset_all_tx_gt(dev, txq);
  2225. #ifdef CONFIG_XPS
  2226. netif_reset_xps_queues_gt(dev, txq);
  2227. #endif
  2228. }
  2229. } else {
  2230. dev->real_num_tx_queues = txq;
  2231. }
  2232. return 0;
  2233. }
  2234. EXPORT_SYMBOL(netif_set_real_num_tx_queues);
  2235. #ifdef CONFIG_SYSFS
  2236. /**
  2237. * netif_set_real_num_rx_queues - set actual number of RX queues used
  2238. * @dev: Network device
  2239. * @rxq: Actual number of RX queues
  2240. *
  2241. * This must be called either with the rtnl_lock held or before
  2242. * registration of the net device. Returns 0 on success, or a
  2243. * negative error code. If called before registration, it always
  2244. * succeeds.
  2245. */
  2246. int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
  2247. {
  2248. int rc;
  2249. if (rxq < 1 || rxq > dev->num_rx_queues)
  2250. return -EINVAL;
  2251. if (dev->reg_state == NETREG_REGISTERED) {
  2252. ASSERT_RTNL();
  2253. rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
  2254. rxq);
  2255. if (rc)
  2256. return rc;
  2257. }
  2258. dev->real_num_rx_queues = rxq;
  2259. return 0;
  2260. }
  2261. EXPORT_SYMBOL(netif_set_real_num_rx_queues);
  2262. #endif
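/* Illustrative only: a driver resizing its channel count at runtime would
 * typically update both queue counts with the RTNL lock held (paths such as
 * ethtool's set_channels already hold it). "netdev", "new_channels" and the
 * surrounding error handling are hypothetical.
 *
 *	ASSERT_RTNL();
 *	err = netif_set_real_num_tx_queues(netdev, new_channels);
 *	if (!err)
 *		err = netif_set_real_num_rx_queues(netdev, new_channels);
 */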
  2263. /**
  2264. * netif_get_num_default_rss_queues - default number of RSS queues
  2265. *
  2266. * This routine should set an upper limit on the number of RSS queues
  2267. * used by default by multiqueue devices.
  2268. */
  2269. int netif_get_num_default_rss_queues(void)
  2270. {
  2271. return is_kdump_kernel() ?
  2272. 1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
  2273. }
  2274. EXPORT_SYMBOL(netif_get_num_default_rss_queues);
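/* Worked example, assuming DEFAULT_MAX_NUM_RSS_QUEUES is 8 (its usual
 * definition): a 64-CPU host still gets 8 default RSS queues, a 4-CPU host
 * gets 4, and a kdump kernel always gets 1 to keep memory use minimal.
 */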
  2275. static void __netif_reschedule(struct Qdisc *q)
  2276. {
  2277. struct softnet_data *sd;
  2278. unsigned long flags;
  2279. local_irq_save(flags);
  2280. sd = this_cpu_ptr(&softnet_data);
  2281. q->next_sched = NULL;
  2282. *sd->output_queue_tailp = q;
  2283. sd->output_queue_tailp = &q->next_sched;
  2284. raise_softirq_irqoff(NET_TX_SOFTIRQ);
  2285. local_irq_restore(flags);
  2286. }
  2287. void __netif_schedule(struct Qdisc *q)
  2288. {
  2289. if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
  2290. __netif_reschedule(q);
  2291. }
  2292. EXPORT_SYMBOL(__netif_schedule);
  2293. struct dev_kfree_skb_cb {
  2294. enum skb_free_reason reason;
  2295. };
  2296. static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
  2297. {
  2298. return (struct dev_kfree_skb_cb *)skb->cb;
  2299. }
  2300. void netif_schedule_queue(struct netdev_queue *txq)
  2301. {
  2302. rcu_read_lock();
  2303. if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
  2304. struct Qdisc *q = rcu_dereference(txq->qdisc);
  2305. __netif_schedule(q);
  2306. }
  2307. rcu_read_unlock();
  2308. }
  2309. EXPORT_SYMBOL(netif_schedule_queue);
  2310. void netif_tx_wake_queue(struct netdev_queue *dev_queue)
  2311. {
  2312. if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
  2313. struct Qdisc *q;
  2314. rcu_read_lock();
  2315. q = rcu_dereference(dev_queue->qdisc);
  2316. __netif_schedule(q);
  2317. rcu_read_unlock();
  2318. }
  2319. }
  2320. EXPORT_SYMBOL(netif_tx_wake_queue);
  2321. void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
  2322. {
  2323. unsigned long flags;
  2324. if (unlikely(!skb))
  2325. return;
  2326. if (likely(refcount_read(&skb->users) == 1)) {
  2327. smp_rmb();
  2328. refcount_set(&skb->users, 0);
  2329. } else if (likely(!refcount_dec_and_test(&skb->users))) {
  2330. return;
  2331. }
  2332. get_kfree_skb_cb(skb)->reason = reason;
  2333. local_irq_save(flags);
  2334. skb->next = __this_cpu_read(softnet_data.completion_queue);
  2335. __this_cpu_write(softnet_data.completion_queue, skb);
  2336. raise_softirq_irqoff(NET_TX_SOFTIRQ);
  2337. local_irq_restore(flags);
  2338. }
  2339. EXPORT_SYMBOL(__dev_kfree_skb_irq);
  2340. void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
  2341. {
  2342. if (in_irq() || irqs_disabled())
  2343. __dev_kfree_skb_irq(skb, reason);
  2344. else
  2345. dev_kfree_skb(skb);
  2346. }
  2347. EXPORT_SYMBOL(__dev_kfree_skb_any);
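/* Descriptive note, not in the original: the two helpers above let drivers
 * free transmitted skbs without worrying about context. __dev_kfree_skb_irq()
 * defers the free to the NET_TX softirq (safe from hard interrupt context),
 * while __dev_kfree_skb_any() picks the right path itself. A hypothetical Tx
 * completion handler would typically use the wrappers:
 *
 *	dev_consume_skb_any(skb);	// frame was sent successfully
 *	dev_kfree_skb_any(skb);		// frame was dropped
 */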
  2348. /**
  2349. * netif_device_detach - mark device as removed
  2350. * @dev: network device
  2351. *
2352. * Mark device as removed from the system and therefore no longer available.
  2353. */
  2354. void netif_device_detach(struct net_device *dev)
  2355. {
  2356. if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
  2357. netif_running(dev)) {
  2358. netif_tx_stop_all_queues(dev);
  2359. }
  2360. }
  2361. EXPORT_SYMBOL(netif_device_detach);
  2362. /**
  2363. * netif_device_attach - mark device as attached
  2364. * @dev: network device
  2365. *
2366. * Mark device as attached to the system and restart its queues if needed.
  2367. */
  2368. void netif_device_attach(struct net_device *dev)
  2369. {
  2370. if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
  2371. netif_running(dev)) {
  2372. netif_tx_wake_all_queues(dev);
  2373. __netdev_watchdog_up(dev);
  2374. }
  2375. }
  2376. EXPORT_SYMBOL(netif_device_attach);
  2377. /*
2378. * Returns a Tx hash based on the given packet descriptor and the number of
2379. * Tx queues to be used as a distribution range.
  2380. */
  2381. static u16 skb_tx_hash(const struct net_device *dev,
  2382. const struct net_device *sb_dev,
  2383. struct sk_buff *skb)
  2384. {
  2385. u32 hash;
  2386. u16 qoffset = 0;
  2387. u16 qcount = dev->real_num_tx_queues;
  2388. if (dev->num_tc) {
  2389. u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
  2390. qoffset = sb_dev->tc_to_txq[tc].offset;
  2391. qcount = sb_dev->tc_to_txq[tc].count;
  2392. }
  2393. if (skb_rx_queue_recorded(skb)) {
  2394. hash = skb_get_rx_queue(skb);
  2395. while (unlikely(hash >= qcount))
  2396. hash -= qcount;
  2397. return hash + qoffset;
  2398. }
  2399. return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
  2400. }
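/* Worked example for skb_tx_hash() above (numbers hypothetical): with
 * num_tc configured and the skb priority mapping to a tc whose range is
 * offset 8 / count 4, reciprocal_scale(hash, 4) yields 0..3, so the returned
 * queue is always one of 8..11. A recorded rx queue of 9 is folded into the
 * same range (9 - 4 - 4 = 1, plus offset 8 -> queue 9).
 */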
  2401. static void skb_warn_bad_offload(const struct sk_buff *skb)
  2402. {
  2403. static const netdev_features_t null_features;
  2404. struct net_device *dev = skb->dev;
  2405. const char *name = "";
  2406. if (!net_ratelimit())
  2407. return;
  2408. if (dev) {
  2409. if (dev->dev.parent)
  2410. name = dev_driver_string(dev->dev.parent);
  2411. else
  2412. name = netdev_name(dev);
  2413. }
  2414. WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
  2415. "gso_type=%d ip_summed=%d\n",
  2416. name, dev ? &dev->features : &null_features,
  2417. skb->sk ? &skb->sk->sk_route_caps : &null_features,
  2418. skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
  2419. skb_shinfo(skb)->gso_type, skb->ip_summed);
  2420. }
  2421. /*
  2422. * Invalidate hardware checksum when packet is to be mangled, and
  2423. * complete checksum manually on outgoing path.
  2424. */
  2425. int skb_checksum_help(struct sk_buff *skb)
  2426. {
  2427. __wsum csum;
  2428. int ret = 0, offset;
  2429. if (skb->ip_summed == CHECKSUM_COMPLETE)
  2430. goto out_set_summed;
  2431. if (unlikely(skb_shinfo(skb)->gso_size)) {
  2432. skb_warn_bad_offload(skb);
  2433. return -EINVAL;
  2434. }
  2435. /* Before computing a checksum, we should make sure no frag could
2436. * be modified by an external entity: the checksum could be wrong.
  2437. */
  2438. if (skb_has_shared_frag(skb)) {
  2439. ret = __skb_linearize(skb);
  2440. if (ret)
  2441. goto out;
  2442. }
  2443. offset = skb_checksum_start_offset(skb);
  2444. BUG_ON(offset >= skb_headlen(skb));
  2445. csum = skb_checksum(skb, offset, skb->len - offset, 0);
  2446. offset += skb->csum_offset;
  2447. BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
  2448. if (skb_cloned(skb) &&
  2449. !skb_clone_writable(skb, offset + sizeof(__sum16))) {
  2450. ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
  2451. if (ret)
  2452. goto out;
  2453. }
  2454. *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
  2455. out_set_summed:
  2456. skb->ip_summed = CHECKSUM_NONE;
  2457. out:
  2458. return ret;
  2459. }
  2460. EXPORT_SYMBOL(skb_checksum_help);
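/* Descriptive note, not in the original: the "?:" above matters for
 * protocols such as UDP where an all-zero checksum field means "no checksum
 * present". A computed sum that folds to zero is therefore stored as
 * CSUM_MANGLED_0 (0xffff), which represents the same value in ones'
 * complement arithmetic but can never be mistaken for "checksum absent".
 */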
  2461. int skb_crc32c_csum_help(struct sk_buff *skb)
  2462. {
  2463. __le32 crc32c_csum;
  2464. int ret = 0, offset, start;
  2465. if (skb->ip_summed != CHECKSUM_PARTIAL)
  2466. goto out;
  2467. if (unlikely(skb_is_gso(skb)))
  2468. goto out;
  2469. /* Before computing a checksum, we should make sure no frag could
2470. * be modified by an external entity: the checksum could be wrong.
  2471. */
  2472. if (unlikely(skb_has_shared_frag(skb))) {
  2473. ret = __skb_linearize(skb);
  2474. if (ret)
  2475. goto out;
  2476. }
  2477. start = skb_checksum_start_offset(skb);
  2478. offset = start + offsetof(struct sctphdr, checksum);
  2479. if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
  2480. ret = -EINVAL;
  2481. goto out;
  2482. }
  2483. if (skb_cloned(skb) &&
  2484. !skb_clone_writable(skb, offset + sizeof(__le32))) {
  2485. ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
  2486. if (ret)
  2487. goto out;
  2488. }
  2489. crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
  2490. skb->len - start, ~(__u32)0,
  2491. crc32c_csum_stub));
  2492. *(__le32 *)(skb->data + offset) = crc32c_csum;
  2493. skb->ip_summed = CHECKSUM_NONE;
  2494. skb->csum_not_inet = 0;
  2495. out:
  2496. return ret;
  2497. }
  2498. __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
  2499. {
  2500. __be16 type = skb->protocol;
  2501. /* Tunnel gso handlers can set protocol to ethernet. */
  2502. if (type == htons(ETH_P_TEB)) {
  2503. struct ethhdr *eth;
  2504. if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
  2505. return 0;
  2506. eth = (struct ethhdr *)skb->data;
  2507. type = eth->h_proto;
  2508. }
  2509. return __vlan_get_protocol(skb, type, depth);
  2510. }
  2511. /**
  2512. * skb_mac_gso_segment - mac layer segmentation handler.
  2513. * @skb: buffer to segment
  2514. * @features: features for the output path (see dev->features)
  2515. */
  2516. struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
  2517. netdev_features_t features)
  2518. {
  2519. struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
  2520. struct packet_offload *ptype;
  2521. int vlan_depth = skb->mac_len;
  2522. __be16 type = skb_network_protocol(skb, &vlan_depth);
  2523. if (unlikely(!type))
  2524. return ERR_PTR(-EINVAL);
  2525. __skb_pull(skb, vlan_depth);
  2526. rcu_read_lock();
  2527. list_for_each_entry_rcu(ptype, &offload_base, list) {
  2528. if (ptype->type == type && ptype->callbacks.gso_segment) {
  2529. segs = ptype->callbacks.gso_segment(skb, features);
  2530. break;
  2531. }
  2532. }
  2533. rcu_read_unlock();
  2534. __skb_push(skb, skb->data - skb_mac_header(skb));
  2535. return segs;
  2536. }
  2537. EXPORT_SYMBOL(skb_mac_gso_segment);
  2538. /* openvswitch calls this on rx path, so we need a different check.
  2539. */
  2540. static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
  2541. {
  2542. if (tx_path)
  2543. return skb->ip_summed != CHECKSUM_PARTIAL &&
  2544. skb->ip_summed != CHECKSUM_UNNECESSARY;
  2545. return skb->ip_summed == CHECKSUM_NONE;
  2546. }
  2547. /**
  2548. * __skb_gso_segment - Perform segmentation on skb.
  2549. * @skb: buffer to segment
  2550. * @features: features for the output path (see dev->features)
  2551. * @tx_path: whether it is called in TX path
  2552. *
  2553. * This function segments the given skb and returns a list of segments.
  2554. *
  2555. * It may return NULL if the skb requires no segmentation. This is
  2556. * only possible when GSO is used for verifying header integrity.
  2557. *
  2558. * Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb.
  2559. */
  2560. struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
  2561. netdev_features_t features, bool tx_path)
  2562. {
  2563. struct sk_buff *segs;
  2564. if (unlikely(skb_needs_check(skb, tx_path))) {
  2565. int err;
  2566. /* We're going to init ->check field in TCP or UDP header */
  2567. err = skb_cow_head(skb, 0);
  2568. if (err < 0)
  2569. return ERR_PTR(err);
  2570. }
  2571. /* Only report GSO partial support if it will enable us to
  2572. * support segmentation on this frame without needing additional
  2573. * work.
  2574. */
  2575. if (features & NETIF_F_GSO_PARTIAL) {
  2576. netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
  2577. struct net_device *dev = skb->dev;
  2578. partial_features |= dev->features & dev->gso_partial_features;
  2579. if (!skb_gso_ok(skb, features | partial_features))
  2580. features &= ~NETIF_F_GSO_PARTIAL;
  2581. }
  2582. BUILD_BUG_ON(SKB_SGO_CB_OFFSET +
  2583. sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
  2584. SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
  2585. SKB_GSO_CB(skb)->encap_level = 0;
  2586. skb_reset_mac_header(skb);
  2587. skb_reset_mac_len(skb);
  2588. segs = skb_mac_gso_segment(skb, features);
  2589. if (unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
  2590. skb_warn_bad_offload(skb);
  2591. return segs;
  2592. }
  2593. EXPORT_SYMBOL(__skb_gso_segment);
  2594. /* Take action when hardware reception checksum errors are detected. */
  2595. #ifdef CONFIG_BUG
  2596. void netdev_rx_csum_fault(struct net_device *dev)
  2597. {
  2598. if (net_ratelimit()) {
  2599. pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
  2600. dump_stack();
  2601. }
  2602. }
  2603. EXPORT_SYMBOL(netdev_rx_csum_fault);
  2604. #endif
  2605. /* XXX: check that highmem exists at all on the given machine. */
  2606. static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
  2607. {
  2608. #ifdef CONFIG_HIGHMEM
  2609. int i;
  2610. if (!(dev->features & NETIF_F_HIGHDMA)) {
  2611. for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  2612. skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
  2613. if (PageHighMem(skb_frag_page(frag)))
  2614. return 1;
  2615. }
  2616. }
  2617. #endif
  2618. return 0;
  2619. }
  2620. /* If MPLS offload request, verify we are testing hardware MPLS features
  2621. * instead of standard features for the netdev.
  2622. */
  2623. #if IS_ENABLED(CONFIG_NET_MPLS_GSO)
  2624. static netdev_features_t net_mpls_features(struct sk_buff *skb,
  2625. netdev_features_t features,
  2626. __be16 type)
  2627. {
  2628. if (eth_p_mpls(type))
  2629. features &= skb->dev->mpls_features;
  2630. return features;
  2631. }
  2632. #else
  2633. static netdev_features_t net_mpls_features(struct sk_buff *skb,
  2634. netdev_features_t features,
  2635. __be16 type)
  2636. {
  2637. return features;
  2638. }
  2639. #endif
  2640. static netdev_features_t harmonize_features(struct sk_buff *skb,
  2641. netdev_features_t features)
  2642. {
  2643. int tmp;
  2644. __be16 type;
  2645. type = skb_network_protocol(skb, &tmp);
  2646. features = net_mpls_features(skb, features, type);
  2647. if (skb->ip_summed != CHECKSUM_NONE &&
  2648. !can_checksum_protocol(features, type)) {
  2649. features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
  2650. }
  2651. if (illegal_highdma(skb->dev, skb))
  2652. features &= ~NETIF_F_SG;
  2653. return features;
  2654. }
  2655. netdev_features_t passthru_features_check(struct sk_buff *skb,
  2656. struct net_device *dev,
  2657. netdev_features_t features)
  2658. {
  2659. return features;
  2660. }
  2661. EXPORT_SYMBOL(passthru_features_check);
  2662. static netdev_features_t dflt_features_check(struct sk_buff *skb,
  2663. struct net_device *dev,
  2664. netdev_features_t features)
  2665. {
  2666. return vlan_features_check(skb, features);
  2667. }
  2668. static netdev_features_t gso_features_check(const struct sk_buff *skb,
  2669. struct net_device *dev,
  2670. netdev_features_t features)
  2671. {
  2672. u16 gso_segs = skb_shinfo(skb)->gso_segs;
  2673. if (gso_segs > dev->gso_max_segs)
  2674. return features & ~NETIF_F_GSO_MASK;
  2675. /* Support for GSO partial features requires software
  2676. * intervention before we can actually process the packets
  2677. * so we need to strip support for any partial features now
  2678. * and we can pull them back in after we have partially
  2679. * segmented the frame.
  2680. */
  2681. if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
  2682. features &= ~dev->gso_partial_features;
  2683. /* Make sure to clear the IPv4 ID mangling feature if the
  2684. * IPv4 header has the potential to be fragmented.
  2685. */
  2686. if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
  2687. struct iphdr *iph = skb->encapsulation ?
  2688. inner_ip_hdr(skb) : ip_hdr(skb);
  2689. if (!(iph->frag_off & htons(IP_DF)))
  2690. features &= ~NETIF_F_TSO_MANGLEID;
  2691. }
  2692. return features;
  2693. }
  2694. netdev_features_t netif_skb_features(struct sk_buff *skb)
  2695. {
  2696. struct net_device *dev = skb->dev;
  2697. netdev_features_t features = dev->features;
  2698. if (skb_is_gso(skb))
  2699. features = gso_features_check(skb, dev, features);
  2700. /* If encapsulation offload request, verify we are testing
  2701. * hardware encapsulation features instead of standard
  2702. * features for the netdev
  2703. */
  2704. if (skb->encapsulation)
  2705. features &= dev->hw_enc_features;
  2706. if (skb_vlan_tagged(skb))
  2707. features = netdev_intersect_features(features,
  2708. dev->vlan_features |
  2709. NETIF_F_HW_VLAN_CTAG_TX |
  2710. NETIF_F_HW_VLAN_STAG_TX);
  2711. if (dev->netdev_ops->ndo_features_check)
  2712. features &= dev->netdev_ops->ndo_features_check(skb, dev,
  2713. features);
  2714. else
  2715. features &= dflt_features_check(skb, dev, features);
  2716. return harmonize_features(skb, features);
  2717. }
  2718. EXPORT_SYMBOL(netif_skb_features);
  2719. static int xmit_one(struct sk_buff *skb, struct net_device *dev,
  2720. struct netdev_queue *txq, bool more)
  2721. {
  2722. unsigned int len;
  2723. int rc;
  2724. if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
  2725. dev_queue_xmit_nit(skb, dev);
  2726. len = skb->len;
  2727. trace_net_dev_start_xmit(skb, dev);
  2728. rc = netdev_start_xmit(skb, dev, txq, more);
  2729. trace_net_dev_xmit(skb, rc, dev, len);
  2730. return rc;
  2731. }
  2732. struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
  2733. struct netdev_queue *txq, int *ret)
  2734. {
  2735. struct sk_buff *skb = first;
  2736. int rc = NETDEV_TX_OK;
  2737. while (skb) {
  2738. struct sk_buff *next = skb->next;
  2739. skb->next = NULL;
  2740. rc = xmit_one(skb, dev, txq, next != NULL);
  2741. if (unlikely(!dev_xmit_complete(rc))) {
  2742. skb->next = next;
  2743. goto out;
  2744. }
  2745. skb = next;
  2746. if (netif_xmit_stopped(txq) && skb) {
  2747. rc = NETDEV_TX_BUSY;
  2748. break;
  2749. }
  2750. }
  2751. out:
  2752. *ret = rc;
  2753. return skb;
  2754. }
  2755. static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
  2756. netdev_features_t features)
  2757. {
  2758. if (skb_vlan_tag_present(skb) &&
  2759. !vlan_hw_offload_capable(features, skb->vlan_proto))
  2760. skb = __vlan_hwaccel_push_inside(skb);
  2761. return skb;
  2762. }
  2763. int skb_csum_hwoffload_help(struct sk_buff *skb,
  2764. const netdev_features_t features)
  2765. {
  2766. if (unlikely(skb->csum_not_inet))
  2767. return !!(features & NETIF_F_SCTP_CRC) ? 0 :
  2768. skb_crc32c_csum_help(skb);
  2769. return !!(features & NETIF_F_CSUM_MASK) ? 0 : skb_checksum_help(skb);
  2770. }
  2771. EXPORT_SYMBOL(skb_csum_hwoffload_help);
  2772. static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
  2773. {
  2774. netdev_features_t features;
  2775. features = netif_skb_features(skb);
  2776. skb = validate_xmit_vlan(skb, features);
  2777. if (unlikely(!skb))
  2778. goto out_null;
  2779. skb = sk_validate_xmit_skb(skb, dev);
  2780. if (unlikely(!skb))
  2781. goto out_null;
  2782. if (netif_needs_gso(skb, features)) {
  2783. struct sk_buff *segs;
  2784. segs = skb_gso_segment(skb, features);
  2785. if (IS_ERR(segs)) {
  2786. goto out_kfree_skb;
  2787. } else if (segs) {
  2788. consume_skb(skb);
  2789. skb = segs;
  2790. }
  2791. } else {
  2792. if (skb_needs_linearize(skb, features) &&
  2793. __skb_linearize(skb))
  2794. goto out_kfree_skb;
  2795. /* If packet is not checksummed and device does not
  2796. * support checksumming for this protocol, complete
  2797. * checksumming here.
  2798. */
  2799. if (skb->ip_summed == CHECKSUM_PARTIAL) {
  2800. if (skb->encapsulation)
  2801. skb_set_inner_transport_header(skb,
  2802. skb_checksum_start_offset(skb));
  2803. else
  2804. skb_set_transport_header(skb,
  2805. skb_checksum_start_offset(skb));
  2806. if (skb_csum_hwoffload_help(skb, features))
  2807. goto out_kfree_skb;
  2808. }
  2809. }
  2810. skb = validate_xmit_xfrm(skb, features, again);
  2811. return skb;
  2812. out_kfree_skb:
  2813. kfree_skb(skb);
  2814. out_null:
  2815. atomic_long_inc(&dev->tx_dropped);
  2816. return NULL;
  2817. }
  2818. struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
  2819. {
  2820. struct sk_buff *next, *head = NULL, *tail;
  2821. for (; skb != NULL; skb = next) {
  2822. next = skb->next;
  2823. skb->next = NULL;
2824. /* in case skb won't be segmented, point to itself */
  2825. skb->prev = skb;
  2826. skb = validate_xmit_skb(skb, dev, again);
  2827. if (!skb)
  2828. continue;
  2829. if (!head)
  2830. head = skb;
  2831. else
  2832. tail->next = skb;
  2833. /* If skb was segmented, skb->prev points to
  2834. * the last segment. If not, it still contains skb.
  2835. */
  2836. tail = skb->prev;
  2837. }
  2838. return head;
  2839. }
  2840. EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
  2841. static void qdisc_pkt_len_init(struct sk_buff *skb)
  2842. {
  2843. const struct skb_shared_info *shinfo = skb_shinfo(skb);
  2844. qdisc_skb_cb(skb)->pkt_len = skb->len;
2845. /* To get a more precise estimate of the bytes sent on the wire,
2846. * we add the header size of all segments to pkt_len
  2847. */
  2848. if (shinfo->gso_size) {
  2849. unsigned int hdr_len;
  2850. u16 gso_segs = shinfo->gso_segs;
  2851. /* mac layer + network layer */
  2852. hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
  2853. /* + transport layer */
  2854. if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
  2855. const struct tcphdr *th;
  2856. struct tcphdr _tcphdr;
  2857. th = skb_header_pointer(skb, skb_transport_offset(skb),
  2858. sizeof(_tcphdr), &_tcphdr);
  2859. if (likely(th))
  2860. hdr_len += __tcp_hdrlen(th);
  2861. } else {
  2862. struct udphdr _udphdr;
  2863. if (skb_header_pointer(skb, skb_transport_offset(skb),
  2864. sizeof(_udphdr), &_udphdr))
  2865. hdr_len += sizeof(struct udphdr);
  2866. }
  2867. if (shinfo->gso_type & SKB_GSO_DODGY)
  2868. gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
  2869. shinfo->gso_size);
  2870. qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
  2871. }
  2872. }
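/* Worked example for qdisc_pkt_len_init() with hypothetical numbers: a TCP
 * GSO skb carrying 4344 bytes of payload with gso_size = 1448 and 66 bytes
 * of headers (14 MAC + 20 IP + 32 TCP) has skb->len = 4410 and gso_segs = 3,
 * so pkt_len becomes 4410 + (3 - 1) * 66 = 4542 -- exactly the three
 * 1514-byte frames the device will put on the wire after segmentation.
 */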
  2873. static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
  2874. struct net_device *dev,
  2875. struct netdev_queue *txq)
  2876. {
  2877. spinlock_t *root_lock = qdisc_lock(q);
  2878. struct sk_buff *to_free = NULL;
  2879. bool contended;
  2880. int rc;
  2881. qdisc_calculate_pkt_len(skb, q);
  2882. if (q->flags & TCQ_F_NOLOCK) {
  2883. if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
  2884. __qdisc_drop(skb, &to_free);
  2885. rc = NET_XMIT_DROP;
  2886. } else {
  2887. rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
  2888. qdisc_run(q);
  2889. }
  2890. if (unlikely(to_free))
  2891. kfree_skb_list(to_free);
  2892. return rc;
  2893. }
  2894. /*
2895. * Heuristic to force contended enqueues to serialize on a
2896. * separate lock before trying to get the qdisc main lock.
2897. * This permits the qdisc->running owner to get the lock more
  2898. * often and dequeue packets faster.
  2899. */
  2900. contended = qdisc_is_running(q);
  2901. if (unlikely(contended))
  2902. spin_lock(&q->busylock);
  2903. spin_lock(root_lock);
  2904. if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
  2905. __qdisc_drop(skb, &to_free);
  2906. rc = NET_XMIT_DROP;
  2907. } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
  2908. qdisc_run_begin(q)) {
  2909. /*
  2910. * This is a work-conserving queue; there are no old skbs
  2911. * waiting to be sent out; and the qdisc is not running -
  2912. * xmit the skb directly.
  2913. */
  2914. qdisc_bstats_update(q, skb);
  2915. if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
  2916. if (unlikely(contended)) {
  2917. spin_unlock(&q->busylock);
  2918. contended = false;
  2919. }
  2920. __qdisc_run(q);
  2921. }
  2922. qdisc_run_end(q);
  2923. rc = NET_XMIT_SUCCESS;
  2924. } else {
  2925. rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
  2926. if (qdisc_run_begin(q)) {
  2927. if (unlikely(contended)) {
  2928. spin_unlock(&q->busylock);
  2929. contended = false;
  2930. }
  2931. __qdisc_run(q);
  2932. qdisc_run_end(q);
  2933. }
  2934. }
  2935. spin_unlock(root_lock);
  2936. if (unlikely(to_free))
  2937. kfree_skb_list(to_free);
  2938. if (unlikely(contended))
  2939. spin_unlock(&q->busylock);
  2940. return rc;
  2941. }
  2942. #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
  2943. static void skb_update_prio(struct sk_buff *skb)
  2944. {
  2945. const struct netprio_map *map;
  2946. const struct sock *sk;
  2947. unsigned int prioidx;
  2948. if (skb->priority)
  2949. return;
  2950. map = rcu_dereference_bh(skb->dev->priomap);
  2951. if (!map)
  2952. return;
  2953. sk = skb_to_full_sk(skb);
  2954. if (!sk)
  2955. return;
  2956. prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
  2957. if (prioidx < map->priomap_len)
  2958. skb->priority = map->priomap[prioidx];
  2959. }
  2960. #else
  2961. #define skb_update_prio(skb)
  2962. #endif
  2963. DEFINE_PER_CPU(int, xmit_recursion);
  2964. EXPORT_SYMBOL(xmit_recursion);
  2965. /**
  2966. * dev_loopback_xmit - loop back @skb
  2967. * @net: network namespace this loopback is happening in
  2968. * @sk: sk needed to be a netfilter okfn
  2969. * @skb: buffer to transmit
  2970. */
  2971. int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
  2972. {
  2973. skb_reset_mac_header(skb);
  2974. __skb_pull(skb, skb_network_offset(skb));
  2975. skb->pkt_type = PACKET_LOOPBACK;
  2976. skb->ip_summed = CHECKSUM_UNNECESSARY;
  2977. WARN_ON(!skb_dst(skb));
  2978. skb_dst_force(skb);
  2979. netif_rx_ni(skb);
  2980. return 0;
  2981. }
  2982. EXPORT_SYMBOL(dev_loopback_xmit);
  2983. #ifdef CONFIG_NET_EGRESS
  2984. static struct sk_buff *
  2985. sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
  2986. {
  2987. struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress);
  2988. struct tcf_result cl_res;
  2989. if (!miniq)
  2990. return skb;
  2991. /* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
  2992. mini_qdisc_bstats_cpu_update(miniq, skb);
  2993. switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
  2994. case TC_ACT_OK:
  2995. case TC_ACT_RECLASSIFY:
  2996. skb->tc_index = TC_H_MIN(cl_res.classid);
  2997. break;
  2998. case TC_ACT_SHOT:
  2999. mini_qdisc_qstats_cpu_drop(miniq);
  3000. *ret = NET_XMIT_DROP;
  3001. kfree_skb(skb);
  3002. return NULL;
  3003. case TC_ACT_STOLEN:
  3004. case TC_ACT_QUEUED:
  3005. case TC_ACT_TRAP:
  3006. *ret = NET_XMIT_SUCCESS;
  3007. consume_skb(skb);
  3008. return NULL;
  3009. case TC_ACT_REDIRECT:
  3010. /* No need to push/pop skb's mac_header here on egress! */
  3011. skb_do_redirect(skb);
  3012. *ret = NET_XMIT_SUCCESS;
  3013. return NULL;
  3014. default:
  3015. break;
  3016. }
  3017. return skb;
  3018. }
  3019. #endif /* CONFIG_NET_EGRESS */
  3020. #ifdef CONFIG_XPS
  3021. static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
  3022. struct xps_dev_maps *dev_maps, unsigned int tci)
  3023. {
  3024. struct xps_map *map;
  3025. int queue_index = -1;
  3026. if (dev->num_tc) {
  3027. tci *= dev->num_tc;
  3028. tci += netdev_get_prio_tc_map(dev, skb->priority);
  3029. }
  3030. map = rcu_dereference(dev_maps->attr_map[tci]);
  3031. if (map) {
  3032. if (map->len == 1)
  3033. queue_index = map->queues[0];
  3034. else
  3035. queue_index = map->queues[reciprocal_scale(
  3036. skb_get_hash(skb), map->len)];
  3037. if (unlikely(queue_index >= dev->real_num_tx_queues))
  3038. queue_index = -1;
  3039. }
  3040. return queue_index;
  3041. }
  3042. #endif
  3043. static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
  3044. struct sk_buff *skb)
  3045. {
  3046. #ifdef CONFIG_XPS
  3047. struct xps_dev_maps *dev_maps;
  3048. struct sock *sk = skb->sk;
  3049. int queue_index = -1;
  3050. if (!static_key_false(&xps_needed))
  3051. return -1;
  3052. rcu_read_lock();
  3053. if (!static_key_false(&xps_rxqs_needed))
  3054. goto get_cpus_map;
  3055. dev_maps = rcu_dereference(sb_dev->xps_rxqs_map);
  3056. if (dev_maps) {
  3057. int tci = sk_rx_queue_get(sk);
  3058. if (tci >= 0 && tci < dev->num_rx_queues)
  3059. queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
  3060. tci);
  3061. }
  3062. get_cpus_map:
  3063. if (queue_index < 0) {
  3064. dev_maps = rcu_dereference(sb_dev->xps_cpus_map);
  3065. if (dev_maps) {
  3066. unsigned int tci = skb->sender_cpu - 1;
  3067. queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
  3068. tci);
  3069. }
  3070. }
  3071. rcu_read_unlock();
  3072. return queue_index;
  3073. #else
  3074. return -1;
  3075. #endif
  3076. }
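/* Worked example (hypothetical numbers) for the tci arithmetic used by
 * __get_xps_queue_idx() above: with dev->num_tc = 4, a sending CPU index of
 * 3 and a priority mapping to tc 2, the map slot consulted is
 * tci = 3 * 4 + 2 = 14. A one-entry map there selects its queue directly;
 * a longer map is indexed by reciprocal_scale(skb_get_hash(skb), map->len).
 */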
  3077. u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
  3078. void *accel_priv, select_queue_fallback_t fallback)
  3079. {
  3080. return 0;
  3081. }
  3082. EXPORT_SYMBOL(dev_pick_tx_zero);
  3083. u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
  3084. void *accel_priv, select_queue_fallback_t fallback)
  3085. {
  3086. return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
  3087. }
  3088. EXPORT_SYMBOL(dev_pick_tx_cpu_id);
  3089. static u16 ___netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
  3090. struct net_device *sb_dev)
  3091. {
  3092. struct sock *sk = skb->sk;
  3093. int queue_index = sk_tx_queue_get(sk);
  3094. sb_dev = sb_dev ? : dev;
  3095. if (queue_index < 0 || skb->ooo_okay ||
  3096. queue_index >= dev->real_num_tx_queues) {
  3097. int new_index = get_xps_queue(dev, sb_dev, skb);
  3098. if (new_index < 0)
  3099. new_index = skb_tx_hash(dev, sb_dev, skb);
  3100. if (queue_index != new_index && sk &&
  3101. sk_fullsock(sk) &&
  3102. rcu_access_pointer(sk->sk_dst_cache))
  3103. sk_tx_queue_set(sk, new_index);
  3104. queue_index = new_index;
  3105. }
  3106. return queue_index;
  3107. }
  3108. static u16 __netdev_pick_tx(struct net_device *dev,
  3109. struct sk_buff *skb)
  3110. {
  3111. return ___netdev_pick_tx(dev, skb, NULL);
  3112. }
  3113. struct netdev_queue *netdev_pick_tx(struct net_device *dev,
  3114. struct sk_buff *skb,
  3115. struct net_device *sb_dev)
  3116. {
  3117. int queue_index = 0;
  3118. #ifdef CONFIG_XPS
  3119. u32 sender_cpu = skb->sender_cpu - 1;
  3120. if (sender_cpu >= (u32)NR_CPUS)
  3121. skb->sender_cpu = raw_smp_processor_id() + 1;
  3122. #endif
  3123. if (dev->real_num_tx_queues != 1) {
  3124. const struct net_device_ops *ops = dev->netdev_ops;
  3125. if (ops->ndo_select_queue)
  3126. queue_index = ops->ndo_select_queue(dev, skb, sb_dev,
  3127. __netdev_pick_tx);
  3128. else
  3129. queue_index = ___netdev_pick_tx(dev, skb, sb_dev);
  3130. queue_index = netdev_cap_txqueue(dev, queue_index);
  3131. }
  3132. skb_set_queue_mapping(skb, queue_index);
  3133. return netdev_get_tx_queue(dev, queue_index);
  3134. }
  3135. /**
  3136. * __dev_queue_xmit - transmit a buffer
  3137. * @skb: buffer to transmit
3138. * @sb_dev: subordinate device used for L2 forwarding offload
  3139. *
  3140. * Queue a buffer for transmission to a network device. The caller must
  3141. * have set the device and priority and built the buffer before calling
  3142. * this function. The function can be called from an interrupt.
  3143. *
  3144. * A negative errno code is returned on a failure. A success does not
  3145. * guarantee the frame will be transmitted as it may be dropped due
  3146. * to congestion or traffic shaping.
  3147. *
  3148. * -----------------------------------------------------------------------------------
  3149. * I notice this method can also return errors from the queue disciplines,
  3150. * including NET_XMIT_DROP, which is a positive value. So, errors can also
  3151. * be positive.
  3152. *
  3153. * Regardless of the return value, the skb is consumed, so it is currently
  3154. * difficult to retry a send to this method. (You can bump the ref count
  3155. * before sending to hold a reference for retry if you are careful.)
  3156. *
  3157. * When calling this method, interrupts MUST be enabled. This is because
  3158. * the BH enable code must have IRQs enabled so that it will not deadlock.
  3159. * --BLG
  3160. */
  3161. static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
  3162. {
  3163. struct net_device *dev = skb->dev;
  3164. struct netdev_queue *txq;
  3165. struct Qdisc *q;
  3166. int rc = -ENOMEM;
  3167. bool again = false;
  3168. skb_reset_mac_header(skb);
  3169. if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
  3170. __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
  3171. /* Disable soft irqs for various locks below. Also
  3172. * stops preemption for RCU.
  3173. */
  3174. rcu_read_lock_bh();
  3175. skb_update_prio(skb);
  3176. qdisc_pkt_len_init(skb);
  3177. #ifdef CONFIG_NET_CLS_ACT
  3178. skb->tc_at_ingress = 0;
  3179. # ifdef CONFIG_NET_EGRESS
  3180. if (static_branch_unlikely(&egress_needed_key)) {
  3181. skb = sch_handle_egress(skb, &rc, dev);
  3182. if (!skb)
  3183. goto out;
  3184. }
  3185. # endif
  3186. #endif
  3187. /* If device/qdisc don't need skb->dst, release it right now while
3188. * it's hot in this CPU's cache.
  3189. */
  3190. if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
  3191. skb_dst_drop(skb);
  3192. else
  3193. skb_dst_force(skb);
  3194. txq = netdev_pick_tx(dev, skb, sb_dev);
  3195. q = rcu_dereference_bh(txq->qdisc);
  3196. trace_net_dev_queue(skb);
  3197. if (q->enqueue) {
  3198. rc = __dev_xmit_skb(skb, q, dev, txq);
  3199. goto out;
  3200. }
  3201. /* The device has no queue. Common case for software devices:
  3202. * loopback, all the sorts of tunnels...
  3203. * Really, it is unlikely that netif_tx_lock protection is necessary
3204. * here. (e.g. loopback and IP tunnels are clean, ignoring statistics
3205. * counters.)
3206. * However, it is possible that they rely on the protection
3207. * made by us here.
3208. * Check this and take the lock. It is not prone to deadlocks.
3209. * Either way, the noqueue qdisc case is even simpler 8)
  3210. */
  3211. if (dev->flags & IFF_UP) {
  3212. int cpu = smp_processor_id(); /* ok because BHs are off */
  3213. if (txq->xmit_lock_owner != cpu) {
  3214. if (unlikely(__this_cpu_read(xmit_recursion) >
  3215. XMIT_RECURSION_LIMIT))
  3216. goto recursion_alert;
  3217. skb = validate_xmit_skb(skb, dev, &again);
  3218. if (!skb)
  3219. goto out;
  3220. HARD_TX_LOCK(dev, txq, cpu);
  3221. if (!netif_xmit_stopped(txq)) {
  3222. __this_cpu_inc(xmit_recursion);
  3223. skb = dev_hard_start_xmit(skb, dev, txq, &rc);
  3224. __this_cpu_dec(xmit_recursion);
  3225. if (dev_xmit_complete(rc)) {
  3226. HARD_TX_UNLOCK(dev, txq);
  3227. goto out;
  3228. }
  3229. }
  3230. HARD_TX_UNLOCK(dev, txq);
  3231. net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
  3232. dev->name);
  3233. } else {
  3234. /* Recursion is detected! It is possible,
  3235. * unfortunately
  3236. */
  3237. recursion_alert:
  3238. net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
  3239. dev->name);
  3240. }
  3241. }
  3242. rc = -ENETDOWN;
  3243. rcu_read_unlock_bh();
  3244. atomic_long_inc(&dev->tx_dropped);
  3245. kfree_skb_list(skb);
  3246. return rc;
  3247. out:
  3248. rcu_read_unlock_bh();
  3249. return rc;
  3250. }
  3251. int dev_queue_xmit(struct sk_buff *skb)
  3252. {
  3253. return __dev_queue_xmit(skb, NULL);
  3254. }
  3255. EXPORT_SYMBOL(dev_queue_xmit);
  3256. int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev)
  3257. {
  3258. return __dev_queue_xmit(skb, sb_dev);
  3259. }
  3260. EXPORT_SYMBOL(dev_queue_xmit_accel);
  3261. int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
  3262. {
  3263. struct net_device *dev = skb->dev;
  3264. struct sk_buff *orig_skb = skb;
  3265. struct netdev_queue *txq;
  3266. int ret = NETDEV_TX_BUSY;
  3267. bool again = false;
  3268. if (unlikely(!netif_running(dev) ||
  3269. !netif_carrier_ok(dev)))
  3270. goto drop;
  3271. skb = validate_xmit_skb_list(skb, dev, &again);
  3272. if (skb != orig_skb)
  3273. goto drop;
  3274. skb_set_queue_mapping(skb, queue_id);
  3275. txq = skb_get_tx_queue(dev, skb);
  3276. local_bh_disable();
  3277. HARD_TX_LOCK(dev, txq, smp_processor_id());
  3278. if (!netif_xmit_frozen_or_drv_stopped(txq))
  3279. ret = netdev_start_xmit(skb, dev, txq, false);
  3280. HARD_TX_UNLOCK(dev, txq);
  3281. local_bh_enable();
  3282. if (!dev_xmit_complete(ret))
  3283. kfree_skb(skb);
  3284. return ret;
  3285. drop:
  3286. atomic_long_inc(&dev->tx_dropped);
  3287. kfree_skb_list(skb);
  3288. return NET_XMIT_DROP;
  3289. }
  3290. EXPORT_SYMBOL(dev_direct_xmit);
  3291. /*************************************************************************
  3292. * Receiver routines
  3293. *************************************************************************/
  3294. int netdev_max_backlog __read_mostly = 1000;
  3295. EXPORT_SYMBOL(netdev_max_backlog);
  3296. int netdev_tstamp_prequeue __read_mostly = 1;
  3297. int netdev_budget __read_mostly = 300;
  3298. unsigned int __read_mostly netdev_budget_usecs = 2000;
  3299. int weight_p __read_mostly = 64; /* old backlog weight */
  3300. int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */
  3301. int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */
  3302. int dev_rx_weight __read_mostly = 64;
  3303. int dev_tx_weight __read_mostly = 64;
  3304. /* Called with irq disabled */
  3305. static inline void ____napi_schedule(struct softnet_data *sd,
  3306. struct napi_struct *napi)
  3307. {
  3308. list_add_tail(&napi->poll_list, &sd->poll_list);
  3309. __raise_softirq_irqoff(NET_RX_SOFTIRQ);
  3310. }
  3311. #ifdef CONFIG_RPS
  3312. /* One global table that all flow-based protocols share. */
  3313. struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
  3314. EXPORT_SYMBOL(rps_sock_flow_table);
  3315. u32 rps_cpu_mask __read_mostly;
  3316. EXPORT_SYMBOL(rps_cpu_mask);
  3317. struct static_key rps_needed __read_mostly;
  3318. EXPORT_SYMBOL(rps_needed);
  3319. struct static_key rfs_needed __read_mostly;
  3320. EXPORT_SYMBOL(rfs_needed);
  3321. static struct rps_dev_flow *
  3322. set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
  3323. struct rps_dev_flow *rflow, u16 next_cpu)
  3324. {
  3325. if (next_cpu < nr_cpu_ids) {
  3326. #ifdef CONFIG_RFS_ACCEL
  3327. struct netdev_rx_queue *rxqueue;
  3328. struct rps_dev_flow_table *flow_table;
  3329. struct rps_dev_flow *old_rflow;
  3330. u32 flow_id;
  3331. u16 rxq_index;
  3332. int rc;
  3333. /* Should we steer this flow to a different hardware queue? */
  3334. if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
  3335. !(dev->features & NETIF_F_NTUPLE))
  3336. goto out;
  3337. rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
  3338. if (rxq_index == skb_get_rx_queue(skb))
  3339. goto out;
  3340. rxqueue = dev->_rx + rxq_index;
  3341. flow_table = rcu_dereference(rxqueue->rps_flow_table);
  3342. if (!flow_table)
  3343. goto out;
  3344. flow_id = skb_get_hash(skb) & flow_table->mask;
  3345. rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
  3346. rxq_index, flow_id);
  3347. if (rc < 0)
  3348. goto out;
  3349. old_rflow = rflow;
  3350. rflow = &flow_table->flows[flow_id];
  3351. rflow->filter = rc;
  3352. if (old_rflow->filter == rflow->filter)
  3353. old_rflow->filter = RPS_NO_FILTER;
  3354. out:
  3355. #endif
  3356. rflow->last_qtail =
  3357. per_cpu(softnet_data, next_cpu).input_queue_head;
  3358. }
  3359. rflow->cpu = next_cpu;
  3360. return rflow;
  3361. }
  3362. /*
  3363. * get_rps_cpu is called from netif_receive_skb and returns the target
  3364. * CPU from the RPS map of the receiving queue for a given skb.
  3365. * rcu_read_lock must be held on entry.
  3366. */
  3367. static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
  3368. struct rps_dev_flow **rflowp)
  3369. {
  3370. const struct rps_sock_flow_table *sock_flow_table;
  3371. struct netdev_rx_queue *rxqueue = dev->_rx;
  3372. struct rps_dev_flow_table *flow_table;
  3373. struct rps_map *map;
  3374. int cpu = -1;
  3375. u32 tcpu;
  3376. u32 hash;
  3377. if (skb_rx_queue_recorded(skb)) {
  3378. u16 index = skb_get_rx_queue(skb);
  3379. if (unlikely(index >= dev->real_num_rx_queues)) {
  3380. WARN_ONCE(dev->real_num_rx_queues > 1,
  3381. "%s received packet on queue %u, but number "
  3382. "of RX queues is %u\n",
  3383. dev->name, index, dev->real_num_rx_queues);
  3384. goto done;
  3385. }
  3386. rxqueue += index;
  3387. }
  3388. /* Avoid computing hash if RFS/RPS is not active for this rxqueue */
  3389. flow_table = rcu_dereference(rxqueue->rps_flow_table);
  3390. map = rcu_dereference(rxqueue->rps_map);
  3391. if (!flow_table && !map)
  3392. goto done;
  3393. skb_reset_network_header(skb);
  3394. hash = skb_get_hash(skb);
  3395. if (!hash)
  3396. goto done;
  3397. sock_flow_table = rcu_dereference(rps_sock_flow_table);
  3398. if (flow_table && sock_flow_table) {
  3399. struct rps_dev_flow *rflow;
  3400. u32 next_cpu;
  3401. u32 ident;
  3402. /* First check into global flow table if there is a match */
  3403. ident = sock_flow_table->ents[hash & sock_flow_table->mask];
  3404. if ((ident ^ hash) & ~rps_cpu_mask)
  3405. goto try_rps;
  3406. next_cpu = ident & rps_cpu_mask;
  3407. /* OK, now we know there is a match,
  3408. * we can look at the local (per receive queue) flow table
  3409. */
  3410. rflow = &flow_table->flows[hash & flow_table->mask];
  3411. tcpu = rflow->cpu;
  3412. /*
  3413. * If the desired CPU (where last recvmsg was done) is
  3414. * different from current CPU (one in the rx-queue flow
  3415. * table entry), switch if one of the following holds:
  3416. * - Current CPU is unset (>= nr_cpu_ids).
  3417. * - Current CPU is offline.
  3418. * - The current CPU's queue tail has advanced beyond the
  3419. * last packet that was enqueued using this table entry.
  3420. * This guarantees that all previous packets for the flow
  3421. * have been dequeued, thus preserving in order delivery.
  3422. */
  3423. if (unlikely(tcpu != next_cpu) &&
  3424. (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
  3425. ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
  3426. rflow->last_qtail)) >= 0)) {
  3427. tcpu = next_cpu;
  3428. rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
  3429. }
  3430. if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
  3431. *rflowp = rflow;
  3432. cpu = tcpu;
  3433. goto done;
  3434. }
  3435. }
  3436. try_rps:
  3437. if (map) {
  3438. tcpu = map->cpus[reciprocal_scale(hash, map->len)];
  3439. if (cpu_online(tcpu)) {
  3440. cpu = tcpu;
  3441. goto done;
  3442. }
  3443. }
  3444. done:
  3445. return cpu;
  3446. }
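/* Illustrative layout of the global sock flow table entries consulted above
 * (values hypothetical). Each 32-bit entry packs the CPU of the last
 * recvmsg() in the low bits (rps_cpu_mask) and the upper bits of the flow
 * hash above them:
 *
 *	ident = sock_flow_table->ents[hash & sock_flow_table->mask];
 *	if ((ident ^ hash) & ~rps_cpu_mask)	// upper bits disagree: no match
 *		goto try_rps;
 *	next_cpu = ident & rps_cpu_mask;	// otherwise the desired CPU
 *
 * e.g. with rps_cpu_mask = 0xff, hash = 0x12345678 and a socket last read on
 * CPU 5, the stored entry is 0x12345605 and next_cpu decodes back to 5.
 */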
  3447. #ifdef CONFIG_RFS_ACCEL
  3448. /**
  3449. * rps_may_expire_flow - check whether an RFS hardware filter may be removed
  3450. * @dev: Device on which the filter was set
  3451. * @rxq_index: RX queue index
  3452. * @flow_id: Flow ID passed to ndo_rx_flow_steer()
  3453. * @filter_id: Filter ID returned by ndo_rx_flow_steer()
  3454. *
  3455. * Drivers that implement ndo_rx_flow_steer() should periodically call
  3456. * this function for each installed filter and remove the filters for
  3457. * which it returns %true.
  3458. */
  3459. bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
  3460. u32 flow_id, u16 filter_id)
  3461. {
  3462. struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
  3463. struct rps_dev_flow_table *flow_table;
  3464. struct rps_dev_flow *rflow;
  3465. bool expire = true;
  3466. unsigned int cpu;
  3467. rcu_read_lock();
  3468. flow_table = rcu_dereference(rxqueue->rps_flow_table);
  3469. if (flow_table && flow_id <= flow_table->mask) {
  3470. rflow = &flow_table->flows[flow_id];
  3471. cpu = READ_ONCE(rflow->cpu);
  3472. if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
  3473. ((int)(per_cpu(softnet_data, cpu).input_queue_head -
  3474. rflow->last_qtail) <
  3475. (int)(10 * flow_table->mask)))
  3476. expire = false;
  3477. }
  3478. rcu_read_unlock();
  3479. return expire;
  3480. }
  3481. EXPORT_SYMBOL(rps_may_expire_flow);
  3482. #endif /* CONFIG_RFS_ACCEL */
  3483. /* Called from hardirq (IPI) context */
  3484. static void rps_trigger_softirq(void *data)
  3485. {
  3486. struct softnet_data *sd = data;
  3487. ____napi_schedule(sd, &sd->backlog);
  3488. sd->received_rps++;
  3489. }
  3490. #endif /* CONFIG_RPS */
  3491. /*
3492. * Check if this softnet_data structure belongs to another CPU.
3493. * If yes, queue it to our IPI list and return 1;
3494. * if not, return 0.
  3495. */
  3496. static int rps_ipi_queued(struct softnet_data *sd)
  3497. {
  3498. #ifdef CONFIG_RPS
  3499. struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
  3500. if (sd != mysd) {
  3501. sd->rps_ipi_next = mysd->rps_ipi_list;
  3502. mysd->rps_ipi_list = sd;
  3503. __raise_softirq_irqoff(NET_RX_SOFTIRQ);
  3504. return 1;
  3505. }
  3506. #endif /* CONFIG_RPS */
  3507. return 0;
  3508. }
  3509. #ifdef CONFIG_NET_FLOW_LIMIT
  3510. int netdev_flow_limit_table_len __read_mostly = (1 << 12);
  3511. #endif
  3512. static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
  3513. {
  3514. #ifdef CONFIG_NET_FLOW_LIMIT
  3515. struct sd_flow_limit *fl;
  3516. struct softnet_data *sd;
  3517. unsigned int old_flow, new_flow;
  3518. if (qlen < (netdev_max_backlog >> 1))
  3519. return false;
  3520. sd = this_cpu_ptr(&softnet_data);
  3521. rcu_read_lock();
  3522. fl = rcu_dereference(sd->flow_limit);
  3523. if (fl) {
  3524. new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
  3525. old_flow = fl->history[fl->history_head];
  3526. fl->history[fl->history_head] = new_flow;
  3527. fl->history_head++;
  3528. fl->history_head &= FLOW_LIMIT_HISTORY - 1;
  3529. if (likely(fl->buckets[old_flow]))
  3530. fl->buckets[old_flow]--;
  3531. if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
  3532. fl->count++;
  3533. rcu_read_unlock();
  3534. return true;
  3535. }
  3536. }
  3537. rcu_read_unlock();
  3538. #endif
  3539. return false;
  3540. }
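/* Brief illustration of the flow-limit accounting above (numbers
 * hypothetical). The per-CPU history records the buckets of the last
 * FLOW_LIMIT_HISTORY (128) enqueued skbs; once the backlog is more than half
 * full, a single flow owning more than half of that history -- say 65 of the
 * last 128 skbs hashing to the same bucket -- is reported as over limit and
 * its packets are dropped, while lighter flows keep being enqueued.
 */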
  3541. /*
  3542. * enqueue_to_backlog is called to queue an skb to a per CPU backlog
  3543. * queue (may be a remote CPU queue).
  3544. */
  3545. static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
  3546. unsigned int *qtail)
  3547. {
  3548. struct softnet_data *sd;
  3549. unsigned long flags;
  3550. unsigned int qlen;
  3551. sd = &per_cpu(softnet_data, cpu);
  3552. local_irq_save(flags);
  3553. rps_lock(sd);
  3554. if (!netif_running(skb->dev))
  3555. goto drop;
  3556. qlen = skb_queue_len(&sd->input_pkt_queue);
  3557. if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
  3558. if (qlen) {
  3559. enqueue:
  3560. __skb_queue_tail(&sd->input_pkt_queue, skb);
  3561. input_queue_tail_incr_save(sd, qtail);
  3562. rps_unlock(sd);
  3563. local_irq_restore(flags);
  3564. return NET_RX_SUCCESS;
  3565. }
3566. /* Schedule NAPI for the backlog device.
3567. * We can use a non-atomic operation since we own the queue lock
  3568. */
  3569. if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
  3570. if (!rps_ipi_queued(sd))
  3571. ____napi_schedule(sd, &sd->backlog);
  3572. }
  3573. goto enqueue;
  3574. }
  3575. drop:
  3576. sd->dropped++;
  3577. rps_unlock(sd);
  3578. local_irq_restore(flags);
  3579. atomic_long_inc(&skb->dev->rx_dropped);
  3580. kfree_skb(skb);
  3581. return NET_RX_DROP;
  3582. }
  3583. static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
  3584. {
  3585. struct net_device *dev = skb->dev;
  3586. struct netdev_rx_queue *rxqueue;
  3587. rxqueue = dev->_rx;
  3588. if (skb_rx_queue_recorded(skb)) {
  3589. u16 index = skb_get_rx_queue(skb);
  3590. if (unlikely(index >= dev->real_num_rx_queues)) {
  3591. WARN_ONCE(dev->real_num_rx_queues > 1,
  3592. "%s received packet on queue %u, but number "
  3593. "of RX queues is %u\n",
  3594. dev->name, index, dev->real_num_rx_queues);
  3595. return rxqueue; /* Return first rxqueue */
  3596. }
  3597. rxqueue += index;
  3598. }
  3599. return rxqueue;
  3600. }
  3601. static u32 netif_receive_generic_xdp(struct sk_buff *skb,
  3602. struct xdp_buff *xdp,
  3603. struct bpf_prog *xdp_prog)
  3604. {
  3605. struct netdev_rx_queue *rxqueue;
  3606. void *orig_data, *orig_data_end;
  3607. u32 metalen, act = XDP_DROP;
  3608. int hlen, off;
  3609. u32 mac_len;
  3610. /* Reinjected packets coming from act_mirred or similar should
  3611. * not get XDP generic processing.
  3612. */
  3613. if (skb_cloned(skb))
  3614. return XDP_PASS;
  3615. /* XDP packets must be linear and must have sufficient headroom
  3616. * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
  3617. * native XDP provides, thus we need to do it here as well.
  3618. */
  3619. if (skb_is_nonlinear(skb) ||
  3620. skb_headroom(skb) < XDP_PACKET_HEADROOM) {
  3621. int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
  3622. int troom = skb->tail + skb->data_len - skb->end;
  3623. /* In case we have to go down the path and also linearize,
3624. * then let's do the pskb_expand_head() work just once here.
  3625. */
  3626. if (pskb_expand_head(skb,
  3627. hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
  3628. troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
  3629. goto do_drop;
  3630. if (skb_linearize(skb))
  3631. goto do_drop;
  3632. }
  3633. /* The XDP program wants to see the packet starting at the MAC
  3634. * header.
  3635. */
  3636. mac_len = skb->data - skb_mac_header(skb);
  3637. hlen = skb_headlen(skb) + mac_len;
  3638. xdp->data = skb->data - mac_len;
  3639. xdp->data_meta = xdp->data;
  3640. xdp->data_end = xdp->data + hlen;
  3641. xdp->data_hard_start = skb->data - skb_headroom(skb);
  3642. orig_data_end = xdp->data_end;
  3643. orig_data = xdp->data;
  3644. rxqueue = netif_get_rxqueue(skb);
  3645. xdp->rxq = &rxqueue->xdp_rxq;
  3646. act = bpf_prog_run_xdp(xdp_prog, xdp);
  3647. off = xdp->data - orig_data;
  3648. if (off > 0)
  3649. __skb_pull(skb, off);
  3650. else if (off < 0)
  3651. __skb_push(skb, -off);
  3652. skb->mac_header += off;
3653. /* Check if bpf_xdp_adjust_tail was used; it can only "shrink"
3654. * the packet.
  3655. */
  3656. off = orig_data_end - xdp->data_end;
  3657. if (off != 0) {
  3658. skb_set_tail_pointer(skb, xdp->data_end - xdp->data);
  3659. skb->len -= off;
  3660. }
  3661. switch (act) {
  3662. case XDP_REDIRECT:
  3663. case XDP_TX:
  3664. __skb_push(skb, mac_len);
  3665. break;
  3666. case XDP_PASS:
  3667. metalen = xdp->data - xdp->data_meta;
  3668. if (metalen)
  3669. skb_metadata_set(skb, metalen);
  3670. break;
  3671. default:
  3672. bpf_warn_invalid_xdp_action(act);
  3673. /* fall through */
  3674. case XDP_ABORTED:
  3675. trace_xdp_exception(skb->dev, xdp_prog, act);
  3676. /* fall through */
  3677. case XDP_DROP:
  3678. do_drop:
  3679. kfree_skb(skb);
  3680. break;
  3681. }
  3682. return act;
  3683. }
  3684. /* When doing generic XDP we have to bypass the qdisc layer and the
  3685. * network taps in order to match in-driver-XDP behavior.
  3686. */
  3687. void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
  3688. {
  3689. struct net_device *dev = skb->dev;
  3690. struct netdev_queue *txq;
  3691. bool free_skb = true;
  3692. int cpu, rc;
  3693. txq = netdev_pick_tx(dev, skb, NULL);
  3694. cpu = smp_processor_id();
  3695. HARD_TX_LOCK(dev, txq, cpu);
  3696. if (!netif_xmit_stopped(txq)) {
  3697. rc = netdev_start_xmit(skb, dev, txq, 0);
  3698. if (dev_xmit_complete(rc))
  3699. free_skb = false;
  3700. }
  3701. HARD_TX_UNLOCK(dev, txq);
  3702. if (free_skb) {
  3703. trace_xdp_exception(dev, xdp_prog, XDP_TX);
  3704. kfree_skb(skb);
  3705. }
  3706. }
  3707. EXPORT_SYMBOL_GPL(generic_xdp_tx);
  3708. static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);
  3709. int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
  3710. {
  3711. if (xdp_prog) {
  3712. struct xdp_buff xdp;
  3713. u32 act;
  3714. int err;
  3715. act = netif_receive_generic_xdp(skb, &xdp, xdp_prog);
  3716. if (act != XDP_PASS) {
  3717. switch (act) {
  3718. case XDP_REDIRECT:
  3719. err = xdp_do_generic_redirect(skb->dev, skb,
  3720. &xdp, xdp_prog);
  3721. if (err)
  3722. goto out_redir;
  3723. break;
  3724. case XDP_TX:
  3725. generic_xdp_tx(skb, xdp_prog);
  3726. break;
  3727. }
  3728. return XDP_DROP;
  3729. }
  3730. }
  3731. return XDP_PASS;
  3732. out_redir:
  3733. kfree_skb(skb);
  3734. return XDP_DROP;
  3735. }
  3736. EXPORT_SYMBOL_GPL(do_xdp_generic);
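/*
 * A minimal sketch of how a driver without native XDP support might run
 * its received packets through the generic XDP hook before handing them
 * to the stack, mirroring the sequence netif_rx_internal() uses below.
 * my_deliver_skb() is a hypothetical helper, not a real kernel symbol.
 */
static int my_deliver_skb(struct sk_buff *skb)
{
	int ret;

	/* Generic XDP must run with preemption disabled and under RCU. */
	preempt_disable();
	rcu_read_lock();
	ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
	rcu_read_unlock();
	preempt_enable();

	/* XDP_TX/XDP_REDIRECT consumed the skb; XDP_DROP already freed it. */
	if (ret != XDP_PASS)
		return NET_RX_DROP;

	return netif_rx(skb);
}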
  3737. static int netif_rx_internal(struct sk_buff *skb)
  3738. {
  3739. int ret;
  3740. net_timestamp_check(netdev_tstamp_prequeue, skb);
  3741. trace_netif_rx(skb);
  3742. if (static_branch_unlikely(&generic_xdp_needed_key)) {
  3743. int ret;
  3744. preempt_disable();
  3745. rcu_read_lock();
  3746. ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
  3747. rcu_read_unlock();
  3748. preempt_enable();
  3749. /* Consider XDP consuming the packet a success from
3750. * the netdev point of view; we do not want to count
  3751. * this as an error.
  3752. */
  3753. if (ret != XDP_PASS)
  3754. return NET_RX_SUCCESS;
  3755. }
  3756. #ifdef CONFIG_RPS
  3757. if (static_key_false(&rps_needed)) {
  3758. struct rps_dev_flow voidflow, *rflow = &voidflow;
  3759. int cpu;
  3760. preempt_disable();
  3761. rcu_read_lock();
  3762. cpu = get_rps_cpu(skb->dev, skb, &rflow);
  3763. if (cpu < 0)
  3764. cpu = smp_processor_id();
  3765. ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
  3766. rcu_read_unlock();
  3767. preempt_enable();
  3768. } else
  3769. #endif
  3770. {
  3771. unsigned int qtail;
  3772. ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
  3773. put_cpu();
  3774. }
  3775. return ret;
  3776. }
  3777. /**
  3778. * netif_rx - post buffer to the network code
  3779. * @skb: buffer to post
  3780. *
  3781. * This function receives a packet from a device driver and queues it for
  3782. * the upper (protocol) levels to process. It always succeeds. The buffer
  3783. * may be dropped during processing for congestion control or by the
  3784. * protocol layers.
  3785. *
  3786. * return values:
  3787. * NET_RX_SUCCESS (no congestion)
  3788. * NET_RX_DROP (packet was dropped)
  3789. *
  3790. */
  3791. int netif_rx(struct sk_buff *skb)
  3792. {
  3793. trace_netif_rx_entry(skb);
  3794. return netif_rx_internal(skb);
  3795. }
  3796. EXPORT_SYMBOL(netif_rx);
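/*
 * A minimal sketch of typical driver-side use of netif_rx() from interrupt
 * context.  The device, frame pointer and length are assumed to come from
 * a hypothetical receive ring; my_rx_frame() is not a real kernel symbol.
 */
static void my_rx_frame(struct net_device *dev, const void *frame,
			unsigned int len)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(dev, len);
	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}
	skb_put_data(skb, frame, len);
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);	/* from process context, use netif_rx_ni() below instead */
}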
  3797. int netif_rx_ni(struct sk_buff *skb)
  3798. {
  3799. int err;
  3800. trace_netif_rx_ni_entry(skb);
  3801. preempt_disable();
  3802. err = netif_rx_internal(skb);
  3803. if (local_softirq_pending())
  3804. do_softirq();
  3805. preempt_enable();
  3806. return err;
  3807. }
  3808. EXPORT_SYMBOL(netif_rx_ni);
  3809. static __latent_entropy void net_tx_action(struct softirq_action *h)
  3810. {
  3811. struct softnet_data *sd = this_cpu_ptr(&softnet_data);
  3812. if (sd->completion_queue) {
  3813. struct sk_buff *clist;
  3814. local_irq_disable();
  3815. clist = sd->completion_queue;
  3816. sd->completion_queue = NULL;
  3817. local_irq_enable();
  3818. while (clist) {
  3819. struct sk_buff *skb = clist;
  3820. clist = clist->next;
  3821. WARN_ON(refcount_read(&skb->users));
  3822. if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
  3823. trace_consume_skb(skb);
  3824. else
  3825. trace_kfree_skb(skb, net_tx_action);
  3826. if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
  3827. __kfree_skb(skb);
  3828. else
  3829. __kfree_skb_defer(skb);
  3830. }
  3831. __kfree_skb_flush();
  3832. }
  3833. if (sd->output_queue) {
  3834. struct Qdisc *head;
  3835. local_irq_disable();
  3836. head = sd->output_queue;
  3837. sd->output_queue = NULL;
  3838. sd->output_queue_tailp = &sd->output_queue;
  3839. local_irq_enable();
  3840. while (head) {
  3841. struct Qdisc *q = head;
  3842. spinlock_t *root_lock = NULL;
  3843. head = head->next_sched;
  3844. if (!(q->flags & TCQ_F_NOLOCK)) {
  3845. root_lock = qdisc_lock(q);
  3846. spin_lock(root_lock);
  3847. }
  3848. /* We need to make sure head->next_sched is read
  3849. * before clearing __QDISC_STATE_SCHED
  3850. */
  3851. smp_mb__before_atomic();
  3852. clear_bit(__QDISC_STATE_SCHED, &q->state);
  3853. qdisc_run(q);
  3854. if (root_lock)
  3855. spin_unlock(root_lock);
  3856. }
  3857. }
  3858. xfrm_dev_backlog(sd);
  3859. }
  3860. #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
  3861. /* This hook is defined here for ATM LANE */
  3862. int (*br_fdb_test_addr_hook)(struct net_device *dev,
  3863. unsigned char *addr) __read_mostly;
  3864. EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
  3865. #endif
  3866. static inline struct sk_buff *
  3867. sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
  3868. struct net_device *orig_dev)
  3869. {
  3870. #ifdef CONFIG_NET_CLS_ACT
  3871. struct mini_Qdisc *miniq = rcu_dereference_bh(skb->dev->miniq_ingress);
  3872. struct tcf_result cl_res;
  3873. /* If there's at least one ingress present somewhere (so
  3874. * we get here via enabled static key), remaining devices
  3875. * that are not configured with an ingress qdisc will bail
  3876. * out here.
  3877. */
  3878. if (!miniq)
  3879. return skb;
  3880. if (*pt_prev) {
  3881. *ret = deliver_skb(skb, *pt_prev, orig_dev);
  3882. *pt_prev = NULL;
  3883. }
  3884. qdisc_skb_cb(skb)->pkt_len = skb->len;
  3885. skb->tc_at_ingress = 1;
  3886. mini_qdisc_bstats_cpu_update(miniq, skb);
  3887. switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
  3888. case TC_ACT_OK:
  3889. case TC_ACT_RECLASSIFY:
  3890. skb->tc_index = TC_H_MIN(cl_res.classid);
  3891. break;
  3892. case TC_ACT_SHOT:
  3893. mini_qdisc_qstats_cpu_drop(miniq);
  3894. kfree_skb(skb);
  3895. return NULL;
  3896. case TC_ACT_STOLEN:
  3897. case TC_ACT_QUEUED:
  3898. case TC_ACT_TRAP:
  3899. consume_skb(skb);
  3900. return NULL;
  3901. case TC_ACT_REDIRECT:
  3902. /* skb_mac_header check was done by cls/act_bpf, so
  3903. * we can safely push the L2 header back before
  3904. * redirecting to another netdev
  3905. */
  3906. __skb_push(skb, skb->mac_len);
  3907. skb_do_redirect(skb);
  3908. return NULL;
  3909. default:
  3910. break;
  3911. }
  3912. #endif /* CONFIG_NET_CLS_ACT */
  3913. return skb;
  3914. }
  3915. /**
  3916. * netdev_is_rx_handler_busy - check if receive handler is registered
  3917. * @dev: device to check
  3918. *
  3919. * Check if a receive handler is already registered for a given device.
3920. * Return true if there is one.
  3921. *
  3922. * The caller must hold the rtnl_mutex.
  3923. */
  3924. bool netdev_is_rx_handler_busy(struct net_device *dev)
  3925. {
  3926. ASSERT_RTNL();
  3927. return dev && rtnl_dereference(dev->rx_handler);
  3928. }
  3929. EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
  3930. /**
  3931. * netdev_rx_handler_register - register receive handler
  3932. * @dev: device to register a handler for
  3933. * @rx_handler: receive handler to register
  3934. * @rx_handler_data: data pointer that is used by rx handler
  3935. *
  3936. * Register a receive handler for a device. This handler will then be
  3937. * called from __netif_receive_skb. A negative errno code is returned
  3938. * on a failure.
  3939. *
  3940. * The caller must hold the rtnl_mutex.
  3941. *
  3942. * For a general description of rx_handler, see enum rx_handler_result.
  3943. */
  3944. int netdev_rx_handler_register(struct net_device *dev,
  3945. rx_handler_func_t *rx_handler,
  3946. void *rx_handler_data)
  3947. {
  3948. if (netdev_is_rx_handler_busy(dev))
  3949. return -EBUSY;
  3950. if (dev->priv_flags & IFF_NO_RX_HANDLER)
  3951. return -EINVAL;
  3952. /* Note: rx_handler_data must be set before rx_handler */
  3953. rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
  3954. rcu_assign_pointer(dev->rx_handler, rx_handler);
  3955. return 0;
  3956. }
  3957. EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
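/*
 * A minimal sketch of how an upper device (bridge/bonding style) might
 * register an rx_handler on a lower device.  my_handle_frame() and
 * my_upper_attach() are hypothetical names, as is the type behind the
 * priv pointer.
 */
static rx_handler_result_t my_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	void *priv = rcu_dereference(skb->dev->rx_handler_data);

	/* Inspect the frame, possibly retarget it at the upper device... */
	(void)priv;
	return RX_HANDLER_PASS;
}

static int my_upper_attach(struct net_device *lower, void *priv)
{
	ASSERT_RTNL();	/* see the locking requirement documented above */
	return netdev_rx_handler_register(lower, my_handle_frame, priv);
}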
  3958. /**
  3959. * netdev_rx_handler_unregister - unregister receive handler
  3960. * @dev: device to unregister a handler from
  3961. *
  3962. * Unregister a receive handler from a device.
  3963. *
  3964. * The caller must hold the rtnl_mutex.
  3965. */
  3966. void netdev_rx_handler_unregister(struct net_device *dev)
  3967. {
  3968. ASSERT_RTNL();
  3969. RCU_INIT_POINTER(dev->rx_handler, NULL);
3970. /* a reader seeing a non-NULL rx_handler in an rcu_read_lock()
3971. * section is guaranteed to see a non-NULL rx_handler_data
  3972. * as well.
  3973. */
  3974. synchronize_net();
  3975. RCU_INIT_POINTER(dev->rx_handler_data, NULL);
  3976. }
  3977. EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
  3978. /*
  3979. * Limit the use of PFMEMALLOC reserves to those protocols that implement
  3980. * the special handling of PFMEMALLOC skbs.
  3981. */
  3982. static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
  3983. {
  3984. switch (skb->protocol) {
  3985. case htons(ETH_P_ARP):
  3986. case htons(ETH_P_IP):
  3987. case htons(ETH_P_IPV6):
  3988. case htons(ETH_P_8021Q):
  3989. case htons(ETH_P_8021AD):
  3990. return true;
  3991. default:
  3992. return false;
  3993. }
  3994. }
  3995. static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
  3996. int *ret, struct net_device *orig_dev)
  3997. {
  3998. #ifdef CONFIG_NETFILTER_INGRESS
  3999. if (nf_hook_ingress_active(skb)) {
  4000. int ingress_retval;
  4001. if (*pt_prev) {
  4002. *ret = deliver_skb(skb, *pt_prev, orig_dev);
  4003. *pt_prev = NULL;
  4004. }
  4005. rcu_read_lock();
  4006. ingress_retval = nf_hook_ingress(skb);
  4007. rcu_read_unlock();
  4008. return ingress_retval;
  4009. }
  4010. #endif /* CONFIG_NETFILTER_INGRESS */
  4011. return 0;
  4012. }
  4013. static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc,
  4014. struct packet_type **ppt_prev)
  4015. {
  4016. struct packet_type *ptype, *pt_prev;
  4017. rx_handler_func_t *rx_handler;
  4018. struct net_device *orig_dev;
  4019. bool deliver_exact = false;
  4020. int ret = NET_RX_DROP;
  4021. __be16 type;
  4022. net_timestamp_check(!netdev_tstamp_prequeue, skb);
  4023. trace_netif_receive_skb(skb);
  4024. orig_dev = skb->dev;
  4025. skb_reset_network_header(skb);
  4026. if (!skb_transport_header_was_set(skb))
  4027. skb_reset_transport_header(skb);
  4028. skb_reset_mac_len(skb);
  4029. pt_prev = NULL;
  4030. another_round:
  4031. skb->skb_iif = skb->dev->ifindex;
  4032. __this_cpu_inc(softnet_data.processed);
  4033. if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
  4034. skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
  4035. skb = skb_vlan_untag(skb);
  4036. if (unlikely(!skb))
  4037. goto out;
  4038. }
  4039. if (skb_skip_tc_classify(skb))
  4040. goto skip_classify;
  4041. if (pfmemalloc)
  4042. goto skip_taps;
  4043. list_for_each_entry_rcu(ptype, &ptype_all, list) {
  4044. if (pt_prev)
  4045. ret = deliver_skb(skb, pt_prev, orig_dev);
  4046. pt_prev = ptype;
  4047. }
  4048. list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
  4049. if (pt_prev)
  4050. ret = deliver_skb(skb, pt_prev, orig_dev);
  4051. pt_prev = ptype;
  4052. }
  4053. skip_taps:
  4054. #ifdef CONFIG_NET_INGRESS
  4055. if (static_branch_unlikely(&ingress_needed_key)) {
  4056. skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev);
  4057. if (!skb)
  4058. goto out;
  4059. if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
  4060. goto out;
  4061. }
  4062. #endif
  4063. skb_reset_tc(skb);
  4064. skip_classify:
  4065. if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
  4066. goto drop;
  4067. if (skb_vlan_tag_present(skb)) {
  4068. if (pt_prev) {
  4069. ret = deliver_skb(skb, pt_prev, orig_dev);
  4070. pt_prev = NULL;
  4071. }
  4072. if (vlan_do_receive(&skb))
  4073. goto another_round;
  4074. else if (unlikely(!skb))
  4075. goto out;
  4076. }
  4077. rx_handler = rcu_dereference(skb->dev->rx_handler);
  4078. if (rx_handler) {
  4079. if (pt_prev) {
  4080. ret = deliver_skb(skb, pt_prev, orig_dev);
  4081. pt_prev = NULL;
  4082. }
  4083. switch (rx_handler(&skb)) {
  4084. case RX_HANDLER_CONSUMED:
  4085. ret = NET_RX_SUCCESS;
  4086. goto out;
  4087. case RX_HANDLER_ANOTHER:
  4088. goto another_round;
  4089. case RX_HANDLER_EXACT:
4090. deliver_exact = true; /* fall through */
  4091. case RX_HANDLER_PASS:
  4092. break;
  4093. default:
  4094. BUG();
  4095. }
  4096. }
  4097. if (unlikely(skb_vlan_tag_present(skb))) {
  4098. if (skb_vlan_tag_get_id(skb))
  4099. skb->pkt_type = PACKET_OTHERHOST;
  4100. /* Note: we might in the future use prio bits
4101. * and set skb->priority like in vlan_do_receive().
4102. * For the time being, just ignore the Priority Code Point.
  4103. */
  4104. skb->vlan_tci = 0;
  4105. }
  4106. type = skb->protocol;
  4107. /* deliver only exact match when indicated */
  4108. if (likely(!deliver_exact)) {
  4109. deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
  4110. &ptype_base[ntohs(type) &
  4111. PTYPE_HASH_MASK]);
  4112. }
  4113. deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
  4114. &orig_dev->ptype_specific);
  4115. if (unlikely(skb->dev != orig_dev)) {
  4116. deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
  4117. &skb->dev->ptype_specific);
  4118. }
  4119. if (pt_prev) {
  4120. if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
  4121. goto drop;
  4122. *ppt_prev = pt_prev;
  4123. } else {
  4124. drop:
  4125. if (!deliver_exact)
  4126. atomic_long_inc(&skb->dev->rx_dropped);
  4127. else
  4128. atomic_long_inc(&skb->dev->rx_nohandler);
  4129. kfree_skb(skb);
4130. /* Jamal, now you will not be able to escape explaining
4131. * to me how you were going to use this. :-)
  4132. */
  4133. ret = NET_RX_DROP;
  4134. }
  4135. out:
  4136. return ret;
  4137. }
  4138. static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
  4139. {
  4140. struct net_device *orig_dev = skb->dev;
  4141. struct packet_type *pt_prev = NULL;
  4142. int ret;
  4143. ret = __netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
  4144. if (pt_prev)
  4145. ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
  4146. return ret;
  4147. }
  4148. /**
  4149. * netif_receive_skb_core - special purpose version of netif_receive_skb
  4150. * @skb: buffer to process
  4151. *
  4152. * More direct receive version of netif_receive_skb(). It should
  4153. * only be used by callers that have a need to skip RPS and Generic XDP.
  4154. * Caller must also take care of handling if (page_is_)pfmemalloc.
  4155. *
  4156. * This function may only be called from softirq context and interrupts
  4157. * should be enabled.
  4158. *
  4159. * Return values (usually ignored):
  4160. * NET_RX_SUCCESS: no congestion
  4161. * NET_RX_DROP: packet was dropped
  4162. */
  4163. int netif_receive_skb_core(struct sk_buff *skb)
  4164. {
  4165. int ret;
  4166. rcu_read_lock();
  4167. ret = __netif_receive_skb_one_core(skb, false);
  4168. rcu_read_unlock();
  4169. return ret;
  4170. }
  4171. EXPORT_SYMBOL(netif_receive_skb_core);
  4172. static inline void __netif_receive_skb_list_ptype(struct list_head *head,
  4173. struct packet_type *pt_prev,
  4174. struct net_device *orig_dev)
  4175. {
  4176. struct sk_buff *skb, *next;
  4177. if (!pt_prev)
  4178. return;
  4179. if (list_empty(head))
  4180. return;
  4181. if (pt_prev->list_func != NULL)
  4182. pt_prev->list_func(head, pt_prev, orig_dev);
  4183. else
  4184. list_for_each_entry_safe(skb, next, head, list)
  4185. pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
  4186. }
  4187. static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
  4188. {
  4189. /* Fast-path assumptions:
  4190. * - There is no RX handler.
  4191. * - Only one packet_type matches.
  4192. * If either of these fails, we will end up doing some per-packet
  4193. * processing in-line, then handling the 'last ptype' for the whole
  4194. * sublist. This can't cause out-of-order delivery to any single ptype,
  4195. * because the 'last ptype' must be constant across the sublist, and all
  4196. * other ptypes are handled per-packet.
  4197. */
  4198. /* Current (common) ptype of sublist */
  4199. struct packet_type *pt_curr = NULL;
  4200. /* Current (common) orig_dev of sublist */
  4201. struct net_device *od_curr = NULL;
  4202. struct list_head sublist;
  4203. struct sk_buff *skb, *next;
  4204. list_for_each_entry_safe(skb, next, head, list) {
  4205. struct net_device *orig_dev = skb->dev;
  4206. struct packet_type *pt_prev = NULL;
  4207. __netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
  4208. if (pt_curr != pt_prev || od_curr != orig_dev) {
  4209. /* dispatch old sublist */
  4210. list_cut_before(&sublist, head, &skb->list);
  4211. __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
  4212. /* start new sublist */
  4213. pt_curr = pt_prev;
  4214. od_curr = orig_dev;
  4215. }
  4216. }
  4217. /* dispatch final sublist */
  4218. __netif_receive_skb_list_ptype(head, pt_curr, od_curr);
  4219. }
  4220. static int __netif_receive_skb(struct sk_buff *skb)
  4221. {
  4222. int ret;
  4223. if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
  4224. unsigned int noreclaim_flag;
  4225. /*
  4226. * PFMEMALLOC skbs are special, they should
  4227. * - be delivered to SOCK_MEMALLOC sockets only
  4228. * - stay away from userspace
  4229. * - have bounded memory usage
  4230. *
  4231. * Use PF_MEMALLOC as this saves us from propagating the allocation
  4232. * context down to all allocation sites.
  4233. */
  4234. noreclaim_flag = memalloc_noreclaim_save();
  4235. ret = __netif_receive_skb_one_core(skb, true);
  4236. memalloc_noreclaim_restore(noreclaim_flag);
  4237. } else
  4238. ret = __netif_receive_skb_one_core(skb, false);
  4239. return ret;
  4240. }
  4241. static void __netif_receive_skb_list(struct list_head *head)
  4242. {
  4243. unsigned long noreclaim_flag = 0;
  4244. struct sk_buff *skb, *next;
  4245. bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? */
  4246. list_for_each_entry_safe(skb, next, head, list) {
  4247. if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) {
  4248. struct list_head sublist;
  4249. /* Handle the previous sublist */
  4250. list_cut_before(&sublist, head, &skb->list);
  4251. if (!list_empty(&sublist))
  4252. __netif_receive_skb_list_core(&sublist, pfmemalloc);
  4253. pfmemalloc = !pfmemalloc;
  4254. /* See comments in __netif_receive_skb */
  4255. if (pfmemalloc)
  4256. noreclaim_flag = memalloc_noreclaim_save();
  4257. else
  4258. memalloc_noreclaim_restore(noreclaim_flag);
  4259. }
  4260. }
  4261. /* Handle the remaining sublist */
  4262. if (!list_empty(head))
  4263. __netif_receive_skb_list_core(head, pfmemalloc);
  4264. /* Restore pflags */
  4265. if (pfmemalloc)
  4266. memalloc_noreclaim_restore(noreclaim_flag);
  4267. }
  4268. static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
  4269. {
  4270. struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
  4271. struct bpf_prog *new = xdp->prog;
  4272. int ret = 0;
  4273. switch (xdp->command) {
  4274. case XDP_SETUP_PROG:
  4275. rcu_assign_pointer(dev->xdp_prog, new);
  4276. if (old)
  4277. bpf_prog_put(old);
  4278. if (old && !new) {
  4279. static_branch_dec(&generic_xdp_needed_key);
  4280. } else if (new && !old) {
  4281. static_branch_inc(&generic_xdp_needed_key);
  4282. dev_disable_lro(dev);
  4283. dev_disable_gro_hw(dev);
  4284. }
  4285. break;
  4286. case XDP_QUERY_PROG:
  4287. xdp->prog_attached = !!old;
  4288. xdp->prog_id = old ? old->aux->id : 0;
  4289. break;
  4290. default:
  4291. ret = -EINVAL;
  4292. break;
  4293. }
  4294. return ret;
  4295. }
  4296. static int netif_receive_skb_internal(struct sk_buff *skb)
  4297. {
  4298. int ret;
  4299. net_timestamp_check(netdev_tstamp_prequeue, skb);
  4300. if (skb_defer_rx_timestamp(skb))
  4301. return NET_RX_SUCCESS;
  4302. if (static_branch_unlikely(&generic_xdp_needed_key)) {
  4303. int ret;
  4304. preempt_disable();
  4305. rcu_read_lock();
  4306. ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
  4307. rcu_read_unlock();
  4308. preempt_enable();
  4309. if (ret != XDP_PASS)
  4310. return NET_RX_DROP;
  4311. }
  4312. rcu_read_lock();
  4313. #ifdef CONFIG_RPS
  4314. if (static_key_false(&rps_needed)) {
  4315. struct rps_dev_flow voidflow, *rflow = &voidflow;
  4316. int cpu = get_rps_cpu(skb->dev, skb, &rflow);
  4317. if (cpu >= 0) {
  4318. ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
  4319. rcu_read_unlock();
  4320. return ret;
  4321. }
  4322. }
  4323. #endif
  4324. ret = __netif_receive_skb(skb);
  4325. rcu_read_unlock();
  4326. return ret;
  4327. }
  4328. static void netif_receive_skb_list_internal(struct list_head *head)
  4329. {
  4330. struct bpf_prog *xdp_prog = NULL;
  4331. struct sk_buff *skb, *next;
  4332. list_for_each_entry_safe(skb, next, head, list) {
  4333. net_timestamp_check(netdev_tstamp_prequeue, skb);
  4334. if (skb_defer_rx_timestamp(skb))
  4335. /* Handled, remove from list */
  4336. list_del(&skb->list);
  4337. }
  4338. if (static_branch_unlikely(&generic_xdp_needed_key)) {
  4339. preempt_disable();
  4340. rcu_read_lock();
  4341. list_for_each_entry_safe(skb, next, head, list) {
  4342. xdp_prog = rcu_dereference(skb->dev->xdp_prog);
  4343. if (do_xdp_generic(xdp_prog, skb) != XDP_PASS)
  4344. /* Dropped, remove from list */
  4345. list_del(&skb->list);
  4346. }
  4347. rcu_read_unlock();
  4348. preempt_enable();
  4349. }
  4350. rcu_read_lock();
  4351. #ifdef CONFIG_RPS
  4352. if (static_key_false(&rps_needed)) {
  4353. list_for_each_entry_safe(skb, next, head, list) {
  4354. struct rps_dev_flow voidflow, *rflow = &voidflow;
  4355. int cpu = get_rps_cpu(skb->dev, skb, &rflow);
  4356. if (cpu >= 0) {
  4357. enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
  4358. /* Handled, remove from list */
  4359. list_del(&skb->list);
  4360. }
  4361. }
  4362. }
  4363. #endif
  4364. __netif_receive_skb_list(head);
  4365. rcu_read_unlock();
  4366. }
  4367. /**
  4368. * netif_receive_skb - process receive buffer from network
  4369. * @skb: buffer to process
  4370. *
  4371. * netif_receive_skb() is the main receive data processing function.
  4372. * It always succeeds. The buffer may be dropped during processing
  4373. * for congestion control or by the protocol layers.
  4374. *
  4375. * This function may only be called from softirq context and interrupts
  4376. * should be enabled.
  4377. *
  4378. * Return values (usually ignored):
  4379. * NET_RX_SUCCESS: no congestion
  4380. * NET_RX_DROP: packet was dropped
  4381. */
  4382. int netif_receive_skb(struct sk_buff *skb)
  4383. {
  4384. trace_netif_receive_skb_entry(skb);
  4385. return netif_receive_skb_internal(skb);
  4386. }
  4387. EXPORT_SYMBOL(netif_receive_skb);
  4388. /**
  4389. * netif_receive_skb_list - process many receive buffers from network
  4390. * @head: list of skbs to process.
  4391. *
  4392. * Since return value of netif_receive_skb() is normally ignored, and
  4393. * wouldn't be meaningful for a list, this function returns void.
  4394. *
  4395. * This function may only be called from softirq context and interrupts
  4396. * should be enabled.
  4397. */
  4398. void netif_receive_skb_list(struct list_head *head)
  4399. {
  4400. struct sk_buff *skb;
  4401. if (list_empty(head))
  4402. return;
  4403. list_for_each_entry(skb, head, list)
  4404. trace_netif_receive_skb_list_entry(skb);
  4405. netif_receive_skb_list_internal(head);
  4406. }
  4407. EXPORT_SYMBOL(netif_receive_skb_list);
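/*
 * A minimal sketch of a NAPI driver batching completed receive buffers on
 * a list and delivering them in one call, amortizing per-packet costs.
 * struct my_rx_ring and my_ring_build_skb() are hypothetical; the latter
 * is assumed to return a fully built skb with skb->protocol already set.
 */
struct my_rx_ring;
struct sk_buff *my_ring_build_skb(struct my_rx_ring *ring);	/* hypothetical */

static int my_poll_rx_batch(struct my_rx_ring *ring, int budget)
{
	LIST_HEAD(rx_list);
	int done = 0;

	while (done < budget) {
		struct sk_buff *skb = my_ring_build_skb(ring);

		if (!skb)
			break;
		list_add_tail(&skb->list, &rx_list);
		done++;
	}
	netif_receive_skb_list(&rx_list);	/* tolerates an empty list */
	return done;
}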
  4408. DEFINE_PER_CPU(struct work_struct, flush_works);
  4409. /* Network device is going away, flush any packets still pending */
  4410. static void flush_backlog(struct work_struct *work)
  4411. {
  4412. struct sk_buff *skb, *tmp;
  4413. struct softnet_data *sd;
  4414. local_bh_disable();
  4415. sd = this_cpu_ptr(&softnet_data);
  4416. local_irq_disable();
  4417. rps_lock(sd);
  4418. skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
  4419. if (skb->dev->reg_state == NETREG_UNREGISTERING) {
  4420. __skb_unlink(skb, &sd->input_pkt_queue);
  4421. kfree_skb(skb);
  4422. input_queue_head_incr(sd);
  4423. }
  4424. }
  4425. rps_unlock(sd);
  4426. local_irq_enable();
  4427. skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
  4428. if (skb->dev->reg_state == NETREG_UNREGISTERING) {
  4429. __skb_unlink(skb, &sd->process_queue);
  4430. kfree_skb(skb);
  4431. input_queue_head_incr(sd);
  4432. }
  4433. }
  4434. local_bh_enable();
  4435. }
  4436. static void flush_all_backlogs(void)
  4437. {
  4438. unsigned int cpu;
  4439. get_online_cpus();
  4440. for_each_online_cpu(cpu)
  4441. queue_work_on(cpu, system_highpri_wq,
  4442. per_cpu_ptr(&flush_works, cpu));
  4443. for_each_online_cpu(cpu)
  4444. flush_work(per_cpu_ptr(&flush_works, cpu));
  4445. put_online_cpus();
  4446. }
  4447. static int napi_gro_complete(struct sk_buff *skb)
  4448. {
  4449. struct packet_offload *ptype;
  4450. __be16 type = skb->protocol;
  4451. struct list_head *head = &offload_base;
  4452. int err = -ENOENT;
  4453. BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
  4454. if (NAPI_GRO_CB(skb)->count == 1) {
  4455. skb_shinfo(skb)->gso_size = 0;
  4456. goto out;
  4457. }
  4458. rcu_read_lock();
  4459. list_for_each_entry_rcu(ptype, head, list) {
  4460. if (ptype->type != type || !ptype->callbacks.gro_complete)
  4461. continue;
  4462. err = ptype->callbacks.gro_complete(skb, 0);
  4463. break;
  4464. }
  4465. rcu_read_unlock();
  4466. if (err) {
  4467. WARN_ON(&ptype->list == head);
  4468. kfree_skb(skb);
  4469. return NET_RX_SUCCESS;
  4470. }
  4471. out:
  4472. return netif_receive_skb_internal(skb);
  4473. }
  4474. static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
  4475. bool flush_old)
  4476. {
  4477. struct list_head *head = &napi->gro_hash[index].list;
  4478. struct sk_buff *skb, *p;
  4479. list_for_each_entry_safe_reverse(skb, p, head, list) {
  4480. if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
  4481. return;
  4482. list_del_init(&skb->list);
  4483. napi_gro_complete(skb);
  4484. napi->gro_count--;
  4485. napi->gro_hash[index].count--;
  4486. }
  4487. }
4488. /* napi->gro_hash[].list contains packets ordered by age,
4489. * with the youngest packets at the head of it.
  4490. * Complete skbs in reverse order to reduce latencies.
  4491. */
  4492. void napi_gro_flush(struct napi_struct *napi, bool flush_old)
  4493. {
  4494. u32 i;
  4495. for (i = 0; i < GRO_HASH_BUCKETS; i++)
  4496. __napi_gro_flush_chain(napi, i, flush_old);
  4497. }
  4498. EXPORT_SYMBOL(napi_gro_flush);
  4499. static struct list_head *gro_list_prepare(struct napi_struct *napi,
  4500. struct sk_buff *skb)
  4501. {
  4502. unsigned int maclen = skb->dev->hard_header_len;
  4503. u32 hash = skb_get_hash_raw(skb);
  4504. struct list_head *head;
  4505. struct sk_buff *p;
  4506. head = &napi->gro_hash[hash & (GRO_HASH_BUCKETS - 1)].list;
  4507. list_for_each_entry(p, head, list) {
  4508. unsigned long diffs;
  4509. NAPI_GRO_CB(p)->flush = 0;
  4510. if (hash != skb_get_hash_raw(p)) {
  4511. NAPI_GRO_CB(p)->same_flow = 0;
  4512. continue;
  4513. }
  4514. diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
  4515. diffs |= p->vlan_tci ^ skb->vlan_tci;
  4516. diffs |= skb_metadata_dst_cmp(p, skb);
  4517. diffs |= skb_metadata_differs(p, skb);
  4518. if (maclen == ETH_HLEN)
  4519. diffs |= compare_ether_header(skb_mac_header(p),
  4520. skb_mac_header(skb));
  4521. else if (!diffs)
  4522. diffs = memcmp(skb_mac_header(p),
  4523. skb_mac_header(skb),
  4524. maclen);
  4525. NAPI_GRO_CB(p)->same_flow = !diffs;
  4526. }
  4527. return head;
  4528. }
  4529. static void skb_gro_reset_offset(struct sk_buff *skb)
  4530. {
  4531. const struct skb_shared_info *pinfo = skb_shinfo(skb);
  4532. const skb_frag_t *frag0 = &pinfo->frags[0];
  4533. NAPI_GRO_CB(skb)->data_offset = 0;
  4534. NAPI_GRO_CB(skb)->frag0 = NULL;
  4535. NAPI_GRO_CB(skb)->frag0_len = 0;
  4536. if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
  4537. pinfo->nr_frags &&
  4538. !PageHighMem(skb_frag_page(frag0))) {
  4539. NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
  4540. NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
  4541. skb_frag_size(frag0),
  4542. skb->end - skb->tail);
  4543. }
  4544. }
  4545. static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
  4546. {
  4547. struct skb_shared_info *pinfo = skb_shinfo(skb);
  4548. BUG_ON(skb->end - skb->tail < grow);
  4549. memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
  4550. skb->data_len -= grow;
  4551. skb->tail += grow;
  4552. pinfo->frags[0].page_offset += grow;
  4553. skb_frag_size_sub(&pinfo->frags[0], grow);
  4554. if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
  4555. skb_frag_unref(skb, 0);
  4556. memmove(pinfo->frags, pinfo->frags + 1,
  4557. --pinfo->nr_frags * sizeof(pinfo->frags[0]));
  4558. }
  4559. }
  4560. static void gro_flush_oldest(struct list_head *head)
  4561. {
  4562. struct sk_buff *oldest;
  4563. oldest = list_last_entry(head, struct sk_buff, list);
  4564. /* We are called with head length >= MAX_GRO_SKBS, so this is
  4565. * impossible.
  4566. */
  4567. if (WARN_ON_ONCE(!oldest))
  4568. return;
  4569. /* Do not adjust napi->gro_count, caller is adding a new SKB to
  4570. * the chain.
  4571. */
  4572. list_del(&oldest->list);
  4573. napi_gro_complete(oldest);
  4574. }
  4575. static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
  4576. {
  4577. u32 hash = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
  4578. struct list_head *head = &offload_base;
  4579. struct packet_offload *ptype;
  4580. __be16 type = skb->protocol;
  4581. struct list_head *gro_head;
  4582. struct sk_buff *pp = NULL;
  4583. enum gro_result ret;
  4584. int same_flow;
  4585. int grow;
  4586. if (netif_elide_gro(skb->dev))
  4587. goto normal;
  4588. gro_head = gro_list_prepare(napi, skb);
  4589. rcu_read_lock();
  4590. list_for_each_entry_rcu(ptype, head, list) {
  4591. if (ptype->type != type || !ptype->callbacks.gro_receive)
  4592. continue;
  4593. skb_set_network_header(skb, skb_gro_offset(skb));
  4594. skb_reset_mac_len(skb);
  4595. NAPI_GRO_CB(skb)->same_flow = 0;
  4596. NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
  4597. NAPI_GRO_CB(skb)->free = 0;
  4598. NAPI_GRO_CB(skb)->encap_mark = 0;
  4599. NAPI_GRO_CB(skb)->recursion_counter = 0;
  4600. NAPI_GRO_CB(skb)->is_fou = 0;
  4601. NAPI_GRO_CB(skb)->is_atomic = 1;
  4602. NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
  4603. /* Setup for GRO checksum validation */
  4604. switch (skb->ip_summed) {
  4605. case CHECKSUM_COMPLETE:
  4606. NAPI_GRO_CB(skb)->csum = skb->csum;
  4607. NAPI_GRO_CB(skb)->csum_valid = 1;
  4608. NAPI_GRO_CB(skb)->csum_cnt = 0;
  4609. break;
  4610. case CHECKSUM_UNNECESSARY:
  4611. NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
  4612. NAPI_GRO_CB(skb)->csum_valid = 0;
  4613. break;
  4614. default:
  4615. NAPI_GRO_CB(skb)->csum_cnt = 0;
  4616. NAPI_GRO_CB(skb)->csum_valid = 0;
  4617. }
  4618. pp = ptype->callbacks.gro_receive(gro_head, skb);
  4619. break;
  4620. }
  4621. rcu_read_unlock();
  4622. if (&ptype->list == head)
  4623. goto normal;
  4624. if (IS_ERR(pp) && PTR_ERR(pp) == -EINPROGRESS) {
  4625. ret = GRO_CONSUMED;
  4626. goto ok;
  4627. }
  4628. same_flow = NAPI_GRO_CB(skb)->same_flow;
  4629. ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
  4630. if (pp) {
  4631. list_del_init(&pp->list);
  4632. napi_gro_complete(pp);
  4633. napi->gro_count--;
  4634. napi->gro_hash[hash].count--;
  4635. }
  4636. if (same_flow)
  4637. goto ok;
  4638. if (NAPI_GRO_CB(skb)->flush)
  4639. goto normal;
  4640. if (unlikely(napi->gro_hash[hash].count >= MAX_GRO_SKBS)) {
  4641. gro_flush_oldest(gro_head);
  4642. } else {
  4643. napi->gro_count++;
  4644. napi->gro_hash[hash].count++;
  4645. }
  4646. NAPI_GRO_CB(skb)->count = 1;
  4647. NAPI_GRO_CB(skb)->age = jiffies;
  4648. NAPI_GRO_CB(skb)->last = skb;
  4649. skb_shinfo(skb)->gso_size = skb_gro_len(skb);
  4650. list_add(&skb->list, gro_head);
  4651. ret = GRO_HELD;
  4652. pull:
  4653. grow = skb_gro_offset(skb) - skb_headlen(skb);
  4654. if (grow > 0)
  4655. gro_pull_from_frag0(skb, grow);
  4656. ok:
  4657. return ret;
  4658. normal:
  4659. ret = GRO_NORMAL;
  4660. goto pull;
  4661. }
  4662. struct packet_offload *gro_find_receive_by_type(__be16 type)
  4663. {
  4664. struct list_head *offload_head = &offload_base;
  4665. struct packet_offload *ptype;
  4666. list_for_each_entry_rcu(ptype, offload_head, list) {
  4667. if (ptype->type != type || !ptype->callbacks.gro_receive)
  4668. continue;
  4669. return ptype;
  4670. }
  4671. return NULL;
  4672. }
  4673. EXPORT_SYMBOL(gro_find_receive_by_type);
  4674. struct packet_offload *gro_find_complete_by_type(__be16 type)
  4675. {
  4676. struct list_head *offload_head = &offload_base;
  4677. struct packet_offload *ptype;
  4678. list_for_each_entry_rcu(ptype, offload_head, list) {
  4679. if (ptype->type != type || !ptype->callbacks.gro_complete)
  4680. continue;
  4681. return ptype;
  4682. }
  4683. return NULL;
  4684. }
  4685. EXPORT_SYMBOL(gro_find_complete_by_type);
  4686. static void napi_skb_free_stolen_head(struct sk_buff *skb)
  4687. {
  4688. skb_dst_drop(skb);
  4689. secpath_reset(skb);
  4690. kmem_cache_free(skbuff_head_cache, skb);
  4691. }
  4692. static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
  4693. {
  4694. switch (ret) {
  4695. case GRO_NORMAL:
  4696. if (netif_receive_skb_internal(skb))
  4697. ret = GRO_DROP;
  4698. break;
  4699. case GRO_DROP:
  4700. kfree_skb(skb);
  4701. break;
  4702. case GRO_MERGED_FREE:
  4703. if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
  4704. napi_skb_free_stolen_head(skb);
  4705. else
  4706. __kfree_skb(skb);
  4707. break;
  4708. case GRO_HELD:
  4709. case GRO_MERGED:
  4710. case GRO_CONSUMED:
  4711. break;
  4712. }
  4713. return ret;
  4714. }
  4715. gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
  4716. {
  4717. skb_mark_napi_id(skb, napi);
  4718. trace_napi_gro_receive_entry(skb);
  4719. skb_gro_reset_offset(skb);
  4720. return napi_skb_finish(dev_gro_receive(napi, skb), skb);
  4721. }
  4722. EXPORT_SYMBOL(napi_gro_receive);
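/*
 * A minimal sketch of the common poll-loop pattern: each completed receive
 * skb is fed to GRO rather than straight to netif_receive_skb().  It reuses
 * the hypothetical struct my_rx_ring / my_ring_build_skb() declared in the
 * earlier netif_receive_skb_list() sketch.
 */
static int my_napi_rx(struct napi_struct *napi, struct my_rx_ring *ring,
		      int budget)
{
	int done = 0;

	while (done < budget) {
		struct sk_buff *skb = my_ring_build_skb(ring);

		if (!skb)
			break;
		napi_gro_receive(napi, skb);
		done++;
	}
	return done;
}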
  4723. static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
  4724. {
  4725. if (unlikely(skb->pfmemalloc)) {
  4726. consume_skb(skb);
  4727. return;
  4728. }
  4729. __skb_pull(skb, skb_headlen(skb));
  4730. /* restore the reserve we had after netdev_alloc_skb_ip_align() */
  4731. skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
  4732. skb->vlan_tci = 0;
  4733. skb->dev = napi->dev;
  4734. skb->skb_iif = 0;
  4735. skb->encapsulation = 0;
  4736. skb_shinfo(skb)->gso_type = 0;
  4737. skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
  4738. secpath_reset(skb);
  4739. napi->skb = skb;
  4740. }
  4741. struct sk_buff *napi_get_frags(struct napi_struct *napi)
  4742. {
  4743. struct sk_buff *skb = napi->skb;
  4744. if (!skb) {
  4745. skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
  4746. if (skb) {
  4747. napi->skb = skb;
  4748. skb_mark_napi_id(skb, napi);
  4749. }
  4750. }
  4751. return skb;
  4752. }
  4753. EXPORT_SYMBOL(napi_get_frags);
  4754. static gro_result_t napi_frags_finish(struct napi_struct *napi,
  4755. struct sk_buff *skb,
  4756. gro_result_t ret)
  4757. {
  4758. switch (ret) {
  4759. case GRO_NORMAL:
  4760. case GRO_HELD:
  4761. __skb_push(skb, ETH_HLEN);
  4762. skb->protocol = eth_type_trans(skb, skb->dev);
  4763. if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
  4764. ret = GRO_DROP;
  4765. break;
  4766. case GRO_DROP:
  4767. napi_reuse_skb(napi, skb);
  4768. break;
  4769. case GRO_MERGED_FREE:
  4770. if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
  4771. napi_skb_free_stolen_head(skb);
  4772. else
  4773. napi_reuse_skb(napi, skb);
  4774. break;
  4775. case GRO_MERGED:
  4776. case GRO_CONSUMED:
  4777. break;
  4778. }
  4779. return ret;
  4780. }
4781. /* The upper GRO stack assumes the network header starts at gro_offset=0.
4782. * Drivers could call both napi_gro_frags() and napi_gro_receive(), so
4783. * we copy the ethernet header into skb->data to have a common layout.
  4784. */
  4785. static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
  4786. {
  4787. struct sk_buff *skb = napi->skb;
  4788. const struct ethhdr *eth;
  4789. unsigned int hlen = sizeof(*eth);
  4790. napi->skb = NULL;
  4791. skb_reset_mac_header(skb);
  4792. skb_gro_reset_offset(skb);
  4793. eth = skb_gro_header_fast(skb, 0);
  4794. if (unlikely(skb_gro_header_hard(skb, hlen))) {
  4795. eth = skb_gro_header_slow(skb, hlen, 0);
  4796. if (unlikely(!eth)) {
  4797. net_warn_ratelimited("%s: dropping impossible skb from %s\n",
  4798. __func__, napi->dev->name);
  4799. napi_reuse_skb(napi, skb);
  4800. return NULL;
  4801. }
  4802. } else {
  4803. gro_pull_from_frag0(skb, hlen);
  4804. NAPI_GRO_CB(skb)->frag0 += hlen;
  4805. NAPI_GRO_CB(skb)->frag0_len -= hlen;
  4806. }
  4807. __skb_pull(skb, hlen);
  4808. /*
  4809. * This works because the only protocols we care about don't require
  4810. * special handling.
  4811. * We'll fix it up properly in napi_frags_finish()
  4812. */
  4813. skb->protocol = eth->h_proto;
  4814. return skb;
  4815. }
  4816. gro_result_t napi_gro_frags(struct napi_struct *napi)
  4817. {
  4818. struct sk_buff *skb = napi_frags_skb(napi);
  4819. if (!skb)
  4820. return GRO_DROP;
  4821. trace_napi_gro_frags_entry(skb);
  4822. return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
  4823. }
  4824. EXPORT_SYMBOL(napi_gro_frags);
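/*
 * A minimal sketch of the napi_get_frags()/napi_gro_frags() pattern used by
 * drivers that receive directly into pages.  The page, offset and length are
 * assumed to come from a hypothetical receive descriptor, and the PAGE_SIZE
 * truesize is only an example.
 */
static void my_rx_page(struct napi_struct *napi, struct page *page,
		       unsigned int offset, unsigned int len)
{
	struct sk_buff *skb;

	skb = napi_get_frags(napi);
	if (unlikely(!skb)) {
		put_page(page);	/* error accounting elided */
		return;
	}
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len,
			PAGE_SIZE);
	napi_gro_frags(napi);	/* consumes napi->skb, see napi_frags_skb() */
}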
  4825. /* Compute the checksum from gro_offset and return the folded value
  4826. * after adding in any pseudo checksum.
  4827. */
  4828. __sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
  4829. {
  4830. __wsum wsum;
  4831. __sum16 sum;
  4832. wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
  4833. /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
  4834. sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
  4835. if (likely(!sum)) {
  4836. if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
  4837. !skb->csum_complete_sw)
  4838. netdev_rx_csum_fault(skb->dev);
  4839. }
  4840. NAPI_GRO_CB(skb)->csum = wsum;
  4841. NAPI_GRO_CB(skb)->csum_valid = 1;
  4842. return sum;
  4843. }
  4844. EXPORT_SYMBOL(__skb_gro_checksum_complete);
  4845. static void net_rps_send_ipi(struct softnet_data *remsd)
  4846. {
  4847. #ifdef CONFIG_RPS
  4848. while (remsd) {
  4849. struct softnet_data *next = remsd->rps_ipi_next;
  4850. if (cpu_online(remsd->cpu))
  4851. smp_call_function_single_async(remsd->cpu, &remsd->csd);
  4852. remsd = next;
  4853. }
  4854. #endif
  4855. }
  4856. /*
  4857. * net_rps_action_and_irq_enable sends any pending IPI's for rps.
  4858. * Note: called with local irq disabled, but exits with local irq enabled.
  4859. */
  4860. static void net_rps_action_and_irq_enable(struct softnet_data *sd)
  4861. {
  4862. #ifdef CONFIG_RPS
  4863. struct softnet_data *remsd = sd->rps_ipi_list;
  4864. if (remsd) {
  4865. sd->rps_ipi_list = NULL;
  4866. local_irq_enable();
  4867. /* Send pending IPI's to kick RPS processing on remote cpus. */
  4868. net_rps_send_ipi(remsd);
  4869. } else
  4870. #endif
  4871. local_irq_enable();
  4872. }
  4873. static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
  4874. {
  4875. #ifdef CONFIG_RPS
  4876. return sd->rps_ipi_list != NULL;
  4877. #else
  4878. return false;
  4879. #endif
  4880. }
  4881. static int process_backlog(struct napi_struct *napi, int quota)
  4882. {
  4883. struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
  4884. bool again = true;
  4885. int work = 0;
4886. /* Check if we have pending IPIs; it's better to send them now
4887. * rather than waiting for net_rx_action() to end.
  4888. */
  4889. if (sd_has_rps_ipi_waiting(sd)) {
  4890. local_irq_disable();
  4891. net_rps_action_and_irq_enable(sd);
  4892. }
  4893. napi->weight = dev_rx_weight;
  4894. while (again) {
  4895. struct sk_buff *skb;
  4896. while ((skb = __skb_dequeue(&sd->process_queue))) {
  4897. rcu_read_lock();
  4898. __netif_receive_skb(skb);
  4899. rcu_read_unlock();
  4900. input_queue_head_incr(sd);
  4901. if (++work >= quota)
  4902. return work;
  4903. }
  4904. local_irq_disable();
  4905. rps_lock(sd);
  4906. if (skb_queue_empty(&sd->input_pkt_queue)) {
  4907. /*
  4908. * Inline a custom version of __napi_complete().
4909. * Only the current CPU owns and manipulates this napi,
  4910. * and NAPI_STATE_SCHED is the only possible flag set
  4911. * on backlog.
  4912. * We can use a plain write instead of clear_bit(),
4913. * and we don't need an smp_mb() memory barrier.
  4914. */
  4915. napi->state = 0;
  4916. again = false;
  4917. } else {
  4918. skb_queue_splice_tail_init(&sd->input_pkt_queue,
  4919. &sd->process_queue);
  4920. }
  4921. rps_unlock(sd);
  4922. local_irq_enable();
  4923. }
  4924. return work;
  4925. }
  4926. /**
  4927. * __napi_schedule - schedule for receive
  4928. * @n: entry to schedule
  4929. *
  4930. * The entry's receive function will be scheduled to run.
  4931. * Consider using __napi_schedule_irqoff() if hard irqs are masked.
  4932. */
  4933. void __napi_schedule(struct napi_struct *n)
  4934. {
  4935. unsigned long flags;
  4936. local_irq_save(flags);
  4937. ____napi_schedule(this_cpu_ptr(&softnet_data), n);
  4938. local_irq_restore(flags);
  4939. }
  4940. EXPORT_SYMBOL(__napi_schedule);
  4941. /**
  4942. * napi_schedule_prep - check if napi can be scheduled
  4943. * @n: napi context
  4944. *
  4945. * Test if NAPI routine is already running, and if not mark
4946. * it as running. This is used as a condition variable to
4947. * ensure only one NAPI poll instance runs. We also make
  4948. * sure there is no pending NAPI disable.
  4949. */
  4950. bool napi_schedule_prep(struct napi_struct *n)
  4951. {
  4952. unsigned long val, new;
  4953. do {
  4954. val = READ_ONCE(n->state);
  4955. if (unlikely(val & NAPIF_STATE_DISABLE))
  4956. return false;
  4957. new = val | NAPIF_STATE_SCHED;
4958. /* Set the STATE_MISSED bit if STATE_SCHED was already set.
4959. * This was suggested by Alexander Duyck, as the compiler
4960. * emits better code than:
  4961. * if (val & NAPIF_STATE_SCHED)
  4962. * new |= NAPIF_STATE_MISSED;
  4963. */
  4964. new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
  4965. NAPIF_STATE_MISSED;
  4966. } while (cmpxchg(&n->state, val, new) != val);
  4967. return !(val & NAPIF_STATE_SCHED);
  4968. }
  4969. EXPORT_SYMBOL(napi_schedule_prep);
  4970. /**
  4971. * __napi_schedule_irqoff - schedule for receive
  4972. * @n: entry to schedule
  4973. *
  4974. * Variant of __napi_schedule() assuming hard irqs are masked
  4975. */
  4976. void __napi_schedule_irqoff(struct napi_struct *n)
  4977. {
  4978. ____napi_schedule(this_cpu_ptr(&softnet_data), n);
  4979. }
  4980. EXPORT_SYMBOL(__napi_schedule_irqoff);
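/*
 * A minimal sketch of the usual hard-irq handler: mask the device's RX
 * interrupt, then hand the rest of the work to NAPI.  struct my_dev and
 * my_dev_mask_irq() are hypothetical, and irqreturn_t/IRQ_HANDLED come
 * from <linux/interrupt.h>.  Since hard irqs are masked in an interrupt
 * handler, the _irqoff variant above can be used directly.
 */
struct my_dev {
	struct napi_struct napi;
	/* ... hypothetical device state ... */
};

static void my_dev_mask_irq(struct my_dev *md)
{
	/* hypothetical: tell the hardware to stop raising RX interrupts */
}

static irqreturn_t my_rx_interrupt(int irq, void *data)
{
	struct my_dev *md = data;

	my_dev_mask_irq(md);
	if (napi_schedule_prep(&md->napi))
		__napi_schedule_irqoff(&md->napi);
	return IRQ_HANDLED;
}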
  4981. bool napi_complete_done(struct napi_struct *n, int work_done)
  4982. {
  4983. unsigned long flags, val, new;
  4984. /*
  4985. * 1) Don't let napi dequeue from the cpu poll list
  4986. * just in case its running on a different cpu.
  4987. * 2) If we are busy polling, do nothing here, we have
  4988. * the guarantee we will be called later.
  4989. */
  4990. if (unlikely(n->state & (NAPIF_STATE_NPSVC |
  4991. NAPIF_STATE_IN_BUSY_POLL)))
  4992. return false;
  4993. if (n->gro_count) {
  4994. unsigned long timeout = 0;
  4995. if (work_done)
  4996. timeout = n->dev->gro_flush_timeout;
  4997. if (timeout)
  4998. hrtimer_start(&n->timer, ns_to_ktime(timeout),
  4999. HRTIMER_MODE_REL_PINNED);
  5000. else
  5001. napi_gro_flush(n, false);
  5002. }
  5003. if (unlikely(!list_empty(&n->poll_list))) {
  5004. /* If n->poll_list is not empty, we need to mask irqs */
  5005. local_irq_save(flags);
  5006. list_del_init(&n->poll_list);
  5007. local_irq_restore(flags);
  5008. }
  5009. do {
  5010. val = READ_ONCE(n->state);
  5011. WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));
  5012. new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED);
  5013. /* If STATE_MISSED was set, leave STATE_SCHED set,
  5014. * because we will call napi->poll() one more time.
  5015. * This C code was suggested by Alexander Duyck to help gcc.
  5016. */
  5017. new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
  5018. NAPIF_STATE_SCHED;
  5019. } while (cmpxchg(&n->state, val, new) != val);
  5020. if (unlikely(val & NAPIF_STATE_MISSED)) {
  5021. __napi_schedule(n);
  5022. return false;
  5023. }
  5024. return true;
  5025. }
  5026. EXPORT_SYMBOL(napi_complete_done);
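/*
 * A minimal sketch of the tail of a NAPI poll callback, showing how the
 * napi_complete_done() return value above gates re-enabling the device
 * interrupt.  my_clean_rx() and my_dev_unmask_irq() are hypothetical, and
 * struct my_dev comes from the earlier interrupt-handler sketch.
 */
int my_clean_rx(struct my_dev *md, int budget);		/* hypothetical */
void my_dev_unmask_irq(struct my_dev *md);		/* hypothetical */

static int my_napi_poll(struct napi_struct *napi, int budget)
{
	struct my_dev *md = container_of(napi, struct my_dev, napi);
	int work_done = my_clean_rx(md, budget);

	/* Budget exhausted: stay scheduled, net_rx_action() will repoll. */
	if (work_done == budget)
		return budget;

	/* Only unmask the device IRQ if NAPI really went idle. */
	if (napi_complete_done(napi, work_done))
		my_dev_unmask_irq(md);
	return work_done;
}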
  5027. /* must be called under rcu_read_lock(), as we dont take a reference */
  5028. static struct napi_struct *napi_by_id(unsigned int napi_id)
  5029. {
  5030. unsigned int hash = napi_id % HASH_SIZE(napi_hash);
  5031. struct napi_struct *napi;
  5032. hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
  5033. if (napi->napi_id == napi_id)
  5034. return napi;
  5035. return NULL;
  5036. }
  5037. #if defined(CONFIG_NET_RX_BUSY_POLL)
  5038. #define BUSY_POLL_BUDGET 8
  5039. static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
  5040. {
  5041. int rc;
  5042. /* Busy polling means there is a high chance device driver hard irq
  5043. * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
  5044. * set in napi_schedule_prep().
  5045. * Since we are about to call napi->poll() once more, we can safely
  5046. * clear NAPI_STATE_MISSED.
  5047. *
  5048. * Note: x86 could use a single "lock and ..." instruction
5049. * to perform these two clear_bit() operations.
  5050. */
  5051. clear_bit(NAPI_STATE_MISSED, &napi->state);
  5052. clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
  5053. local_bh_disable();
  5054. /* All we really want here is to re-enable device interrupts.
  5055. * Ideally, a new ndo_busy_poll_stop() could avoid another round.
  5056. */
  5057. rc = napi->poll(napi, BUSY_POLL_BUDGET);
  5058. trace_napi_poll(napi, rc, BUSY_POLL_BUDGET);
  5059. netpoll_poll_unlock(have_poll_lock);
  5060. if (rc == BUSY_POLL_BUDGET)
  5061. __napi_schedule(napi);
  5062. local_bh_enable();
  5063. }
  5064. void napi_busy_loop(unsigned int napi_id,
  5065. bool (*loop_end)(void *, unsigned long),
  5066. void *loop_end_arg)
  5067. {
  5068. unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
  5069. int (*napi_poll)(struct napi_struct *napi, int budget);
  5070. void *have_poll_lock = NULL;
  5071. struct napi_struct *napi;
  5072. restart:
  5073. napi_poll = NULL;
  5074. rcu_read_lock();
  5075. napi = napi_by_id(napi_id);
  5076. if (!napi)
  5077. goto out;
  5078. preempt_disable();
  5079. for (;;) {
  5080. int work = 0;
  5081. local_bh_disable();
  5082. if (!napi_poll) {
  5083. unsigned long val = READ_ONCE(napi->state);
  5084. /* If multiple threads are competing for this napi,
  5085. * we avoid dirtying napi->state as much as we can.
  5086. */
  5087. if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
  5088. NAPIF_STATE_IN_BUSY_POLL))
  5089. goto count;
  5090. if (cmpxchg(&napi->state, val,
  5091. val | NAPIF_STATE_IN_BUSY_POLL |
  5092. NAPIF_STATE_SCHED) != val)
  5093. goto count;
  5094. have_poll_lock = netpoll_poll_lock(napi);
  5095. napi_poll = napi->poll;
  5096. }
  5097. work = napi_poll(napi, BUSY_POLL_BUDGET);
  5098. trace_napi_poll(napi, work, BUSY_POLL_BUDGET);
  5099. count:
  5100. if (work > 0)
  5101. __NET_ADD_STATS(dev_net(napi->dev),
  5102. LINUX_MIB_BUSYPOLLRXPACKETS, work);
  5103. local_bh_enable();
  5104. if (!loop_end || loop_end(loop_end_arg, start_time))
  5105. break;
  5106. if (unlikely(need_resched())) {
  5107. if (napi_poll)
  5108. busy_poll_stop(napi, have_poll_lock);
  5109. preempt_enable();
  5110. rcu_read_unlock();
  5111. cond_resched();
  5112. if (loop_end(loop_end_arg, start_time))
  5113. return;
  5114. goto restart;
  5115. }
  5116. cpu_relax();
  5117. }
  5118. if (napi_poll)
  5119. busy_poll_stop(napi, have_poll_lock);
  5120. preempt_enable();
  5121. out:
  5122. rcu_read_unlock();
  5123. }
  5124. EXPORT_SYMBOL(napi_busy_loop);
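/*
 * A minimal sketch of a caller-supplied loop_end callback in the style of
 * the socket busy-poll code: stop the loop once the configured busy-poll
 * window has elapsed.  my_loop_end() is hypothetical; busy_loop_timeout()
 * is assumed to be the helper from <net/busy_poll.h>.
 */
static bool my_loop_end(void *arg, unsigned long start_time)
{
	/* arg could carry per-caller state (e.g. a socket); unused here */
	return busy_loop_timeout(start_time);
}

/* usage: napi_busy_loop(napi_id, my_loop_end, NULL); */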
  5125. #endif /* CONFIG_NET_RX_BUSY_POLL */
  5126. static void napi_hash_add(struct napi_struct *napi)
  5127. {
  5128. if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) ||
  5129. test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
  5130. return;
  5131. spin_lock(&napi_hash_lock);
  5132. /* 0..NR_CPUS range is reserved for sender_cpu use */
  5133. do {
  5134. if (unlikely(++napi_gen_id < MIN_NAPI_ID))
  5135. napi_gen_id = MIN_NAPI_ID;
  5136. } while (napi_by_id(napi_gen_id));
  5137. napi->napi_id = napi_gen_id;
  5138. hlist_add_head_rcu(&napi->napi_hash_node,
  5139. &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
  5140. spin_unlock(&napi_hash_lock);
  5141. }
5142. /* Warning: the caller is responsible for making sure an RCU grace period
5143. * elapses before freeing the memory containing @napi.
  5144. */
  5145. bool napi_hash_del(struct napi_struct *napi)
  5146. {
  5147. bool rcu_sync_needed = false;
  5148. spin_lock(&napi_hash_lock);
  5149. if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) {
  5150. rcu_sync_needed = true;
  5151. hlist_del_rcu(&napi->napi_hash_node);
  5152. }
  5153. spin_unlock(&napi_hash_lock);
  5154. return rcu_sync_needed;
  5155. }
  5156. EXPORT_SYMBOL_GPL(napi_hash_del);
  5157. static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
  5158. {
  5159. struct napi_struct *napi;
  5160. napi = container_of(timer, struct napi_struct, timer);
5161. /* Note: we use a relaxed variant of napi_schedule_prep() that does not set
  5162. * NAPI_STATE_MISSED, since we do not react to a device IRQ.
  5163. */
  5164. if (napi->gro_count && !napi_disable_pending(napi) &&
  5165. !test_and_set_bit(NAPI_STATE_SCHED, &napi->state))
  5166. __napi_schedule_irqoff(napi);
  5167. return HRTIMER_NORESTART;
  5168. }
  5169. void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
  5170. int (*poll)(struct napi_struct *, int), int weight)
  5171. {
  5172. int i;
  5173. INIT_LIST_HEAD(&napi->poll_list);
  5174. hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
  5175. napi->timer.function = napi_watchdog;
  5176. napi->gro_count = 0;
  5177. for (i = 0; i < GRO_HASH_BUCKETS; i++) {
  5178. INIT_LIST_HEAD(&napi->gro_hash[i].list);
  5179. napi->gro_hash[i].count = 0;
  5180. }
  5181. napi->skb = NULL;
  5182. napi->poll = poll;
  5183. if (weight > NAPI_POLL_WEIGHT)
  5184. pr_err_once("netif_napi_add() called with weight %d on device %s\n",
  5185. weight, dev->name);
  5186. napi->weight = weight;
  5187. list_add(&napi->dev_list, &dev->napi_list);
  5188. napi->dev = dev;
  5189. #ifdef CONFIG_NETPOLL
  5190. napi->poll_owner = -1;
  5191. #endif
  5192. set_bit(NAPI_STATE_SCHED, &napi->state);
  5193. napi_hash_add(napi);
  5194. }
  5195. EXPORT_SYMBOL(netif_napi_add);
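/*
 * A minimal sketch of wiring NAPI up at probe time, using the hypothetical
 * struct my_dev and my_napi_poll() from the earlier sketches.  napi_enable()
 * is the usual counterpart declared in <linux/netdevice.h>.
 */
static void my_setup_napi(struct net_device *dev, struct my_dev *md)
{
	netif_napi_add(dev, &md->napi, my_napi_poll, NAPI_POLL_WEIGHT);
	napi_enable(&md->napi);
}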
  5196. void napi_disable(struct napi_struct *n)
  5197. {
  5198. might_sleep();
  5199. set_bit(NAPI_STATE_DISABLE, &n->state);
  5200. while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
  5201. msleep(1);
  5202. while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
  5203. msleep(1);
  5204. hrtimer_cancel(&n->timer);
  5205. clear_bit(NAPI_STATE_DISABLE, &n->state);
  5206. }
  5207. EXPORT_SYMBOL(napi_disable);
  5208. static void flush_gro_hash(struct napi_struct *napi)
  5209. {
  5210. int i;
  5211. for (i = 0; i < GRO_HASH_BUCKETS; i++) {
  5212. struct sk_buff *skb, *n;
  5213. list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list)
  5214. kfree_skb(skb);
  5215. napi->gro_hash[i].count = 0;
  5216. }
  5217. }
  5218. /* Must be called in process context */
  5219. void netif_napi_del(struct napi_struct *napi)
  5220. {
  5221. might_sleep();
  5222. if (napi_hash_del(napi))
  5223. synchronize_net();
  5224. list_del_init(&napi->dev_list);
  5225. napi_free_frags(napi);
  5226. flush_gro_hash(napi);
  5227. napi->gro_count = 0;
  5228. }
  5229. EXPORT_SYMBOL(netif_napi_del);
  5230. static int napi_poll(struct napi_struct *n, struct list_head *repoll)
  5231. {
  5232. void *have;
  5233. int work, weight;
  5234. list_del_init(&n->poll_list);
  5235. have = netpoll_poll_lock(n);
  5236. weight = n->weight;
  5237. /* This NAPI_STATE_SCHED test is for avoiding a race
  5238. * with netpoll's poll_napi(). Only the entity which
  5239. * obtains the lock and sees NAPI_STATE_SCHED set will
  5240. * actually make the ->poll() call. Therefore we avoid
  5241. * accidentally calling ->poll() when NAPI is not scheduled.
  5242. */
  5243. work = 0;
  5244. if (test_bit(NAPI_STATE_SCHED, &n->state)) {
  5245. work = n->poll(n, weight);
  5246. trace_napi_poll(n, work, weight);
  5247. }
  5248. WARN_ON_ONCE(work > weight);
  5249. if (likely(work < weight))
  5250. goto out_unlock;
  5251. /* Drivers must not modify the NAPI state if they
  5252. * consume the entire weight. In such cases this code
  5253. * still "owns" the NAPI instance and therefore can
  5254. * move the instance around on the list at-will.
  5255. */
  5256. if (unlikely(napi_disable_pending(n))) {
  5257. napi_complete(n);
  5258. goto out_unlock;
  5259. }
  5260. if (n->gro_count) {
  5261. /* flush too old packets
  5262. * If HZ < 1000, flush all packets.
  5263. */
  5264. napi_gro_flush(n, HZ >= 1000);
  5265. }
  5266. /* Some drivers may have called napi_schedule
  5267. * prior to exhausting their budget.
  5268. */
  5269. if (unlikely(!list_empty(&n->poll_list))) {
  5270. pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
  5271. n->dev ? n->dev->name : "backlog");
  5272. goto out_unlock;
  5273. }
  5274. list_add_tail(&n->poll_list, repoll);
  5275. out_unlock:
  5276. netpoll_poll_unlock(have);
  5277. return work;
  5278. }
  5279. static __latent_entropy void net_rx_action(struct softirq_action *h)
  5280. {
  5281. struct softnet_data *sd = this_cpu_ptr(&softnet_data);
  5282. unsigned long time_limit = jiffies +
  5283. usecs_to_jiffies(netdev_budget_usecs);
  5284. int budget = netdev_budget;
  5285. LIST_HEAD(list);
  5286. LIST_HEAD(repoll);
  5287. local_irq_disable();
  5288. list_splice_init(&sd->poll_list, &list);
  5289. local_irq_enable();
  5290. for (;;) {
  5291. struct napi_struct *n;
  5292. if (list_empty(&list)) {
  5293. if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
  5294. goto out;
  5295. break;
  5296. }
  5297. n = list_first_entry(&list, struct napi_struct, poll_list);
  5298. budget -= napi_poll(n, &repoll);
/* If the softirq window is exhausted then punt.
* Allow this to run for up to 2 jiffies, which allows
* an average latency of 1.5/HZ.
*/
  5303. if (unlikely(budget <= 0 ||
  5304. time_after_eq(jiffies, time_limit))) {
  5305. sd->time_squeeze++;
  5306. break;
  5307. }
  5308. }
  5309. local_irq_disable();
  5310. list_splice_tail_init(&sd->poll_list, &list);
  5311. list_splice_tail(&repoll, &list);
  5312. list_splice(&list, &sd->poll_list);
  5313. if (!list_empty(&sd->poll_list))
  5314. __raise_softirq_irqoff(NET_RX_SOFTIRQ);
  5315. net_rps_action_and_irq_enable(sd);
  5316. out:
  5317. __kfree_skb_flush();
  5318. }
  5319. struct netdev_adjacent {
  5320. struct net_device *dev;
  5321. /* upper master flag, there can only be one master device per list */
  5322. bool master;
  5323. /* counter for the number of times this device was added to us */
  5324. u16 ref_nr;
  5325. /* private field for the users */
  5326. void *private;
  5327. struct list_head list;
  5328. struct rcu_head rcu;
  5329. };
  5330. static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
  5331. struct list_head *adj_list)
  5332. {
  5333. struct netdev_adjacent *adj;
  5334. list_for_each_entry(adj, adj_list, list) {
  5335. if (adj->dev == adj_dev)
  5336. return adj;
  5337. }
  5338. return NULL;
  5339. }
  5340. static int __netdev_has_upper_dev(struct net_device *upper_dev, void *data)
  5341. {
  5342. struct net_device *dev = data;
  5343. return upper_dev == dev;
  5344. }
/**
* netdev_has_upper_dev - Check if device is linked to an upper device
* @dev: device
* @upper_dev: upper device to check
*
* Find out if a device is linked to the specified upper device and return true
* in case it is. Note that this walks the whole chain of upper devices,
* not just the immediate one. The caller must hold the RTNL lock.
*/
  5354. bool netdev_has_upper_dev(struct net_device *dev,
  5355. struct net_device *upper_dev)
  5356. {
  5357. ASSERT_RTNL();
  5358. return netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
  5359. upper_dev);
  5360. }
  5361. EXPORT_SYMBOL(netdev_has_upper_dev);
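/* Usage sketch (illustrative, hypothetical names): refusing to stack a port
 * that is already below the prospective upper device, under RTNL.
 *
 *	ASSERT_RTNL();
 *	if (netdev_has_upper_dev(port_dev, master_dev))
 *		return -EBUSY;
 */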
/**
* netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
* @dev: device
* @upper_dev: upper device to check
*
* Find out if a device is linked to the specified upper device and return true
* in case it is. Note that this checks the entire upper device chain.
* The caller must hold the RCU read lock.
*/
  5371. bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
  5372. struct net_device *upper_dev)
  5373. {
  5374. return !!netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
  5375. upper_dev);
  5376. }
  5377. EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
  5378. /**
  5379. * netdev_has_any_upper_dev - Check if device is linked to some device
  5380. * @dev: device
  5381. *
  5382. * Find out if a device is linked to an upper device and return true in case
  5383. * it is. The caller must hold the RTNL lock.
  5384. */
  5385. bool netdev_has_any_upper_dev(struct net_device *dev)
  5386. {
  5387. ASSERT_RTNL();
  5388. return !list_empty(&dev->adj_list.upper);
  5389. }
  5390. EXPORT_SYMBOL(netdev_has_any_upper_dev);
  5391. /**
  5392. * netdev_master_upper_dev_get - Get master upper device
  5393. * @dev: device
  5394. *
  5395. * Find a master upper device and return pointer to it or NULL in case
  5396. * it's not there. The caller must hold the RTNL lock.
  5397. */
  5398. struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
  5399. {
  5400. struct netdev_adjacent *upper;
  5401. ASSERT_RTNL();
  5402. if (list_empty(&dev->adj_list.upper))
  5403. return NULL;
  5404. upper = list_first_entry(&dev->adj_list.upper,
  5405. struct netdev_adjacent, list);
  5406. if (likely(upper->master))
  5407. return upper->dev;
  5408. return NULL;
  5409. }
  5410. EXPORT_SYMBOL(netdev_master_upper_dev_get);
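/* Usage sketch (illustrative): looking up the bonding/bridge master of a
 * port while holding RTNL; port_dev is a hypothetical name.
 *
 *	struct net_device *master = netdev_master_upper_dev_get(port_dev);
 *
 *	if (master)
 *		netdev_info(port_dev, "enslaved to %s\n", master->name);
 */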
  5411. /**
  5412. * netdev_has_any_lower_dev - Check if device is linked to some device
  5413. * @dev: device
  5414. *
  5415. * Find out if a device is linked to a lower device and return true in case
  5416. * it is. The caller must hold the RTNL lock.
  5417. */
  5418. static bool netdev_has_any_lower_dev(struct net_device *dev)
  5419. {
  5420. ASSERT_RTNL();
  5421. return !list_empty(&dev->adj_list.lower);
  5422. }
  5423. void *netdev_adjacent_get_private(struct list_head *adj_list)
  5424. {
  5425. struct netdev_adjacent *adj;
  5426. adj = list_entry(adj_list, struct netdev_adjacent, list);
  5427. return adj->private;
  5428. }
  5429. EXPORT_SYMBOL(netdev_adjacent_get_private);
  5430. /**
  5431. * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
  5432. * @dev: device
  5433. * @iter: list_head ** of the current position
  5434. *
  5435. * Gets the next device from the dev's upper list, starting from iter
  5436. * position. The caller must hold RCU read lock.
  5437. */
  5438. struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
  5439. struct list_head **iter)
  5440. {
  5441. struct netdev_adjacent *upper;
  5442. WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
  5443. upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
  5444. if (&upper->list == &dev->adj_list.upper)
  5445. return NULL;
  5446. *iter = &upper->list;
  5447. return upper->dev;
  5448. }
  5449. EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
  5450. static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
  5451. struct list_head **iter)
  5452. {
  5453. struct netdev_adjacent *upper;
  5454. WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
  5455. upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
  5456. if (&upper->list == &dev->adj_list.upper)
  5457. return NULL;
  5458. *iter = &upper->list;
  5459. return upper->dev;
  5460. }
  5461. int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
  5462. int (*fn)(struct net_device *dev,
  5463. void *data),
  5464. void *data)
  5465. {
  5466. struct net_device *udev;
  5467. struct list_head *iter;
  5468. int ret;
  5469. for (iter = &dev->adj_list.upper,
  5470. udev = netdev_next_upper_dev_rcu(dev, &iter);
  5471. udev;
  5472. udev = netdev_next_upper_dev_rcu(dev, &iter)) {
  5473. /* first is the upper device itself */
  5474. ret = fn(udev, data);
  5475. if (ret)
  5476. return ret;
  5477. /* then look at all of its upper devices */
  5478. ret = netdev_walk_all_upper_dev_rcu(udev, fn, data);
  5479. if (ret)
  5480. return ret;
  5481. }
  5482. return 0;
  5483. }
  5484. EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
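/* Usage sketch (illustrative): the callback convention for the walker above.
 * The walk visits every upper device recursively, under rcu_read_lock() or
 * RTNL; returning non-zero stops it and propagates that value. count_upper
 * is a hypothetical helper.
 *
 *	static int count_upper(struct net_device *upper, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;	// keep walking
 *	}
 *
 *	int n = 0;
 *
 *	netdev_walk_all_upper_dev_rcu(dev, count_upper, &n);
 */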
/**
* netdev_lower_get_next_private - Get the next ->private from the
* lower neighbour list
* @dev: device
* @iter: list_head ** of the current position
*
* Gets the next netdev_adjacent->private from the dev's lower neighbour
* list, starting from iter position. The caller must hold either the
* RTNL lock or its own locking that guarantees that the neighbour lower
* list will remain unchanged.
*/
  5496. void *netdev_lower_get_next_private(struct net_device *dev,
  5497. struct list_head **iter)
  5498. {
  5499. struct netdev_adjacent *lower;
  5500. lower = list_entry(*iter, struct netdev_adjacent, list);
  5501. if (&lower->list == &dev->adj_list.lower)
  5502. return NULL;
  5503. *iter = lower->list.next;
  5504. return lower->private;
  5505. }
  5506. EXPORT_SYMBOL(netdev_lower_get_next_private);
  5507. /**
  5508. * netdev_lower_get_next_private_rcu - Get the next ->private from the
  5509. * lower neighbour list, RCU
  5510. * variant
  5511. * @dev: device
  5512. * @iter: list_head ** of the current position
  5513. *
  5514. * Gets the next netdev_adjacent->private from the dev's lower neighbour
  5515. * list, starting from iter position. The caller must hold RCU read lock.
  5516. */
  5517. void *netdev_lower_get_next_private_rcu(struct net_device *dev,
  5518. struct list_head **iter)
  5519. {
  5520. struct netdev_adjacent *lower;
  5521. WARN_ON_ONCE(!rcu_read_lock_held());
  5522. lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
  5523. if (&lower->list == &dev->adj_list.lower)
  5524. return NULL;
  5525. *iter = &lower->list;
  5526. return lower->private;
  5527. }
  5528. EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
  5529. /**
  5530. * netdev_lower_get_next - Get the next device from the lower neighbour
  5531. * list
  5532. * @dev: device
  5533. * @iter: list_head ** of the current position
  5534. *
  5535. * Gets the next netdev_adjacent from the dev's lower neighbour
  5536. * list, starting from iter position. The caller must hold RTNL lock or
  5537. * its own locking that guarantees that the neighbour lower
  5538. * list will remain unchanged.
  5539. */
  5540. void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
  5541. {
  5542. struct netdev_adjacent *lower;
  5543. lower = list_entry(*iter, struct netdev_adjacent, list);
  5544. if (&lower->list == &dev->adj_list.lower)
  5545. return NULL;
  5546. *iter = lower->list.next;
  5547. return lower->dev;
  5548. }
  5549. EXPORT_SYMBOL(netdev_lower_get_next);
  5550. static struct net_device *netdev_next_lower_dev(struct net_device *dev,
  5551. struct list_head **iter)
  5552. {
  5553. struct netdev_adjacent *lower;
  5554. lower = list_entry((*iter)->next, struct netdev_adjacent, list);
  5555. if (&lower->list == &dev->adj_list.lower)
  5556. return NULL;
  5557. *iter = &lower->list;
  5558. return lower->dev;
  5559. }
  5560. int netdev_walk_all_lower_dev(struct net_device *dev,
  5561. int (*fn)(struct net_device *dev,
  5562. void *data),
  5563. void *data)
  5564. {
  5565. struct net_device *ldev;
  5566. struct list_head *iter;
  5567. int ret;
  5568. for (iter = &dev->adj_list.lower,
  5569. ldev = netdev_next_lower_dev(dev, &iter);
  5570. ldev;
  5571. ldev = netdev_next_lower_dev(dev, &iter)) {
  5572. /* first is the lower device itself */
  5573. ret = fn(ldev, data);
  5574. if (ret)
  5575. return ret;
  5576. /* then look at all of its lower devices */
  5577. ret = netdev_walk_all_lower_dev(ldev, fn, data);
  5578. if (ret)
  5579. return ret;
  5580. }
  5581. return 0;
  5582. }
  5583. EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);
  5584. static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
  5585. struct list_head **iter)
  5586. {
  5587. struct netdev_adjacent *lower;
  5588. lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
  5589. if (&lower->list == &dev->adj_list.lower)
  5590. return NULL;
  5591. *iter = &lower->list;
  5592. return lower->dev;
  5593. }
  5594. int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
  5595. int (*fn)(struct net_device *dev,
  5596. void *data),
  5597. void *data)
  5598. {
  5599. struct net_device *ldev;
  5600. struct list_head *iter;
  5601. int ret;
  5602. for (iter = &dev->adj_list.lower,
  5603. ldev = netdev_next_lower_dev_rcu(dev, &iter);
  5604. ldev;
  5605. ldev = netdev_next_lower_dev_rcu(dev, &iter)) {
  5606. /* first is the lower device itself */
  5607. ret = fn(ldev, data);
  5608. if (ret)
  5609. return ret;
  5610. /* then look at all of its lower devices */
  5611. ret = netdev_walk_all_lower_dev_rcu(ldev, fn, data);
  5612. if (ret)
  5613. return ret;
  5614. }
  5615. return 0;
  5616. }
  5617. EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);
  5618. /**
  5619. * netdev_lower_get_first_private_rcu - Get the first ->private from the
  5620. * lower neighbour list, RCU
  5621. * variant
  5622. * @dev: device
  5623. *
  5624. * Gets the first netdev_adjacent->private from the dev's lower neighbour
  5625. * list. The caller must hold RCU read lock.
  5626. */
  5627. void *netdev_lower_get_first_private_rcu(struct net_device *dev)
  5628. {
  5629. struct netdev_adjacent *lower;
  5630. lower = list_first_or_null_rcu(&dev->adj_list.lower,
  5631. struct netdev_adjacent, list);
  5632. if (lower)
  5633. return lower->private;
  5634. return NULL;
  5635. }
  5636. EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
  5637. /**
  5638. * netdev_master_upper_dev_get_rcu - Get master upper device
  5639. * @dev: device
  5640. *
  5641. * Find a master upper device and return pointer to it or NULL in case
  5642. * it's not there. The caller must hold the RCU read lock.
  5643. */
  5644. struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
  5645. {
  5646. struct netdev_adjacent *upper;
  5647. upper = list_first_or_null_rcu(&dev->adj_list.upper,
  5648. struct netdev_adjacent, list);
  5649. if (upper && likely(upper->master))
  5650. return upper->dev;
  5651. return NULL;
  5652. }
  5653. EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
  5654. static int netdev_adjacent_sysfs_add(struct net_device *dev,
  5655. struct net_device *adj_dev,
  5656. struct list_head *dev_list)
  5657. {
  5658. char linkname[IFNAMSIZ+7];
  5659. sprintf(linkname, dev_list == &dev->adj_list.upper ?
  5660. "upper_%s" : "lower_%s", adj_dev->name);
  5661. return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
  5662. linkname);
  5663. }
  5664. static void netdev_adjacent_sysfs_del(struct net_device *dev,
  5665. char *name,
  5666. struct list_head *dev_list)
  5667. {
  5668. char linkname[IFNAMSIZ+7];
  5669. sprintf(linkname, dev_list == &dev->adj_list.upper ?
  5670. "upper_%s" : "lower_%s", name);
  5671. sysfs_remove_link(&(dev->dev.kobj), linkname);
  5672. }
  5673. static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
  5674. struct net_device *adj_dev,
  5675. struct list_head *dev_list)
  5676. {
  5677. return (dev_list == &dev->adj_list.upper ||
  5678. dev_list == &dev->adj_list.lower) &&
  5679. net_eq(dev_net(dev), dev_net(adj_dev));
  5680. }
  5681. static int __netdev_adjacent_dev_insert(struct net_device *dev,
  5682. struct net_device *adj_dev,
  5683. struct list_head *dev_list,
  5684. void *private, bool master)
  5685. {
  5686. struct netdev_adjacent *adj;
  5687. int ret;
  5688. adj = __netdev_find_adj(adj_dev, dev_list);
  5689. if (adj) {
  5690. adj->ref_nr += 1;
  5691. pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
  5692. dev->name, adj_dev->name, adj->ref_nr);
  5693. return 0;
  5694. }
  5695. adj = kmalloc(sizeof(*adj), GFP_KERNEL);
  5696. if (!adj)
  5697. return -ENOMEM;
  5698. adj->dev = adj_dev;
  5699. adj->master = master;
  5700. adj->ref_nr = 1;
  5701. adj->private = private;
  5702. dev_hold(adj_dev);
  5703. pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
  5704. dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);
  5705. if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
  5706. ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
  5707. if (ret)
  5708. goto free_adj;
  5709. }
  5710. /* Ensure that master link is always the first item in list. */
  5711. if (master) {
  5712. ret = sysfs_create_link(&(dev->dev.kobj),
  5713. &(adj_dev->dev.kobj), "master");
  5714. if (ret)
  5715. goto remove_symlinks;
  5716. list_add_rcu(&adj->list, dev_list);
  5717. } else {
  5718. list_add_tail_rcu(&adj->list, dev_list);
  5719. }
  5720. return 0;
  5721. remove_symlinks:
  5722. if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
  5723. netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
  5724. free_adj:
  5725. kfree(adj);
  5726. dev_put(adj_dev);
  5727. return ret;
  5728. }
  5729. static void __netdev_adjacent_dev_remove(struct net_device *dev,
  5730. struct net_device *adj_dev,
  5731. u16 ref_nr,
  5732. struct list_head *dev_list)
  5733. {
  5734. struct netdev_adjacent *adj;
  5735. pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
  5736. dev->name, adj_dev->name, ref_nr);
  5737. adj = __netdev_find_adj(adj_dev, dev_list);
  5738. if (!adj) {
  5739. pr_err("Adjacency does not exist for device %s from %s\n",
  5740. dev->name, adj_dev->name);
  5741. WARN_ON(1);
  5742. return;
  5743. }
  5744. if (adj->ref_nr > ref_nr) {
  5745. pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
  5746. dev->name, adj_dev->name, ref_nr,
  5747. adj->ref_nr - ref_nr);
  5748. adj->ref_nr -= ref_nr;
  5749. return;
  5750. }
  5751. if (adj->master)
  5752. sysfs_remove_link(&(dev->dev.kobj), "master");
  5753. if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
  5754. netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
  5755. list_del_rcu(&adj->list);
  5756. pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
  5757. adj_dev->name, dev->name, adj_dev->name);
  5758. dev_put(adj_dev);
  5759. kfree_rcu(adj, rcu);
  5760. }
  5761. static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
  5762. struct net_device *upper_dev,
  5763. struct list_head *up_list,
  5764. struct list_head *down_list,
  5765. void *private, bool master)
  5766. {
  5767. int ret;
  5768. ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
  5769. private, master);
  5770. if (ret)
  5771. return ret;
  5772. ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
  5773. private, false);
  5774. if (ret) {
  5775. __netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
  5776. return ret;
  5777. }
  5778. return 0;
  5779. }
  5780. static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
  5781. struct net_device *upper_dev,
  5782. u16 ref_nr,
  5783. struct list_head *up_list,
  5784. struct list_head *down_list)
  5785. {
  5786. __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
  5787. __netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
  5788. }
  5789. static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
  5790. struct net_device *upper_dev,
  5791. void *private, bool master)
  5792. {
  5793. return __netdev_adjacent_dev_link_lists(dev, upper_dev,
  5794. &dev->adj_list.upper,
  5795. &upper_dev->adj_list.lower,
  5796. private, master);
  5797. }
  5798. static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
  5799. struct net_device *upper_dev)
  5800. {
  5801. __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
  5802. &dev->adj_list.upper,
  5803. &upper_dev->adj_list.lower);
  5804. }
  5805. static int __netdev_upper_dev_link(struct net_device *dev,
  5806. struct net_device *upper_dev, bool master,
  5807. void *upper_priv, void *upper_info,
  5808. struct netlink_ext_ack *extack)
  5809. {
  5810. struct netdev_notifier_changeupper_info changeupper_info = {
  5811. .info = {
  5812. .dev = dev,
  5813. .extack = extack,
  5814. },
  5815. .upper_dev = upper_dev,
  5816. .master = master,
  5817. .linking = true,
  5818. .upper_info = upper_info,
  5819. };
  5820. struct net_device *master_dev;
  5821. int ret = 0;
  5822. ASSERT_RTNL();
  5823. if (dev == upper_dev)
  5824. return -EBUSY;
  5825. /* To prevent loops, check if dev is not upper device to upper_dev. */
  5826. if (netdev_has_upper_dev(upper_dev, dev))
  5827. return -EBUSY;
  5828. if (!master) {
  5829. if (netdev_has_upper_dev(dev, upper_dev))
  5830. return -EEXIST;
  5831. } else {
  5832. master_dev = netdev_master_upper_dev_get(dev);
  5833. if (master_dev)
  5834. return master_dev == upper_dev ? -EEXIST : -EBUSY;
  5835. }
  5836. ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
  5837. &changeupper_info.info);
  5838. ret = notifier_to_errno(ret);
  5839. if (ret)
  5840. return ret;
  5841. ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
  5842. master);
  5843. if (ret)
  5844. return ret;
  5845. ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
  5846. &changeupper_info.info);
  5847. ret = notifier_to_errno(ret);
  5848. if (ret)
  5849. goto rollback;
  5850. return 0;
  5851. rollback:
  5852. __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
  5853. return ret;
  5854. }
  5855. /**
  5856. * netdev_upper_dev_link - Add a link to the upper device
  5857. * @dev: device
  5858. * @upper_dev: new upper device
  5859. * @extack: netlink extended ack
  5860. *
  5861. * Adds a link to device which is upper to this one. The caller must hold
  5862. * the RTNL lock. On a failure a negative errno code is returned.
  5863. * On success the reference counts are adjusted and the function
  5864. * returns zero.
  5865. */
  5866. int netdev_upper_dev_link(struct net_device *dev,
  5867. struct net_device *upper_dev,
  5868. struct netlink_ext_ack *extack)
  5869. {
  5870. return __netdev_upper_dev_link(dev, upper_dev, false,
  5871. NULL, NULL, extack);
  5872. }
  5873. EXPORT_SYMBOL(netdev_upper_dev_link);
  5874. /**
  5875. * netdev_master_upper_dev_link - Add a master link to the upper device
  5876. * @dev: device
  5877. * @upper_dev: new upper device
  5878. * @upper_priv: upper device private
  5879. * @upper_info: upper info to be passed down via notifier
  5880. * @extack: netlink extended ack
  5881. *
  5882. * Adds a link to device which is upper to this one. In this case, only
  5883. * one master upper device can be linked, although other non-master devices
  5884. * might be linked as well. The caller must hold the RTNL lock.
  5885. * On a failure a negative errno code is returned. On success the reference
  5886. * counts are adjusted and the function returns zero.
  5887. */
  5888. int netdev_master_upper_dev_link(struct net_device *dev,
  5889. struct net_device *upper_dev,
  5890. void *upper_priv, void *upper_info,
  5891. struct netlink_ext_ack *extack)
  5892. {
  5893. return __netdev_upper_dev_link(dev, upper_dev, true,
  5894. upper_priv, upper_info, extack);
  5895. }
  5896. EXPORT_SYMBOL(netdev_master_upper_dev_link);
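/* Usage sketch (illustrative): how a bonding-like driver might enslave a
 * port under RTNL. port_dev, bond_dev, slave_priv and extack are
 * hypothetical; extack lets the core report failures back to userspace.
 *
 *	err = netdev_master_upper_dev_link(port_dev, bond_dev,
 *					   slave_priv, NULL, extack);
 *	if (err)
 *		return err;
 */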
  5897. /**
  5898. * netdev_upper_dev_unlink - Removes a link to upper device
  5899. * @dev: device
  5900. * @upper_dev: new upper device
  5901. *
  5902. * Removes a link to device which is upper to this one. The caller must hold
  5903. * the RTNL lock.
  5904. */
  5905. void netdev_upper_dev_unlink(struct net_device *dev,
  5906. struct net_device *upper_dev)
  5907. {
  5908. struct netdev_notifier_changeupper_info changeupper_info = {
  5909. .info = {
  5910. .dev = dev,
  5911. },
  5912. .upper_dev = upper_dev,
  5913. .linking = false,
  5914. };
  5915. ASSERT_RTNL();
  5916. changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
  5917. call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
  5918. &changeupper_info.info);
  5919. __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
  5920. call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
  5921. &changeupper_info.info);
  5922. }
  5923. EXPORT_SYMBOL(netdev_upper_dev_unlink);
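/* Sketch (illustrative): the matching teardown for the link above, also
 * performed under RTNL.
 *
 *	netdev_upper_dev_unlink(port_dev, bond_dev);
 */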
  5924. /**
  5925. * netdev_bonding_info_change - Dispatch event about slave change
  5926. * @dev: device
  5927. * @bonding_info: info to dispatch
  5928. *
  5929. * Send NETDEV_BONDING_INFO to netdev notifiers with info.
  5930. * The caller must hold the RTNL lock.
  5931. */
  5932. void netdev_bonding_info_change(struct net_device *dev,
  5933. struct netdev_bonding_info *bonding_info)
  5934. {
  5935. struct netdev_notifier_bonding_info info = {
  5936. .info.dev = dev,
  5937. };
  5938. memcpy(&info.bonding_info, bonding_info,
  5939. sizeof(struct netdev_bonding_info));
  5940. call_netdevice_notifiers_info(NETDEV_BONDING_INFO,
  5941. &info.info);
  5942. }
  5943. EXPORT_SYMBOL(netdev_bonding_info_change);
  5944. static void netdev_adjacent_add_links(struct net_device *dev)
  5945. {
  5946. struct netdev_adjacent *iter;
  5947. struct net *net = dev_net(dev);
  5948. list_for_each_entry(iter, &dev->adj_list.upper, list) {
  5949. if (!net_eq(net, dev_net(iter->dev)))
  5950. continue;
  5951. netdev_adjacent_sysfs_add(iter->dev, dev,
  5952. &iter->dev->adj_list.lower);
  5953. netdev_adjacent_sysfs_add(dev, iter->dev,
  5954. &dev->adj_list.upper);
  5955. }
  5956. list_for_each_entry(iter, &dev->adj_list.lower, list) {
  5957. if (!net_eq(net, dev_net(iter->dev)))
  5958. continue;
  5959. netdev_adjacent_sysfs_add(iter->dev, dev,
  5960. &iter->dev->adj_list.upper);
  5961. netdev_adjacent_sysfs_add(dev, iter->dev,
  5962. &dev->adj_list.lower);
  5963. }
  5964. }
  5965. static void netdev_adjacent_del_links(struct net_device *dev)
  5966. {
  5967. struct netdev_adjacent *iter;
  5968. struct net *net = dev_net(dev);
  5969. list_for_each_entry(iter, &dev->adj_list.upper, list) {
  5970. if (!net_eq(net, dev_net(iter->dev)))
  5971. continue;
  5972. netdev_adjacent_sysfs_del(iter->dev, dev->name,
  5973. &iter->dev->adj_list.lower);
  5974. netdev_adjacent_sysfs_del(dev, iter->dev->name,
  5975. &dev->adj_list.upper);
  5976. }
  5977. list_for_each_entry(iter, &dev->adj_list.lower, list) {
  5978. if (!net_eq(net, dev_net(iter->dev)))
  5979. continue;
  5980. netdev_adjacent_sysfs_del(iter->dev, dev->name,
  5981. &iter->dev->adj_list.upper);
  5982. netdev_adjacent_sysfs_del(dev, iter->dev->name,
  5983. &dev->adj_list.lower);
  5984. }
  5985. }
  5986. void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
  5987. {
  5988. struct netdev_adjacent *iter;
  5989. struct net *net = dev_net(dev);
  5990. list_for_each_entry(iter, &dev->adj_list.upper, list) {
  5991. if (!net_eq(net, dev_net(iter->dev)))
  5992. continue;
  5993. netdev_adjacent_sysfs_del(iter->dev, oldname,
  5994. &iter->dev->adj_list.lower);
  5995. netdev_adjacent_sysfs_add(iter->dev, dev,
  5996. &iter->dev->adj_list.lower);
  5997. }
  5998. list_for_each_entry(iter, &dev->adj_list.lower, list) {
  5999. if (!net_eq(net, dev_net(iter->dev)))
  6000. continue;
  6001. netdev_adjacent_sysfs_del(iter->dev, oldname,
  6002. &iter->dev->adj_list.upper);
  6003. netdev_adjacent_sysfs_add(iter->dev, dev,
  6004. &iter->dev->adj_list.upper);
  6005. }
  6006. }
  6007. void *netdev_lower_dev_get_private(struct net_device *dev,
  6008. struct net_device *lower_dev)
  6009. {
  6010. struct netdev_adjacent *lower;
  6011. if (!lower_dev)
  6012. return NULL;
  6013. lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
  6014. if (!lower)
  6015. return NULL;
  6016. return lower->private;
  6017. }
  6018. EXPORT_SYMBOL(netdev_lower_dev_get_private);
  6019. int dev_get_nest_level(struct net_device *dev)
  6020. {
  6021. struct net_device *lower = NULL;
  6022. struct list_head *iter;
  6023. int max_nest = -1;
  6024. int nest;
  6025. ASSERT_RTNL();
  6026. netdev_for_each_lower_dev(dev, lower, iter) {
  6027. nest = dev_get_nest_level(lower);
  6028. if (max_nest < nest)
  6029. max_nest = nest;
  6030. }
  6031. return max_nest + 1;
  6032. }
  6033. EXPORT_SYMBOL(dev_get_nest_level);
/**
* netdev_lower_state_changed - Dispatch event about lower device state change
* @lower_dev: device
* @lower_state_info: state to dispatch
*
* Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
* The caller must hold the RTNL lock.
*/
  6042. void netdev_lower_state_changed(struct net_device *lower_dev,
  6043. void *lower_state_info)
  6044. {
  6045. struct netdev_notifier_changelowerstate_info changelowerstate_info = {
  6046. .info.dev = lower_dev,
  6047. };
  6048. ASSERT_RTNL();
  6049. changelowerstate_info.lower_state_info = lower_state_info;
  6050. call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE,
  6051. &changelowerstate_info.info);
  6052. }
  6053. EXPORT_SYMBOL(netdev_lower_state_changed);
  6054. static void dev_change_rx_flags(struct net_device *dev, int flags)
  6055. {
  6056. const struct net_device_ops *ops = dev->netdev_ops;
  6057. if (ops->ndo_change_rx_flags)
  6058. ops->ndo_change_rx_flags(dev, flags);
  6059. }
  6060. static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
  6061. {
  6062. unsigned int old_flags = dev->flags;
  6063. kuid_t uid;
  6064. kgid_t gid;
  6065. ASSERT_RTNL();
  6066. dev->flags |= IFF_PROMISC;
  6067. dev->promiscuity += inc;
  6068. if (dev->promiscuity == 0) {
/*
* Avoid overflow.
* If inc causes overflow, leave promiscuity untouched and return an error.
*/
  6073. if (inc < 0)
  6074. dev->flags &= ~IFF_PROMISC;
  6075. else {
  6076. dev->promiscuity -= inc;
  6077. pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
  6078. dev->name);
  6079. return -EOVERFLOW;
  6080. }
  6081. }
  6082. if (dev->flags != old_flags) {
  6083. pr_info("device %s %s promiscuous mode\n",
  6084. dev->name,
  6085. dev->flags & IFF_PROMISC ? "entered" : "left");
  6086. if (audit_enabled) {
  6087. current_uid_gid(&uid, &gid);
  6088. audit_log(audit_context(), GFP_ATOMIC,
  6089. AUDIT_ANOM_PROMISCUOUS,
  6090. "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
  6091. dev->name, (dev->flags & IFF_PROMISC),
  6092. (old_flags & IFF_PROMISC),
  6093. from_kuid(&init_user_ns, audit_get_loginuid(current)),
  6094. from_kuid(&init_user_ns, uid),
  6095. from_kgid(&init_user_ns, gid),
  6096. audit_get_sessionid(current));
  6097. }
  6098. dev_change_rx_flags(dev, IFF_PROMISC);
  6099. }
  6100. if (notify)
  6101. __dev_notify_flags(dev, old_flags, IFF_PROMISC);
  6102. return 0;
  6103. }
  6104. /**
  6105. * dev_set_promiscuity - update promiscuity count on a device
  6106. * @dev: device
  6107. * @inc: modifier
  6108. *
  6109. * Add or remove promiscuity from a device. While the count in the device
  6110. * remains above zero the interface remains promiscuous. Once it hits zero
  6111. * the device reverts back to normal filtering operation. A negative inc
  6112. * value is used to drop promiscuity on the device.
  6113. * Return 0 if successful or a negative errno code on error.
  6114. */
  6115. int dev_set_promiscuity(struct net_device *dev, int inc)
  6116. {
  6117. unsigned int old_flags = dev->flags;
  6118. int err;
  6119. err = __dev_set_promiscuity(dev, inc, true);
  6120. if (err < 0)
  6121. return err;
  6122. if (dev->flags != old_flags)
  6123. dev_set_rx_mode(dev);
  6124. return err;
  6125. }
  6126. EXPORT_SYMBOL(dev_set_promiscuity);
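/* Usage sketch (illustrative): promiscuity is a counter, so every +1 must
 * eventually be paired with a -1, and both calls need RTNL.
 *
 *	rtnl_lock();
 *	err = dev_set_promiscuity(dev, 1);	// enter promiscuous mode
 *	rtnl_unlock();
 *	...
 *	rtnl_lock();
 *	dev_set_promiscuity(dev, -1);		// drop our reference again
 *	rtnl_unlock();
 */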
  6127. static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
  6128. {
  6129. unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
  6130. ASSERT_RTNL();
  6131. dev->flags |= IFF_ALLMULTI;
  6132. dev->allmulti += inc;
  6133. if (dev->allmulti == 0) {
/*
* Avoid overflow.
* If inc causes overflow, leave allmulti untouched and return an error.
*/
  6138. if (inc < 0)
  6139. dev->flags &= ~IFF_ALLMULTI;
  6140. else {
  6141. dev->allmulti -= inc;
  6142. pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
  6143. dev->name);
  6144. return -EOVERFLOW;
  6145. }
  6146. }
  6147. if (dev->flags ^ old_flags) {
  6148. dev_change_rx_flags(dev, IFF_ALLMULTI);
  6149. dev_set_rx_mode(dev);
  6150. if (notify)
  6151. __dev_notify_flags(dev, old_flags,
  6152. dev->gflags ^ old_gflags);
  6153. }
  6154. return 0;
  6155. }
  6156. /**
  6157. * dev_set_allmulti - update allmulti count on a device
  6158. * @dev: device
  6159. * @inc: modifier
  6160. *
* Add or remove reception of all multicast frames to a device. While the
* count in the device remains above zero the interface remains listening
* to all multicast frames. Once it hits zero the device reverts back to
* normal filtering operation. A negative @inc value is used to drop the
* counter when releasing a resource needing all multicasts.
* Return 0 if successful or a negative errno code on error.
  6167. */
  6168. int dev_set_allmulti(struct net_device *dev, int inc)
  6169. {
  6170. return __dev_set_allmulti(dev, inc, true);
  6171. }
  6172. EXPORT_SYMBOL(dev_set_allmulti);
  6173. /*
  6174. * Upload unicast and multicast address lists to device and
  6175. * configure RX filtering. When the device doesn't support unicast
  6176. * filtering it is put in promiscuous mode while unicast addresses
  6177. * are present.
  6178. */
  6179. void __dev_set_rx_mode(struct net_device *dev)
  6180. {
  6181. const struct net_device_ops *ops = dev->netdev_ops;
  6182. /* dev_open will call this function so the list will stay sane. */
  6183. if (!(dev->flags&IFF_UP))
  6184. return;
  6185. if (!netif_device_present(dev))
  6186. return;
  6187. if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
/* Unicast address changes may only happen under the rtnl,
* therefore calling __dev_set_promiscuity() here is safe.
*/
  6191. if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
  6192. __dev_set_promiscuity(dev, 1, false);
  6193. dev->uc_promisc = true;
  6194. } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
  6195. __dev_set_promiscuity(dev, -1, false);
  6196. dev->uc_promisc = false;
  6197. }
  6198. }
  6199. if (ops->ndo_set_rx_mode)
  6200. ops->ndo_set_rx_mode(dev);
  6201. }
  6202. void dev_set_rx_mode(struct net_device *dev)
  6203. {
  6204. netif_addr_lock_bh(dev);
  6205. __dev_set_rx_mode(dev);
  6206. netif_addr_unlock_bh(dev);
  6207. }
  6208. /**
  6209. * dev_get_flags - get flags reported to userspace
  6210. * @dev: device
  6211. *
  6212. * Get the combination of flag bits exported through APIs to userspace.
  6213. */
  6214. unsigned int dev_get_flags(const struct net_device *dev)
  6215. {
  6216. unsigned int flags;
  6217. flags = (dev->flags & ~(IFF_PROMISC |
  6218. IFF_ALLMULTI |
  6219. IFF_RUNNING |
  6220. IFF_LOWER_UP |
  6221. IFF_DORMANT)) |
  6222. (dev->gflags & (IFF_PROMISC |
  6223. IFF_ALLMULTI));
  6224. if (netif_running(dev)) {
  6225. if (netif_oper_up(dev))
  6226. flags |= IFF_RUNNING;
  6227. if (netif_carrier_ok(dev))
  6228. flags |= IFF_LOWER_UP;
  6229. if (netif_dormant(dev))
  6230. flags |= IFF_DORMANT;
  6231. }
  6232. return flags;
  6233. }
  6234. EXPORT_SYMBOL(dev_get_flags);
  6235. int __dev_change_flags(struct net_device *dev, unsigned int flags)
  6236. {
  6237. unsigned int old_flags = dev->flags;
  6238. int ret;
  6239. ASSERT_RTNL();
  6240. /*
  6241. * Set the flags on our device.
  6242. */
  6243. dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
  6244. IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
  6245. IFF_AUTOMEDIA)) |
  6246. (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
  6247. IFF_ALLMULTI));
  6248. /*
  6249. * Load in the correct multicast list now the flags have changed.
  6250. */
  6251. if ((old_flags ^ flags) & IFF_MULTICAST)
  6252. dev_change_rx_flags(dev, IFF_MULTICAST);
  6253. dev_set_rx_mode(dev);
/*
* Have we downed the interface? We handle IFF_UP ourselves
* according to user attempts to set it, rather than blindly
* setting it.
*/
  6259. ret = 0;
  6260. if ((old_flags ^ flags) & IFF_UP) {
  6261. if (old_flags & IFF_UP)
  6262. __dev_close(dev);
  6263. else
  6264. ret = __dev_open(dev);
  6265. }
  6266. if ((flags ^ dev->gflags) & IFF_PROMISC) {
  6267. int inc = (flags & IFF_PROMISC) ? 1 : -1;
  6268. unsigned int old_flags = dev->flags;
  6269. dev->gflags ^= IFF_PROMISC;
  6270. if (__dev_set_promiscuity(dev, inc, false) >= 0)
  6271. if (dev->flags != old_flags)
  6272. dev_set_rx_mode(dev);
  6273. }
/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
* is important. Some (broken) drivers set IFF_PROMISC when
* IFF_ALLMULTI is requested, without asking us and without reporting it.
*/
  6278. if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
  6279. int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
  6280. dev->gflags ^= IFF_ALLMULTI;
  6281. __dev_set_allmulti(dev, inc, false);
  6282. }
  6283. return ret;
  6284. }
  6285. void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
  6286. unsigned int gchanges)
  6287. {
  6288. unsigned int changes = dev->flags ^ old_flags;
  6289. if (gchanges)
  6290. rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
  6291. if (changes & IFF_UP) {
  6292. if (dev->flags & IFF_UP)
  6293. call_netdevice_notifiers(NETDEV_UP, dev);
  6294. else
  6295. call_netdevice_notifiers(NETDEV_DOWN, dev);
  6296. }
  6297. if (dev->flags & IFF_UP &&
  6298. (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
  6299. struct netdev_notifier_change_info change_info = {
  6300. .info = {
  6301. .dev = dev,
  6302. },
  6303. .flags_changed = changes,
  6304. };
  6305. call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info);
  6306. }
  6307. }
  6308. /**
  6309. * dev_change_flags - change device settings
  6310. * @dev: device
  6311. * @flags: device state flags
  6312. *
  6313. * Change settings on device based state flags. The flags are
  6314. * in the userspace exported format.
  6315. */
  6316. int dev_change_flags(struct net_device *dev, unsigned int flags)
  6317. {
  6318. int ret;
  6319. unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
  6320. ret = __dev_change_flags(dev, flags);
  6321. if (ret < 0)
  6322. return ret;
  6323. changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
  6324. __dev_notify_flags(dev, old_flags, changes);
  6325. return ret;
  6326. }
  6327. EXPORT_SYMBOL(dev_change_flags);
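/* Usage sketch (illustrative): bringing an interface administratively up the
 * way the ioctl path does, by OR-ing IFF_UP into the userspace-visible flags.
 *
 *	rtnl_lock();
 *	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
 *	rtnl_unlock();
 */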
  6328. int __dev_set_mtu(struct net_device *dev, int new_mtu)
  6329. {
  6330. const struct net_device_ops *ops = dev->netdev_ops;
  6331. if (ops->ndo_change_mtu)
  6332. return ops->ndo_change_mtu(dev, new_mtu);
  6333. dev->mtu = new_mtu;
  6334. return 0;
  6335. }
  6336. EXPORT_SYMBOL(__dev_set_mtu);
  6337. /**
  6338. * dev_set_mtu - Change maximum transfer unit
  6339. * @dev: device
  6340. * @new_mtu: new transfer unit
  6341. *
  6342. * Change the maximum transfer size of the network device.
  6343. */
  6344. int dev_set_mtu(struct net_device *dev, int new_mtu)
  6345. {
  6346. int err, orig_mtu;
  6347. if (new_mtu == dev->mtu)
  6348. return 0;
  6349. /* MTU must be positive, and in range */
  6350. if (new_mtu < 0 || new_mtu < dev->min_mtu) {
  6351. net_err_ratelimited("%s: Invalid MTU %d requested, hw min %d\n",
  6352. dev->name, new_mtu, dev->min_mtu);
  6353. return -EINVAL;
  6354. }
  6355. if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
  6356. net_err_ratelimited("%s: Invalid MTU %d requested, hw max %d\n",
  6357. dev->name, new_mtu, dev->max_mtu);
  6358. return -EINVAL;
  6359. }
  6360. if (!netif_device_present(dev))
  6361. return -ENODEV;
  6362. err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
  6363. err = notifier_to_errno(err);
  6364. if (err)
  6365. return err;
  6366. orig_mtu = dev->mtu;
  6367. err = __dev_set_mtu(dev, new_mtu);
  6368. if (!err) {
  6369. err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
  6370. err = notifier_to_errno(err);
  6371. if (err) {
  6372. /* setting mtu back and notifying everyone again,
  6373. * so that they have a chance to revert changes.
  6374. */
  6375. __dev_set_mtu(dev, orig_mtu);
  6376. call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
  6377. }
  6378. }
  6379. return err;
  6380. }
  6381. EXPORT_SYMBOL(dev_set_mtu);
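/* Usage sketch (illustrative): callers hold RTNL and simply pass the
 * requested MTU; the range checks against dev->min_mtu/max_mtu and the
 * notifier round trip all happen above.
 *
 *	rtnl_lock();
 *	err = dev_set_mtu(dev, 9000);	// e.g. request a jumbo MTU
 *	rtnl_unlock();
 */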
  6382. /**
  6383. * dev_change_tx_queue_len - Change TX queue length of a netdevice
  6384. * @dev: device
  6385. * @new_len: new tx queue length
  6386. */
  6387. int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
  6388. {
  6389. unsigned int orig_len = dev->tx_queue_len;
  6390. int res;
  6391. if (new_len != (unsigned int)new_len)
  6392. return -ERANGE;
  6393. if (new_len != orig_len) {
  6394. dev->tx_queue_len = new_len;
  6395. res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
  6396. res = notifier_to_errno(res);
  6397. if (res) {
  6398. netdev_err(dev,
  6399. "refused to change device tx_queue_len\n");
  6400. dev->tx_queue_len = orig_len;
  6401. return res;
  6402. }
  6403. return dev_qdisc_change_tx_queue_len(dev);
  6404. }
  6405. return 0;
  6406. }
  6407. /**
  6408. * dev_set_group - Change group this device belongs to
  6409. * @dev: device
  6410. * @new_group: group this device should belong to
  6411. */
  6412. void dev_set_group(struct net_device *dev, int new_group)
  6413. {
  6414. dev->group = new_group;
  6415. }
  6416. EXPORT_SYMBOL(dev_set_group);
  6417. /**
  6418. * dev_set_mac_address - Change Media Access Control Address
  6419. * @dev: device
  6420. * @sa: new address
  6421. *
  6422. * Change the hardware (MAC) address of the device
  6423. */
  6424. int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
  6425. {
  6426. const struct net_device_ops *ops = dev->netdev_ops;
  6427. int err;
  6428. if (!ops->ndo_set_mac_address)
  6429. return -EOPNOTSUPP;
  6430. if (sa->sa_family != dev->type)
  6431. return -EINVAL;
  6432. if (!netif_device_present(dev))
  6433. return -ENODEV;
  6434. err = ops->ndo_set_mac_address(dev, sa);
  6435. if (err)
  6436. return err;
  6437. dev->addr_assign_type = NET_ADDR_SET;
  6438. call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
  6439. add_device_randomness(dev->dev_addr, dev->addr_len);
  6440. return 0;
  6441. }
  6442. EXPORT_SYMBOL(dev_set_mac_address);
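/* Usage sketch (illustrative): the sockaddr family must match dev->type
 * (e.g. ARPHRD_ETHER), as checked above. new_mac is a hypothetical u8[6].
 *
 *	struct sockaddr sa;
 *
 *	sa.sa_family = dev->type;
 *	memcpy(sa.sa_data, new_mac, ETH_ALEN);
 *	err = dev_set_mac_address(dev, &sa);
 */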
  6443. /**
  6444. * dev_change_carrier - Change device carrier
  6445. * @dev: device
  6446. * @new_carrier: new value
  6447. *
  6448. * Change device carrier
  6449. */
  6450. int dev_change_carrier(struct net_device *dev, bool new_carrier)
  6451. {
  6452. const struct net_device_ops *ops = dev->netdev_ops;
  6453. if (!ops->ndo_change_carrier)
  6454. return -EOPNOTSUPP;
  6455. if (!netif_device_present(dev))
  6456. return -ENODEV;
  6457. return ops->ndo_change_carrier(dev, new_carrier);
  6458. }
  6459. EXPORT_SYMBOL(dev_change_carrier);
  6460. /**
  6461. * dev_get_phys_port_id - Get device physical port ID
  6462. * @dev: device
  6463. * @ppid: port ID
  6464. *
  6465. * Get device physical port ID
  6466. */
  6467. int dev_get_phys_port_id(struct net_device *dev,
  6468. struct netdev_phys_item_id *ppid)
  6469. {
  6470. const struct net_device_ops *ops = dev->netdev_ops;
  6471. if (!ops->ndo_get_phys_port_id)
  6472. return -EOPNOTSUPP;
  6473. return ops->ndo_get_phys_port_id(dev, ppid);
  6474. }
  6475. EXPORT_SYMBOL(dev_get_phys_port_id);
  6476. /**
  6477. * dev_get_phys_port_name - Get device physical port name
  6478. * @dev: device
  6479. * @name: port name
  6480. * @len: limit of bytes to copy to name
  6481. *
  6482. * Get device physical port name
  6483. */
  6484. int dev_get_phys_port_name(struct net_device *dev,
  6485. char *name, size_t len)
  6486. {
  6487. const struct net_device_ops *ops = dev->netdev_ops;
  6488. if (!ops->ndo_get_phys_port_name)
  6489. return -EOPNOTSUPP;
  6490. return ops->ndo_get_phys_port_name(dev, name, len);
  6491. }
  6492. EXPORT_SYMBOL(dev_get_phys_port_name);
  6493. /**
  6494. * dev_change_proto_down - update protocol port state information
  6495. * @dev: device
  6496. * @proto_down: new value
  6497. *
  6498. * This info can be used by switch drivers to set the phys state of the
  6499. * port.
  6500. */
  6501. int dev_change_proto_down(struct net_device *dev, bool proto_down)
  6502. {
  6503. const struct net_device_ops *ops = dev->netdev_ops;
  6504. if (!ops->ndo_change_proto_down)
  6505. return -EOPNOTSUPP;
  6506. if (!netif_device_present(dev))
  6507. return -ENODEV;
  6508. return ops->ndo_change_proto_down(dev, proto_down);
  6509. }
  6510. EXPORT_SYMBOL(dev_change_proto_down);
  6511. void __dev_xdp_query(struct net_device *dev, bpf_op_t bpf_op,
  6512. struct netdev_bpf *xdp)
  6513. {
  6514. memset(xdp, 0, sizeof(*xdp));
  6515. xdp->command = XDP_QUERY_PROG;
  6516. /* Query must always succeed. */
  6517. WARN_ON(bpf_op(dev, xdp) < 0);
  6518. }
  6519. static u8 __dev_xdp_attached(struct net_device *dev, bpf_op_t bpf_op)
  6520. {
  6521. struct netdev_bpf xdp;
  6522. __dev_xdp_query(dev, bpf_op, &xdp);
  6523. return xdp.prog_attached;
  6524. }
  6525. static int dev_xdp_install(struct net_device *dev, bpf_op_t bpf_op,
  6526. struct netlink_ext_ack *extack, u32 flags,
  6527. struct bpf_prog *prog)
  6528. {
  6529. struct netdev_bpf xdp;
  6530. memset(&xdp, 0, sizeof(xdp));
  6531. if (flags & XDP_FLAGS_HW_MODE)
  6532. xdp.command = XDP_SETUP_PROG_HW;
  6533. else
  6534. xdp.command = XDP_SETUP_PROG;
  6535. xdp.extack = extack;
  6536. xdp.flags = flags;
  6537. xdp.prog = prog;
  6538. return bpf_op(dev, &xdp);
  6539. }
  6540. static void dev_xdp_uninstall(struct net_device *dev)
  6541. {
  6542. struct netdev_bpf xdp;
  6543. bpf_op_t ndo_bpf;
  6544. /* Remove generic XDP */
  6545. WARN_ON(dev_xdp_install(dev, generic_xdp_install, NULL, 0, NULL));
  6546. /* Remove from the driver */
  6547. ndo_bpf = dev->netdev_ops->ndo_bpf;
  6548. if (!ndo_bpf)
  6549. return;
  6550. __dev_xdp_query(dev, ndo_bpf, &xdp);
  6551. if (xdp.prog_attached == XDP_ATTACHED_NONE)
  6552. return;
  6553. /* Program removal should always succeed */
  6554. WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags, NULL));
  6555. }
  6556. /**
  6557. * dev_change_xdp_fd - set or clear a bpf program for a device rx path
  6558. * @dev: device
  6559. * @extack: netlink extended ack
  6560. * @fd: new program fd or negative value to clear
  6561. * @flags: xdp-related flags
  6562. *
  6563. * Set or clear a bpf program for a device
  6564. */
  6565. int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
  6566. int fd, u32 flags)
  6567. {
  6568. const struct net_device_ops *ops = dev->netdev_ops;
  6569. struct bpf_prog *prog = NULL;
  6570. bpf_op_t bpf_op, bpf_chk;
  6571. int err;
  6572. ASSERT_RTNL();
  6573. bpf_op = bpf_chk = ops->ndo_bpf;
  6574. if (!bpf_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE)))
  6575. return -EOPNOTSUPP;
  6576. if (!bpf_op || (flags & XDP_FLAGS_SKB_MODE))
  6577. bpf_op = generic_xdp_install;
  6578. if (bpf_op == bpf_chk)
  6579. bpf_chk = generic_xdp_install;
  6580. if (fd >= 0) {
  6581. if (bpf_chk && __dev_xdp_attached(dev, bpf_chk))
  6582. return -EEXIST;
  6583. if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) &&
  6584. __dev_xdp_attached(dev, bpf_op))
  6585. return -EBUSY;
  6586. prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
  6587. bpf_op == ops->ndo_bpf);
  6588. if (IS_ERR(prog))
  6589. return PTR_ERR(prog);
  6590. if (!(flags & XDP_FLAGS_HW_MODE) &&
  6591. bpf_prog_is_dev_bound(prog->aux)) {
  6592. NL_SET_ERR_MSG(extack, "using device-bound program without HW_MODE flag is not supported");
  6593. bpf_prog_put(prog);
  6594. return -EINVAL;
  6595. }
  6596. }
  6597. err = dev_xdp_install(dev, bpf_op, extack, flags, prog);
  6598. if (err < 0 && prog)
  6599. bpf_prog_put(prog);
  6600. return err;
  6601. }
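/* Usage sketch (illustrative): the rtnetlink IFLA_XDP handler is the normal
 * caller. Under RTNL it passes a program fd plus mode flags, or a negative
 * fd to detach; prog_fd and extack here are hypothetical.
 *
 *	err = dev_change_xdp_fd(dev, extack, prog_fd, XDP_FLAGS_DRV_MODE);
 */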
  6602. /**
  6603. * dev_new_index - allocate an ifindex
  6604. * @net: the applicable net namespace
  6605. *
  6606. * Returns a suitable unique value for a new device interface
  6607. * number. The caller must hold the rtnl semaphore or the
  6608. * dev_base_lock to be sure it remains unique.
  6609. */
  6610. static int dev_new_index(struct net *net)
  6611. {
  6612. int ifindex = net->ifindex;
  6613. for (;;) {
  6614. if (++ifindex <= 0)
  6615. ifindex = 1;
  6616. if (!__dev_get_by_index(net, ifindex))
  6617. return net->ifindex = ifindex;
  6618. }
  6619. }
/* Delayed registration/unregistration */
  6621. static LIST_HEAD(net_todo_list);
  6622. DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
  6623. static void net_set_todo(struct net_device *dev)
  6624. {
  6625. list_add_tail(&dev->todo_list, &net_todo_list);
  6626. dev_net(dev)->dev_unreg_count++;
  6627. }
  6628. static void rollback_registered_many(struct list_head *head)
  6629. {
  6630. struct net_device *dev, *tmp;
  6631. LIST_HEAD(close_head);
  6632. BUG_ON(dev_boot_phase);
  6633. ASSERT_RTNL();
  6634. list_for_each_entry_safe(dev, tmp, head, unreg_list) {
  6635. /* Some devices call without registering
  6636. * for initialization unwind. Remove those
  6637. * devices and proceed with the remaining.
  6638. */
  6639. if (dev->reg_state == NETREG_UNINITIALIZED) {
  6640. pr_debug("unregister_netdevice: device %s/%p never was registered\n",
  6641. dev->name, dev);
  6642. WARN_ON(1);
  6643. list_del(&dev->unreg_list);
  6644. continue;
  6645. }
  6646. dev->dismantle = true;
  6647. BUG_ON(dev->reg_state != NETREG_REGISTERED);
  6648. }
  6649. /* If device is running, close it first. */
  6650. list_for_each_entry(dev, head, unreg_list)
  6651. list_add_tail(&dev->close_list, &close_head);
  6652. dev_close_many(&close_head, true);
  6653. list_for_each_entry(dev, head, unreg_list) {
  6654. /* And unlink it from device chain. */
  6655. unlist_netdevice(dev);
  6656. dev->reg_state = NETREG_UNREGISTERING;
  6657. }
  6658. flush_all_backlogs();
  6659. synchronize_net();
  6660. list_for_each_entry(dev, head, unreg_list) {
  6661. struct sk_buff *skb = NULL;
  6662. /* Shutdown queueing discipline. */
  6663. dev_shutdown(dev);
  6664. dev_xdp_uninstall(dev);
  6665. /* Notify protocols, that we are about to destroy
  6666. * this device. They should clean all the things.
  6667. */
  6668. call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
  6669. if (!dev->rtnl_link_ops ||
  6670. dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
  6671. skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
  6672. GFP_KERNEL, NULL, 0);
  6673. /*
  6674. * Flush the unicast and multicast chains
  6675. */
  6676. dev_uc_flush(dev);
  6677. dev_mc_flush(dev);
  6678. if (dev->netdev_ops->ndo_uninit)
  6679. dev->netdev_ops->ndo_uninit(dev);
  6680. if (skb)
  6681. rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
  6682. /* Notifier chain MUST detach us all upper devices. */
  6683. WARN_ON(netdev_has_any_upper_dev(dev));
  6684. WARN_ON(netdev_has_any_lower_dev(dev));
  6685. /* Remove entries from kobject tree */
  6686. netdev_unregister_kobject(dev);
  6687. #ifdef CONFIG_XPS
  6688. /* Remove XPS queueing entries */
  6689. netif_reset_xps_queues_gt(dev, 0);
  6690. #endif
  6691. }
  6692. synchronize_net();
  6693. list_for_each_entry(dev, head, unreg_list)
  6694. dev_put(dev);
  6695. }
  6696. static void rollback_registered(struct net_device *dev)
  6697. {
  6698. LIST_HEAD(single);
  6699. list_add(&dev->unreg_list, &single);
  6700. rollback_registered_many(&single);
  6701. list_del(&single);
  6702. }
  6703. static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
  6704. struct net_device *upper, netdev_features_t features)
  6705. {
  6706. netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
  6707. netdev_features_t feature;
  6708. int feature_bit;
  6709. for_each_netdev_feature(&upper_disables, feature_bit) {
  6710. feature = __NETIF_F_BIT(feature_bit);
  6711. if (!(upper->wanted_features & feature)
  6712. && (features & feature)) {
  6713. netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
  6714. &feature, upper->name);
  6715. features &= ~feature;
  6716. }
  6717. }
  6718. return features;
  6719. }
  6720. static void netdev_sync_lower_features(struct net_device *upper,
  6721. struct net_device *lower, netdev_features_t features)
  6722. {
  6723. netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
  6724. netdev_features_t feature;
  6725. int feature_bit;
  6726. for_each_netdev_feature(&upper_disables, feature_bit) {
  6727. feature = __NETIF_F_BIT(feature_bit);
  6728. if (!(features & feature) && (lower->features & feature)) {
  6729. netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
  6730. &feature, lower->name);
  6731. lower->wanted_features &= ~feature;
  6732. netdev_update_features(lower);
  6733. if (unlikely(lower->features & feature))
  6734. netdev_WARN(upper, "failed to disable %pNF on %s!\n",
  6735. &feature, lower->name);
  6736. }
  6737. }
  6738. }
  6739. static netdev_features_t netdev_fix_features(struct net_device *dev,
  6740. netdev_features_t features)
  6741. {
  6742. /* Fix illegal checksum combinations */
  6743. if ((features & NETIF_F_HW_CSUM) &&
  6744. (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
  6745. netdev_warn(dev, "mixed HW and IP checksum settings.\n");
  6746. features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
  6747. }
  6748. /* TSO requires that SG is present as well. */
  6749. if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
  6750. netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
  6751. features &= ~NETIF_F_ALL_TSO;
  6752. }
  6753. if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
  6754. !(features & NETIF_F_IP_CSUM)) {
  6755. netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
  6756. features &= ~NETIF_F_TSO;
  6757. features &= ~NETIF_F_TSO_ECN;
  6758. }
  6759. if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
  6760. !(features & NETIF_F_IPV6_CSUM)) {
  6761. netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
  6762. features &= ~NETIF_F_TSO6;
  6763. }
  6764. /* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
  6765. if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
  6766. features &= ~NETIF_F_TSO_MANGLEID;
  6767. /* TSO ECN requires that TSO is present as well. */
  6768. if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
  6769. features &= ~NETIF_F_TSO_ECN;
  6770. /* Software GSO depends on SG. */
  6771. if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
  6772. netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
  6773. features &= ~NETIF_F_GSO;
  6774. }
  6775. /* GSO partial features require GSO partial be set */
  6776. if ((features & dev->gso_partial_features) &&
  6777. !(features & NETIF_F_GSO_PARTIAL)) {
  6778. netdev_dbg(dev,
  6779. "Dropping partially supported GSO features since no GSO partial.\n");
  6780. features &= ~dev->gso_partial_features;
  6781. }
  6782. if (!(features & NETIF_F_RXCSUM)) {
  6783. /* NETIF_F_GRO_HW implies doing RXCSUM since every packet
  6784. * successfully merged by hardware must also have the
  6785. * checksum verified by hardware. If the user does not
  6786. * want to enable RXCSUM, logically, we should disable GRO_HW.
  6787. */
  6788. if (features & NETIF_F_GRO_HW) {
  6789. netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n");
  6790. features &= ~NETIF_F_GRO_HW;
  6791. }
  6792. }
  6793. /* LRO/HW-GRO features cannot be combined with RX-FCS */
  6794. if (features & NETIF_F_RXFCS) {
  6795. if (features & NETIF_F_LRO) {
  6796. netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n");
  6797. features &= ~NETIF_F_LRO;
  6798. }
  6799. if (features & NETIF_F_GRO_HW) {
  6800. netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n");
  6801. features &= ~NETIF_F_GRO_HW;
  6802. }
  6803. }
  6804. return features;
  6805. }
  6806. int __netdev_update_features(struct net_device *dev)
  6807. {
  6808. struct net_device *upper, *lower;
  6809. netdev_features_t features;
  6810. struct list_head *iter;
  6811. int err = -1;
  6812. ASSERT_RTNL();
  6813. features = netdev_get_wanted_features(dev);
  6814. if (dev->netdev_ops->ndo_fix_features)
  6815. features = dev->netdev_ops->ndo_fix_features(dev, features);
  6816. /* driver might be less strict about feature dependencies */
  6817. features = netdev_fix_features(dev, features);
/* some features can't be enabled if they're off on an upper device */
  6819. netdev_for_each_upper_dev_rcu(dev, upper, iter)
  6820. features = netdev_sync_upper_features(dev, upper, features);
  6821. if (dev->features == features)
  6822. goto sync_lower;
  6823. netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
  6824. &dev->features, &features);
  6825. if (dev->netdev_ops->ndo_set_features)
  6826. err = dev->netdev_ops->ndo_set_features(dev, features);
  6827. else
  6828. err = 0;
  6829. if (unlikely(err < 0)) {
  6830. netdev_err(dev,
  6831. "set_features() failed (%d); wanted %pNF, left %pNF\n",
  6832. err, &features, &dev->features);
  6833. /* return non-0 since some features might have changed and
  6834. * it's better to fire a spurious notification than miss it
  6835. */
  6836. return -1;
  6837. }
  6838. sync_lower:
  6839. /* some features must be disabled on lower devices when disabled
  6840. * on an upper device (think: bonding master or bridge)
  6841. */
  6842. netdev_for_each_lower_dev(dev, lower, iter)
  6843. netdev_sync_lower_features(dev, lower, features);
  6844. if (!err) {
  6845. netdev_features_t diff = features ^ dev->features;
  6846. if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) {
  6847. /* udp_tunnel_{get,drop}_rx_info both need
  6848. * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the
  6849. * device, or they won't do anything.
  6850. * Thus we need to update dev->features
  6851. * *before* calling udp_tunnel_get_rx_info,
  6852. * but *after* calling udp_tunnel_drop_rx_info.
  6853. */
  6854. if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
  6855. dev->features = features;
  6856. udp_tunnel_get_rx_info(dev);
  6857. } else {
  6858. udp_tunnel_drop_rx_info(dev);
  6859. }
  6860. }
  6861. if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) {
  6862. if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
  6863. dev->features = features;
  6864. err |= vlan_get_rx_ctag_filter_info(dev);
  6865. } else {
  6866. vlan_drop_rx_ctag_filter_info(dev);
  6867. }
  6868. }
  6869. if (diff & NETIF_F_HW_VLAN_STAG_FILTER) {
  6870. if (features & NETIF_F_HW_VLAN_STAG_FILTER) {
  6871. dev->features = features;
  6872. err |= vlan_get_rx_stag_filter_info(dev);
  6873. } else {
  6874. vlan_drop_rx_stag_filter_info(dev);
  6875. }
  6876. }
  6877. dev->features = features;
  6878. }
  6879. return err < 0 ? 0 : 1;
  6880. }
  6881. /**
  6882. * netdev_update_features - recalculate device features
  6883. * @dev: the device to check
  6884. *
  6885. * Recalculate dev->features set and send notifications if it
  6886. * has changed. Should be called after driver or hardware dependent
  6887. * conditions might have changed that influence the features.
  6888. */
  6889. void netdev_update_features(struct net_device *dev)
  6890. {
  6891. if (__netdev_update_features(dev))
  6892. netdev_features_change(dev);
  6893. }
  6894. EXPORT_SYMBOL(netdev_update_features);
  6895. /**
  6896. * netdev_change_features - recalculate device features
  6897. * @dev: the device to check
  6898. *
  6899. * Recalculate dev->features set and send notifications even
  6900. * if they have not changed. Should be called instead of
  6901. * netdev_update_features() if also dev->vlan_features might
  6902. * have changed to allow the changes to be propagated to stacked
  6903. * VLAN devices.
  6904. */
  6905. void netdev_change_features(struct net_device *dev)
  6906. {
  6907. __netdev_update_features(dev);
  6908. netdev_features_change(dev);
  6909. }
  6910. EXPORT_SYMBOL(netdev_change_features);
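
/*
 * Example (illustrative sketch, not part of this file): a driver calling
 * netdev_update_features() after an external condition (here an MTU change)
 * restricts which offloads the hardware can support; the restriction itself
 * is applied in the driver's ndo_fix_features().  The foo_* names and
 * FOO_MAX_LRO_MTU are hypothetical.
 *
 *	static netdev_features_t foo_fix_features(struct net_device *dev,
 *						  netdev_features_t features)
 *	{
 *		if (dev->mtu > FOO_MAX_LRO_MTU)
 *			features &= ~NETIF_F_LRO;
 *		return features;
 *	}
 *
 *	static int foo_change_mtu(struct net_device *dev, int new_mtu)
 *	{
 *		dev->mtu = new_mtu;
 *		netdev_update_features(dev);
 *		return 0;
 *	}
 */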
/**
 * netif_stacked_transfer_operstate - transfer operstate
 * @rootdev: the root or lower level device to transfer state from
 * @dev: the device to transfer operstate to
 *
 * Transfer operational state from root to device. This is normally
 * called when a stacking relationship exists between the root
 * device and the device (a leaf device).
 */
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
                                      struct net_device *dev)
{
        if (rootdev->operstate == IF_OPER_DORMANT)
                netif_dormant_on(dev);
        else
                netif_dormant_off(dev);

        if (netif_carrier_ok(rootdev))
                netif_carrier_on(dev);
        else
                netif_carrier_off(dev);
}
EXPORT_SYMBOL(netif_stacked_transfer_operstate);
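
/*
 * Example (illustrative sketch, not part of this file): a stacked driver
 * propagating carrier/dormant state from its lower device when that device
 * changes state, e.g. from a netdevice notifier.  foo_device_event() and
 * foo_get_upper() are hypothetical.
 *
 *	static int foo_device_event(struct notifier_block *nb,
 *				    unsigned long event, void *ptr)
 *	{
 *		struct net_device *lower = netdev_notifier_info_to_dev(ptr);
 *		struct net_device *upper = foo_get_upper(lower);
 *
 *		if (upper && event == NETDEV_CHANGE)
 *			netif_stacked_transfer_operstate(lower, upper);
 *		return NOTIFY_DONE;
 *	}
 */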
static int netif_alloc_rx_queues(struct net_device *dev)
{
        unsigned int i, count = dev->num_rx_queues;
        struct netdev_rx_queue *rx;
        size_t sz = count * sizeof(*rx);
        int err = 0;

        BUG_ON(count < 1);

        rx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
        if (!rx)
                return -ENOMEM;

        dev->_rx = rx;

        for (i = 0; i < count; i++) {
                rx[i].dev = dev;

                /* XDP RX-queue setup */
                err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i);
                if (err < 0)
                        goto err_rxq_info;
        }
        return 0;

err_rxq_info:
        /* Rollback successful reg's and free other resources */
        while (i--)
                xdp_rxq_info_unreg(&rx[i].xdp_rxq);
        kvfree(dev->_rx);
        dev->_rx = NULL;
        return err;
}

static void netif_free_rx_queues(struct net_device *dev)
{
        unsigned int i, count = dev->num_rx_queues;

        /* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */
        if (!dev->_rx)
                return;

        for (i = 0; i < count; i++)
                xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq);

        kvfree(dev->_rx);
}

static void netdev_init_one_queue(struct net_device *dev,
                                  struct netdev_queue *queue, void *_unused)
{
        /* Initialize queue lock */
        spin_lock_init(&queue->_xmit_lock);
        netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
        queue->xmit_lock_owner = -1;
        netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
        queue->dev = dev;
#ifdef CONFIG_BQL
        dql_init(&queue->dql, HZ);
#endif
}

static void netif_free_tx_queues(struct net_device *dev)
{
        kvfree(dev->_tx);
}

static int netif_alloc_netdev_queues(struct net_device *dev)
{
        unsigned int count = dev->num_tx_queues;
        struct netdev_queue *tx;
        size_t sz = count * sizeof(*tx);

        if (count < 1 || count > 0xffff)
                return -EINVAL;

        tx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
        if (!tx)
                return -ENOMEM;

        dev->_tx = tx;

        netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
        spin_lock_init(&dev->tx_global_lock);

        return 0;
}

void netif_tx_stop_all_queues(struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

                netif_tx_stop_queue(txq);
        }
}
EXPORT_SYMBOL(netif_tx_stop_all_queues);

/**
 * register_netdevice - register a network device
 * @dev: device to register
 *
 * Take a completed network device structure and add it to the kernel
 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 * chain. 0 is returned on success. A negative errno code is returned
 * on a failure to set up the device, or if the name is a duplicate.
 *
 * Callers must hold the rtnl semaphore. You may want
 * register_netdev() instead of this.
 *
 * BUGS:
 * The locking appears insufficient to guarantee two parallel registers
 * will not get the same name.
 */
int register_netdevice(struct net_device *dev)
{
        int ret;
        struct net *net = dev_net(dev);

        BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE <
                     NETDEV_FEATURE_COUNT);
        BUG_ON(dev_boot_phase);
        ASSERT_RTNL();

        might_sleep();

        /* When net_device's are persistent, this will be fatal. */
        BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
        BUG_ON(!net);

        spin_lock_init(&dev->addr_list_lock);
        netdev_set_addr_lockdep_class(dev);

        ret = dev_get_valid_name(net, dev, dev->name);
        if (ret < 0)
                goto out;

        /* Init, if this function is available */
        if (dev->netdev_ops->ndo_init) {
                ret = dev->netdev_ops->ndo_init(dev);
                if (ret) {
                        if (ret > 0)
                                ret = -EIO;
                        goto out;
                }
        }

        if (((dev->hw_features | dev->features) &
             NETIF_F_HW_VLAN_CTAG_FILTER) &&
            (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
             !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
                netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
                ret = -EINVAL;
                goto err_uninit;
        }

        ret = -EBUSY;
        if (!dev->ifindex)
                dev->ifindex = dev_new_index(net);
        else if (__dev_get_by_index(net, dev->ifindex))
                goto err_uninit;

        /* Transfer changeable features to wanted_features and enable
         * software offloads (GSO and GRO).
         */
        dev->hw_features |= NETIF_F_SOFT_FEATURES;
        dev->features |= NETIF_F_SOFT_FEATURES;

        if (dev->netdev_ops->ndo_udp_tunnel_add) {
                dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
                dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
        }

        dev->wanted_features = dev->features & dev->hw_features;

        if (!(dev->flags & IFF_LOOPBACK))
                dev->hw_features |= NETIF_F_NOCACHE_COPY;

        /* If IPv4 TCP segmentation offload is supported we should also
         * allow the device to enable segmenting the frame with the option
         * of ignoring a static IP ID value.  This doesn't enable the
         * feature itself but allows the user to enable it later.
         */
        if (dev->hw_features & NETIF_F_TSO)
                dev->hw_features |= NETIF_F_TSO_MANGLEID;
        if (dev->vlan_features & NETIF_F_TSO)
                dev->vlan_features |= NETIF_F_TSO_MANGLEID;
        if (dev->mpls_features & NETIF_F_TSO)
                dev->mpls_features |= NETIF_F_TSO_MANGLEID;
        if (dev->hw_enc_features & NETIF_F_TSO)
                dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;

        /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
         */
        dev->vlan_features |= NETIF_F_HIGHDMA;

        /* Make NETIF_F_SG inheritable to tunnel devices.
         */
        dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;

        /* Make NETIF_F_SG inheritable to MPLS.
         */
        dev->mpls_features |= NETIF_F_SG;

        ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
        ret = notifier_to_errno(ret);
        if (ret)
                goto err_uninit;

        ret = netdev_register_kobject(dev);
        if (ret)
                goto err_uninit;
        dev->reg_state = NETREG_REGISTERED;

        __netdev_update_features(dev);

        /*
         * Default initial state at registry is that the
         * device is present.
         */
        set_bit(__LINK_STATE_PRESENT, &dev->state);

        linkwatch_init_dev(dev);
        dev_init_scheduler(dev);
        dev_hold(dev);
        list_netdevice(dev);
        add_device_randomness(dev->dev_addr, dev->addr_len);

        /* If the device has permanent device address, driver should
         * set dev_addr and also addr_assign_type should be set to
         * NET_ADDR_PERM (default value).
         */
        if (dev->addr_assign_type == NET_ADDR_PERM)
                memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

        /* Notify protocols, that a new device appeared. */
        ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
        ret = notifier_to_errno(ret);
        if (ret) {
                rollback_registered(dev);
                dev->reg_state = NETREG_UNREGISTERED;
        }
        /*
         * Prevent userspace races by waiting until the network
         * device is fully setup before sending notifications.
         */
        if (!dev->rtnl_link_ops ||
            dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
                rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);

out:
        return ret;

err_uninit:
        if (dev->netdev_ops->ndo_uninit)
                dev->netdev_ops->ndo_uninit(dev);
        if (dev->priv_destructor)
                dev->priv_destructor(dev);
        goto out;
}
EXPORT_SYMBOL(register_netdevice);

/**
 * init_dummy_netdev - init a dummy network device for NAPI
 * @dev: device to init
 *
 * This takes a network device structure and initializes the minimum
 * number of fields so it can be used to schedule NAPI polls without
 * registering a full blown interface. This is to be used by drivers
 * that need to tie several hardware interfaces to a single NAPI
 * poll scheduler due to HW limitations.
 */
int init_dummy_netdev(struct net_device *dev)
{
        /* Clear everything. Note we don't initialize spinlocks
         * as they aren't supposed to be taken by any of the
         * NAPI code and this dummy netdev is supposed to be
         * only ever used for NAPI polls
         */
        memset(dev, 0, sizeof(struct net_device));

        /* make sure we BUG if trying to hit standard
         * register/unregister code path
         */
        dev->reg_state = NETREG_DUMMY;

        /* NAPI wants this */
        INIT_LIST_HEAD(&dev->napi_list);

        /* a dummy interface is started by default */
        set_bit(__LINK_STATE_PRESENT, &dev->state);
        set_bit(__LINK_STATE_START, &dev->state);

        /* Note : We don't allocate pcpu_refcnt for dummy devices,
         * because users of this 'device' don't need to change
         * its refcount.
         */
        return 0;
}
EXPORT_SYMBOL_GPL(init_dummy_netdev);
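
/*
 * Example (illustrative sketch, not part of this file): a driver that
 * multiplexes several hardware channels onto one NAPI context backed by a
 * dummy netdev.  struct foo_priv and foo_setup_napi() are hypothetical.
 *
 *	struct foo_priv {
 *		struct net_device napi_dev;
 *		struct napi_struct napi;
 *	};
 *
 *	static int foo_setup_napi(struct foo_priv *priv,
 *				  int (*poll)(struct napi_struct *, int))
 *	{
 *		init_dummy_netdev(&priv->napi_dev);
 *		netif_napi_add(&priv->napi_dev, &priv->napi, poll,
 *			       NAPI_POLL_WEIGHT);
 *		napi_enable(&priv->napi);
 *		return 0;
 *	}
 */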
/**
 * register_netdev - register a network device
 * @dev: device to register
 *
 * Take a completed network device structure and add it to the kernel
 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 * chain. 0 is returned on success. A negative errno code is returned
 * on a failure to set up the device, or if the name is a duplicate.
 *
 * This is a wrapper around register_netdevice that takes the rtnl semaphore
 * and expands the device name if you passed a format string to
 * alloc_netdev.
 */
int register_netdev(struct net_device *dev)
{
        int err;

        if (rtnl_lock_killable())
                return -EINTR;
        err = register_netdevice(dev);
        rtnl_unlock();
        return err;
}
EXPORT_SYMBOL(register_netdev);
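
/*
 * Example (illustrative sketch, not part of this file): a typical driver
 * probe path allocating an Ethernet device, filling it in and registering
 * it.  struct foo_device, foo_netdev_ops and foo_setup_hw() are hypothetical.
 *
 *	static int foo_probe(struct foo_device *hw)
 *	{
 *		struct net_device *netdev;
 *		int err;
 *
 *		netdev = alloc_etherdev(sizeof(struct foo_priv));
 *		if (!netdev)
 *			return -ENOMEM;
 *
 *		netdev->netdev_ops = &foo_netdev_ops;
 *		err = foo_setup_hw(netdev, hw);
 *		if (err)
 *			goto err_free;
 *
 *		err = register_netdev(netdev);
 *		if (err)
 *			goto err_free;
 *		return 0;
 *
 *	err_free:
 *		free_netdev(netdev);
 *		return err;
 *	}
 */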
int netdev_refcnt_read(const struct net_device *dev)
{
        int i, refcnt = 0;

        for_each_possible_cpu(i)
                refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
        return refcnt;
}
EXPORT_SYMBOL(netdev_refcnt_read);

/**
 * netdev_wait_allrefs - wait until all references are gone.
 * @dev: target net_device
 *
 * This is called when unregistering network devices.
 *
 * Any protocol or device that holds a reference should register
 * for netdevice notification, and cleanup and put back the
 * reference if they receive an UNREGISTER event.
 * We can get stuck here if buggy protocols don't correctly
 * call dev_put.
 */
static void netdev_wait_allrefs(struct net_device *dev)
{
        unsigned long rebroadcast_time, warning_time;
        int refcnt;

        linkwatch_forget_dev(dev);

        rebroadcast_time = warning_time = jiffies;
        refcnt = netdev_refcnt_read(dev);

        while (refcnt != 0) {
                if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
                        rtnl_lock();

                        /* Rebroadcast unregister notification */
                        call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

                        __rtnl_unlock();
                        rcu_barrier();
                        rtnl_lock();

                        if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
                                     &dev->state)) {
                                /* We must not have linkwatch events
                                 * pending on unregister. If this
                                 * happens, we simply run the queue
                                 * unscheduled, resulting in a noop
                                 * for this device.
                                 */
                                linkwatch_run_queue();
                        }

                        __rtnl_unlock();

                        rebroadcast_time = jiffies;
                }

                msleep(250);

                refcnt = netdev_refcnt_read(dev);

                if (time_after(jiffies, warning_time + 10 * HZ)) {
                        pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
                                 dev->name, refcnt);
                        warning_time = jiffies;
                }
        }
}

/* The sequence is:
 *
 *	rtnl_lock();
 *	...
 *	register_netdevice(x1);
 *	register_netdevice(x2);
 *	...
 *	unregister_netdevice(y1);
 *	unregister_netdevice(y2);
 *	...
 *	rtnl_unlock();
 *	free_netdev(y1);
 *	free_netdev(y2);
 *
 * We are invoked by rtnl_unlock().
 * This allows us to deal with problems:
 * 1) We can delete sysfs objects which invoke hotplug
 *    without deadlocking with linkwatch via keventd.
 * 2) Since we run with the RTNL semaphore not held, we can sleep
 *    safely in order to wait for the netdev refcnt to drop to zero.
 *
 * We must not return until all unregister events added during
 * the interval the lock was held have been completed.
 */
void netdev_run_todo(void)
{
        struct list_head list;

        /* Snapshot list, allow later requests */
        list_replace_init(&net_todo_list, &list);

        __rtnl_unlock();

        /* Wait for rcu callbacks to finish before next phase */
        if (!list_empty(&list))
                rcu_barrier();

        while (!list_empty(&list)) {
                struct net_device *dev
                        = list_first_entry(&list, struct net_device, todo_list);
                list_del(&dev->todo_list);

                if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
                        pr_err("network todo '%s' but state %d\n",
                               dev->name, dev->reg_state);
                        dump_stack();
                        continue;
                }

                dev->reg_state = NETREG_UNREGISTERED;

                netdev_wait_allrefs(dev);

                /* paranoia */
                BUG_ON(netdev_refcnt_read(dev));
                BUG_ON(!list_empty(&dev->ptype_all));
                BUG_ON(!list_empty(&dev->ptype_specific));
                WARN_ON(rcu_access_pointer(dev->ip_ptr));
                WARN_ON(rcu_access_pointer(dev->ip6_ptr));
#if IS_ENABLED(CONFIG_DECNET)
                WARN_ON(dev->dn_ptr);
#endif
                if (dev->priv_destructor)
                        dev->priv_destructor(dev);
                if (dev->needs_free_netdev)
                        free_netdev(dev);

                /* Report a network device has been unregistered */
                rtnl_lock();
                dev_net(dev)->dev_unreg_count--;
                __rtnl_unlock();
                wake_up(&netdev_unregistering_wq);

                /* Free network device */
                kobject_put(&dev->dev.kobj);
        }
}

/* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
 * all the same fields in the same order as net_device_stats, with only
 * the type differing, but rtnl_link_stats64 may have additional fields
 * at the end for newer counters.
 */
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
                             const struct net_device_stats *netdev_stats)
{
#if BITS_PER_LONG == 64
        BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
        memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
        /* zero out counters that only exist in rtnl_link_stats64 */
        memset((char *)stats64 + sizeof(*netdev_stats), 0,
               sizeof(*stats64) - sizeof(*netdev_stats));
#else
        size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
        const unsigned long *src = (const unsigned long *)netdev_stats;
        u64 *dst = (u64 *)stats64;

        BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
        for (i = 0; i < n; i++)
                dst[i] = src[i];
        /* zero out counters that only exist in rtnl_link_stats64 */
        memset((char *)stats64 + n * sizeof(u64), 0,
               sizeof(*stats64) - n * sizeof(u64));
#endif
}
EXPORT_SYMBOL(netdev_stats_to_stats64);

/**
 * dev_get_stats - get network device statistics
 * @dev: device to get statistics from
 * @storage: place to store stats
 *
 * Get network statistics from device. Return @storage.
 * The device driver may provide its own method by setting
 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
 * otherwise the internal statistics structure is used.
 */
struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
                                        struct rtnl_link_stats64 *storage)
{
        const struct net_device_ops *ops = dev->netdev_ops;

        if (ops->ndo_get_stats64) {
                memset(storage, 0, sizeof(*storage));
                ops->ndo_get_stats64(dev, storage);
        } else if (ops->ndo_get_stats) {
                netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
        } else {
                netdev_stats_to_stats64(storage, &dev->stats);
        }
        storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
        storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
        storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
        return storage;
}
EXPORT_SYMBOL(dev_get_stats);
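
/*
 * Example (illustrative sketch, not part of this file): a driver-side
 * ndo_get_stats64() implementation of the kind dev_get_stats() calls above.
 * struct foo_priv and its counters are hypothetical.
 *
 *	static void foo_get_stats64(struct net_device *dev,
 *				    struct rtnl_link_stats64 *stats)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		stats->rx_packets = priv->rx_packets;
 *		stats->rx_bytes   = priv->rx_bytes;
 *		stats->tx_packets = priv->tx_packets;
 *		stats->tx_bytes   = priv->tx_bytes;
 *	}
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		.ndo_get_stats64 = foo_get_stats64,
 *	};
 */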
struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
{
        struct netdev_queue *queue = dev_ingress_queue(dev);

#ifdef CONFIG_NET_CLS_ACT
        if (queue)
                return queue;
        queue = kzalloc(sizeof(*queue), GFP_KERNEL);
        if (!queue)
                return NULL;
        netdev_init_one_queue(dev, queue, NULL);
        RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
        queue->qdisc_sleeping = &noop_qdisc;
        rcu_assign_pointer(dev->ingress_queue, queue);
#endif
        return queue;
}

static const struct ethtool_ops default_ethtool_ops;

void netdev_set_default_ethtool_ops(struct net_device *dev,
                                    const struct ethtool_ops *ops)
{
        if (dev->ethtool_ops == &default_ethtool_ops)
                dev->ethtool_ops = ops;
}
EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);

void netdev_freemem(struct net_device *dev)
{
        char *addr = (char *)dev - dev->padded;

        kvfree(addr);
}

/**
 * alloc_netdev_mqs - allocate network device
 * @sizeof_priv: size of private data to allocate space for
 * @name: device name format string
 * @name_assign_type: origin of device name
 * @setup: callback to initialize device
 * @txqs: the number of TX subqueues to allocate
 * @rxqs: the number of RX subqueues to allocate
 *
 * Allocates a struct net_device with private data area for driver use
 * and performs basic initialization.  Also allocates subqueue structs
 * for each queue on the device.
 */
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
                unsigned char name_assign_type,
                void (*setup)(struct net_device *),
                unsigned int txqs, unsigned int rxqs)
{
        struct net_device *dev;
        unsigned int alloc_size;
        struct net_device *p;

        BUG_ON(strlen(name) >= sizeof(dev->name));

        if (txqs < 1) {
                pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
                return NULL;
        }

        if (rxqs < 1) {
                pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
                return NULL;
        }

        alloc_size = sizeof(struct net_device);
        if (sizeof_priv) {
                /* ensure 32-byte alignment of private area */
                alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
                alloc_size += sizeof_priv;
        }
        /* ensure 32-byte alignment of whole construct */
        alloc_size += NETDEV_ALIGN - 1;

        p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
        if (!p)
                return NULL;

        dev = PTR_ALIGN(p, NETDEV_ALIGN);
        dev->padded = (char *)dev - (char *)p;

        dev->pcpu_refcnt = alloc_percpu(int);
        if (!dev->pcpu_refcnt)
                goto free_dev;

        if (dev_addr_init(dev))
                goto free_pcpu;

        dev_mc_init(dev);
        dev_uc_init(dev);

        dev_net_set(dev, &init_net);

        dev->gso_max_size = GSO_MAX_SIZE;
        dev->gso_max_segs = GSO_MAX_SEGS;

        INIT_LIST_HEAD(&dev->napi_list);
        INIT_LIST_HEAD(&dev->unreg_list);
        INIT_LIST_HEAD(&dev->close_list);
        INIT_LIST_HEAD(&dev->link_watch_list);
        INIT_LIST_HEAD(&dev->adj_list.upper);
        INIT_LIST_HEAD(&dev->adj_list.lower);
        INIT_LIST_HEAD(&dev->ptype_all);
        INIT_LIST_HEAD(&dev->ptype_specific);
#ifdef CONFIG_NET_SCHED
        hash_init(dev->qdisc_hash);
#endif
        dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
        setup(dev);

        if (!dev->tx_queue_len) {
                dev->priv_flags |= IFF_NO_QUEUE;
                dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
        }

        dev->num_tx_queues = txqs;
        dev->real_num_tx_queues = txqs;
        if (netif_alloc_netdev_queues(dev))
                goto free_all;

        dev->num_rx_queues = rxqs;
        dev->real_num_rx_queues = rxqs;
        if (netif_alloc_rx_queues(dev))
                goto free_all;

        strcpy(dev->name, name);
        dev->name_assign_type = name_assign_type;
        dev->group = INIT_NETDEV_GROUP;
        if (!dev->ethtool_ops)
                dev->ethtool_ops = &default_ethtool_ops;

        nf_hook_ingress_init(dev);

        return dev;

free_all:
        free_netdev(dev);
        return NULL;

free_pcpu:
        free_percpu(dev->pcpu_refcnt);
free_dev:
        netdev_freemem(dev);
        return NULL;
}
EXPORT_SYMBOL(alloc_netdev_mqs);
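
/*
 * Example (illustrative sketch, not part of this file): allocating a
 * multi-queue device directly with a setup callback instead of using one of
 * the alloc_etherdev*() wrappers.  foo_setup(), foo_netdev_ops and
 * struct foo_priv are hypothetical.
 *
 *	static void foo_setup(struct net_device *dev)
 *	{
 *		ether_setup(dev);
 *		dev->netdev_ops = &foo_netdev_ops;
 *	}
 *
 *	static struct net_device *foo_alloc(void)
 *	{
 *		return alloc_netdev_mqs(sizeof(struct foo_priv), "foo%d",
 *					NET_NAME_UNKNOWN, foo_setup, 8, 8);
 *	}
 */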
/**
 * free_netdev - free network device
 * @dev: device
 *
 * This function does the last stage of destroying an allocated device
 * interface. The reference to the device object is released. If this
 * is the last reference then it will be freed. Must be called in process
 * context.
 */
void free_netdev(struct net_device *dev)
{
        struct napi_struct *p, *n;

        might_sleep();
        netif_free_tx_queues(dev);
        netif_free_rx_queues(dev);

        kfree(rcu_dereference_protected(dev->ingress_queue, 1));

        /* Flush device addresses */
        dev_addr_flush(dev);

        list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
                netif_napi_del(p);

        free_percpu(dev->pcpu_refcnt);
        dev->pcpu_refcnt = NULL;

        /* Compatibility with error handling in drivers */
        if (dev->reg_state == NETREG_UNINITIALIZED) {
                netdev_freemem(dev);
                return;
        }

        BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
        dev->reg_state = NETREG_RELEASED;

        /* will free via device release */
        put_device(&dev->dev);
}
EXPORT_SYMBOL(free_netdev);

/**
 * synchronize_net - Synchronize with packet receive processing
 *
 * Wait for packets currently being received to be done.
 * Does not block later packets from starting.
 */
void synchronize_net(void)
{
        might_sleep();
        if (rtnl_is_locked())
                synchronize_rcu_expedited();
        else
                synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);

/**
 * unregister_netdevice_queue - remove device from the kernel
 * @dev: device
 * @head: list
 *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
 * If head not NULL, device is queued to be unregistered later.
 *
 * Callers must hold the rtnl semaphore.  You may want
 * unregister_netdev() instead of this.
 */
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{
        ASSERT_RTNL();

        if (head) {
                list_move_tail(&dev->unreg_list, head);
        } else {
                rollback_registered(dev);
                /* Finish processing unregister after unlock */
                net_set_todo(dev);
        }
}
EXPORT_SYMBOL(unregister_netdevice_queue);

/**
 * unregister_netdevice_many - unregister many devices
 * @head: list of devices
 *
 * Note: As most callers use a stack allocated list_head,
 * we force a list_del() to make sure stack won't be corrupted later.
 */
void unregister_netdevice_many(struct list_head *head)
{
        struct net_device *dev;

        if (!list_empty(head)) {
                rollback_registered_many(head);
                list_for_each_entry(dev, head, unreg_list)
                        net_set_todo(dev);
                list_del(head);
        }
}
EXPORT_SYMBOL(unregister_netdevice_many);
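
/*
 * Example (illustrative sketch, not part of this file): batching several
 * unregistrations under one RTNL section so the heavy synchronization in
 * rollback_registered_many() is paid only once.  struct foo_dev and
 * foo_destroy_all() are hypothetical.
 *
 *	static void foo_destroy_all(struct list_head *foo_list)
 *	{
 *		struct foo_dev *f;
 *		LIST_HEAD(kill_list);
 *
 *		rtnl_lock();
 *		list_for_each_entry(f, foo_list, node)
 *			unregister_netdevice_queue(f->netdev, &kill_list);
 *		unregister_netdevice_many(&kill_list);
 *		rtnl_unlock();
 *	}
 */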
/**
 * unregister_netdev - remove device from the kernel
 * @dev: device
 *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
 *
 * This is just a wrapper for unregister_netdevice that takes
 * the rtnl semaphore.  In general you want to use this and not
 * unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
        rtnl_lock();
        unregister_netdevice(dev);
        rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);
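
/*
 * Example (illustrative sketch, not part of this file): a typical driver
 * remove path mirroring the probe example above.  The device must be
 * unregistered before free_netdev(), unless it sets needs_free_netdev, in
 * which case netdev_run_todo() frees it.  foo_teardown_hw() is hypothetical.
 *
 *	static void foo_remove(struct net_device *netdev)
 *	{
 *		unregister_netdev(netdev);
 *		foo_teardown_hw(netdev);
 *		free_netdev(netdev);
 *	}
 */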
/**
 * dev_change_net_namespace - move device to a different network namespace
 * @dev: device
 * @net: network namespace
 * @pat: If not NULL name pattern to try if the current device name
 *       is already taken in the destination network namespace.
 *
 * This function shuts down a device interface and moves it
 * to a new network namespace. On success 0 is returned, on
 * a failure a negative errno code is returned.
 *
 * Callers must hold the rtnl semaphore.
 */
int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
        int err, new_nsid, new_ifindex;

        ASSERT_RTNL();

        /* Don't allow namespace local devices to be moved. */
        err = -EINVAL;
        if (dev->features & NETIF_F_NETNS_LOCAL)
                goto out;

        /* Ensure the device has been registered */
        if (dev->reg_state != NETREG_REGISTERED)
                goto out;

        /* Get out if there is nothing to do */
        err = 0;
        if (net_eq(dev_net(dev), net))
                goto out;

        /* Pick the destination device name, and ensure
         * we can use it in the destination network namespace.
         */
        err = -EEXIST;
        if (__dev_get_by_name(net, dev->name)) {
                /* We get here if we can't use the current device name */
                if (!pat)
                        goto out;
                err = dev_get_valid_name(net, dev, pat);
                if (err < 0)
                        goto out;
        }

        /* And now a mini version of register_netdevice and unregister_netdevice.
         */

        /* If device is running, close it first. */
        dev_close(dev);

        /* And unlink it from device chain */
        unlist_netdevice(dev);

        synchronize_net();

        /* Shutdown queueing discipline. */
        dev_shutdown(dev);

        /* Notify protocols, that we are about to destroy
         * this device. They should clean all the things.
         *
         * Note that dev->reg_state stays at NETREG_REGISTERED.
         * This is wanted because this way 8021q and macvlan know
         * the device is just moving and can keep their slaves up.
         */
        call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
        rcu_barrier();

        new_nsid = peernet2id_alloc(dev_net(dev), net);
        /* If there is an ifindex conflict assign a new one */
        if (__dev_get_by_index(net, dev->ifindex))
                new_ifindex = dev_new_index(net);
        else
                new_ifindex = dev->ifindex;

        rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid,
                            new_ifindex);

        /*
         * Flush the unicast and multicast chains
         */
        dev_uc_flush(dev);
        dev_mc_flush(dev);

        /* Send a netdev-removed uevent to the old namespace */
        kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
        netdev_adjacent_del_links(dev);

        /* Actually switch the network namespace */
        dev_net_set(dev, net);
        dev->ifindex = new_ifindex;

        /* Send a netdev-add uevent to the new namespace */
        kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
        netdev_adjacent_add_links(dev);

        /* Fixup kobjects */
        err = device_rename(&dev->dev, dev->name);
        WARN_ON(err);

        /* Add the device back in the hashes */
        list_netdevice(dev);

        /* Notify protocols, that a new device appeared. */
        call_netdevice_notifiers(NETDEV_REGISTER, dev);

        /*
         * Prevent userspace races by waiting until the network
         * device is fully setup before sending notifications.
         */
        rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);

        synchronize_net();
        err = 0;
out:
        return err;
}
EXPORT_SYMBOL_GPL(dev_change_net_namespace);

static int dev_cpu_dead(unsigned int oldcpu)
{
        struct sk_buff **list_skb;
        struct sk_buff *skb;
        unsigned int cpu;
        struct softnet_data *sd, *oldsd, *remsd = NULL;

        local_irq_disable();
        cpu = smp_processor_id();
        sd = &per_cpu(softnet_data, cpu);
        oldsd = &per_cpu(softnet_data, oldcpu);

        /* Find end of our completion_queue. */
        list_skb = &sd->completion_queue;
        while (*list_skb)
                list_skb = &(*list_skb)->next;
        /* Append completion queue from offline CPU. */
        *list_skb = oldsd->completion_queue;
        oldsd->completion_queue = NULL;

        /* Append output queue from offline CPU. */
        if (oldsd->output_queue) {
                *sd->output_queue_tailp = oldsd->output_queue;
                sd->output_queue_tailp = oldsd->output_queue_tailp;
                oldsd->output_queue = NULL;
                oldsd->output_queue_tailp = &oldsd->output_queue;
        }
        /* Append NAPI poll list from offline CPU, with one exception :
         * process_backlog() must be called by cpu owning percpu backlog.
         * We properly handle process_queue & input_pkt_queue later.
         */
        while (!list_empty(&oldsd->poll_list)) {
                struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
                                                            struct napi_struct,
                                                            poll_list);

                list_del_init(&napi->poll_list);
                if (napi->poll == process_backlog)
                        napi->state = 0;
                else
                        ____napi_schedule(sd, napi);
        }

        raise_softirq_irqoff(NET_TX_SOFTIRQ);
        local_irq_enable();

#ifdef CONFIG_RPS
        remsd = oldsd->rps_ipi_list;
        oldsd->rps_ipi_list = NULL;
#endif
        /* send out pending IPI's on offline CPU */
        net_rps_send_ipi(remsd);

        /* Process offline CPU's input_pkt_queue */
        while ((skb = __skb_dequeue(&oldsd->process_queue))) {
                netif_rx_ni(skb);
                input_queue_head_incr(oldsd);
        }
        while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
                netif_rx_ni(skb);
                input_queue_head_incr(oldsd);
        }

        return 0;
}

/**
 * netdev_increment_features - increment feature set by one
 * @all: current feature set
 * @one: new feature set
 * @mask: mask feature set
 *
 * Computes a new feature set after adding a device with feature set
 * @one to the master device with current feature set @all.  Will not
 * enable anything that is off in @mask. Returns the new feature set.
 */
netdev_features_t netdev_increment_features(netdev_features_t all,
        netdev_features_t one, netdev_features_t mask)
{
        if (mask & NETIF_F_HW_CSUM)
                mask |= NETIF_F_CSUM_MASK;
        mask |= NETIF_F_VLAN_CHALLENGED;

        all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
        all &= one | ~NETIF_F_ALL_FOR_ALL;

        /* If one device supports hw checksumming, set for all. */
        if (all & NETIF_F_HW_CSUM)
                all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);

        return all;
}
EXPORT_SYMBOL(netdev_increment_features);
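
/*
 * Example (illustrative sketch, not part of this file): an aggregating
 * (master) driver recomputing its feature set from all of its slave devices,
 * in the spirit of what bonding and team do.  struct foo_master, its slave
 * list and FOO_VLAN_FEATURES are hypothetical.
 *
 *	static void foo_compute_features(struct foo_master *master)
 *	{
 *		netdev_features_t features = FOO_VLAN_FEATURES;
 *		struct foo_slave *slave;
 *
 *		list_for_each_entry(slave, &master->slaves, node)
 *			features = netdev_increment_features(features,
 *							     slave->dev->features,
 *							     FOO_VLAN_FEATURES);
 *
 *		master->dev->vlan_features = features;
 *		netdev_change_features(master->dev);
 *	}
 */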
static struct hlist_head * __net_init netdev_create_hash(void)
{
        int i;
        struct hlist_head *hash;

        hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL);
        if (hash != NULL)
                for (i = 0; i < NETDEV_HASHENTRIES; i++)
                        INIT_HLIST_HEAD(&hash[i]);

        return hash;
}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
        if (net != &init_net)
                INIT_LIST_HEAD(&net->dev_base_head);

        net->dev_name_head = netdev_create_hash();
        if (net->dev_name_head == NULL)
                goto err_name;

        net->dev_index_head = netdev_create_hash();
        if (net->dev_index_head == NULL)
                goto err_idx;

        return 0;

err_idx:
        kfree(net->dev_name_head);
err_name:
        return -ENOMEM;
}

/**
 * netdev_drivername - network driver for the device
 * @dev: network device
 *
 * Determine network driver for device.
 */
const char *netdev_drivername(const struct net_device *dev)
{
        const struct device_driver *driver;
        const struct device *parent;
        const char *empty = "";

        parent = dev->dev.parent;
        if (!parent)
                return empty;

        driver = parent->driver;
        if (driver && driver->name)
                return driver->name;
        return empty;
}

static void __netdev_printk(const char *level, const struct net_device *dev,
                            struct va_format *vaf)
{
        if (dev && dev->dev.parent) {
                dev_printk_emit(level[1] - '0',
                                dev->dev.parent,
                                "%s %s %s%s: %pV",
                                dev_driver_string(dev->dev.parent),
                                dev_name(dev->dev.parent),
                                netdev_name(dev), netdev_reg_state(dev),
                                vaf);
        } else if (dev) {
                printk("%s%s%s: %pV",
                       level, netdev_name(dev), netdev_reg_state(dev), vaf);
        } else {
                printk("%s(NULL net_device): %pV", level, vaf);
        }
}

void netdev_printk(const char *level, const struct net_device *dev,
                   const char *format, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, format);

        vaf.fmt = format;
        vaf.va = &args;

        __netdev_printk(level, dev, &vaf);

        va_end(args);
}
EXPORT_SYMBOL(netdev_printk);

#define define_netdev_printk_level(func, level)                        \
void func(const struct net_device *dev, const char *fmt, ...)          \
{                                                                      \
        struct va_format vaf;                                          \
        va_list args;                                                  \
                                                                       \
        va_start(args, fmt);                                           \
                                                                       \
        vaf.fmt = fmt;                                                 \
        vaf.va = &args;                                                \
                                                                       \
        __netdev_printk(level, dev, &vaf);                             \
                                                                       \
        va_end(args);                                                  \
}                                                                      \
EXPORT_SYMBOL(func);

define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);
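
/*
 * Example (illustrative sketch, not part of this file): using the netdev_*()
 * printk helpers from a driver so messages are prefixed with the driver, bus
 * and interface name.  foo_open() and foo_reset_hw() are hypothetical.
 *
 *	static int foo_open(struct net_device *dev)
 *	{
 *		int err = foo_reset_hw(dev);
 *
 *		if (err) {
 *			netdev_err(dev, "hardware reset failed: %d\n", err);
 *			return err;
 *		}
 *		netdev_info(dev, "link configured, starting queues\n");
 *		netif_tx_start_all_queues(dev);
 *		return 0;
 *	}
 */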
static void __net_exit netdev_exit(struct net *net)
{
        kfree(net->dev_name_head);
        kfree(net->dev_index_head);
        if (net != &init_net)
                WARN_ON_ONCE(!list_empty(&net->dev_base_head));
}

static struct pernet_operations __net_initdata netdev_net_ops = {
        .init = netdev_init,
        .exit = netdev_exit,
};

static void __net_exit default_device_exit(struct net *net)
{
        struct net_device *dev, *aux;
        /*
         * Push all migratable network devices back to the
         * initial network namespace
         */
        rtnl_lock();
        for_each_netdev_safe(net, dev, aux) {
                int err;
                char fb_name[IFNAMSIZ];

                /* Ignore unmovable devices (i.e. loopback) */
                if (dev->features & NETIF_F_NETNS_LOCAL)
                        continue;

                /* Leave virtual devices for the generic cleanup */
                if (dev->rtnl_link_ops)
                        continue;

                /* Push remaining network devices to init_net */
                snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
                err = dev_change_net_namespace(dev, &init_net, fb_name);
                if (err) {
                        pr_emerg("%s: failed to move %s to init_net: %d\n",
                                 __func__, dev->name, err);
                        BUG();
                }
        }
        rtnl_unlock();
}

static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
{
        /* Return with the rtnl_lock held when there are no network
         * devices unregistering in any network namespace in net_list.
         */
        struct net *net;
        bool unregistering;
        DEFINE_WAIT_FUNC(wait, woken_wake_function);

        add_wait_queue(&netdev_unregistering_wq, &wait);
        for (;;) {
                unregistering = false;
                rtnl_lock();
                list_for_each_entry(net, net_list, exit_list) {
                        if (net->dev_unreg_count > 0) {
                                unregistering = true;
                                break;
                        }
                }
                if (!unregistering)
                        break;
                __rtnl_unlock();

                wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
        }
        remove_wait_queue(&netdev_unregistering_wq, &wait);
}

static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
        /* At exit all network devices must be removed from a network
         * namespace.  Do this in the reverse order of registration.
         * Do this across as many network namespaces as possible to
         * improve batching efficiency.
         */
        struct net_device *dev;
        struct net *net;
        LIST_HEAD(dev_kill_list);

        /* To prevent network device cleanup code from dereferencing
         * loopback devices or network devices that have been freed
         * wait here for all pending unregistrations to complete,
         * before unregistering the loopback device and allowing the
         * network namespace to be freed.
         *
         * The netdev todo list containing all network devices
         * unregistrations that happen in default_device_exit_batch
         * will run in the rtnl_unlock() at the end of
         * default_device_exit_batch.
         */
        rtnl_lock_unregistering(net_list);
        list_for_each_entry(net, net_list, exit_list) {
                for_each_netdev_reverse(net, dev) {
                        if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
                                dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
                        else
                                unregister_netdevice_queue(dev, &dev_kill_list);
                }
        }
        unregister_netdevice_many(&dev_kill_list);
        rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
        .exit = default_device_exit,
        .exit_batch = default_device_exit_batch,
};

/*
 * Initialize the DEV module. At boot time this walks the device list and
 * unhooks any devices that fail to initialise (normally hardware not
 * present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 * This is called single threaded during boot, so no need
 * to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
        int i, rc = -ENOMEM;

        BUG_ON(!dev_boot_phase);

        if (dev_proc_init())
                goto out;

        if (netdev_kobject_init())
                goto out;

        INIT_LIST_HEAD(&ptype_all);
        for (i = 0; i < PTYPE_HASH_SIZE; i++)
                INIT_LIST_HEAD(&ptype_base[i]);

        INIT_LIST_HEAD(&offload_base);

        if (register_pernet_subsys(&netdev_net_ops))
                goto out;

        /*
         * Initialise the packet receive queues.
         */
        for_each_possible_cpu(i) {
                struct work_struct *flush = per_cpu_ptr(&flush_works, i);
                struct softnet_data *sd = &per_cpu(softnet_data, i);

                INIT_WORK(flush, flush_backlog);
                skb_queue_head_init(&sd->input_pkt_queue);
                skb_queue_head_init(&sd->process_queue);
#ifdef CONFIG_XFRM_OFFLOAD
                skb_queue_head_init(&sd->xfrm_backlog);
#endif
                INIT_LIST_HEAD(&sd->poll_list);
                sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
                sd->csd.func = rps_trigger_softirq;
                sd->csd.info = sd;
                sd->cpu = i;
#endif

                sd->backlog.poll = process_backlog;
                sd->backlog.weight = weight_p;
        }

        dev_boot_phase = 0;

        /* The loopback device is special: if any other network device
         * is present in a network namespace, the loopback device must
         * be present too.  Since we now dynamically allocate and free
         * the loopback device, ensure this invariant is maintained by
         * keeping the loopback device as the first device on the list
         * of network devices, so that it is the first device that
         * appears and the last network device that disappears.
         */
        if (register_pernet_device(&loopback_net_ops))
                goto out;

        if (register_pernet_device(&default_device_ops))
                goto out;

        open_softirq(NET_TX_SOFTIRQ, net_tx_action);
        open_softirq(NET_RX_SOFTIRQ, net_rx_action);

        rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
                                       NULL, dev_cpu_dead);
        WARN_ON(rc < 0);
        rc = 0;
out:
        return rc;
}

subsys_initcall(net_dev_init);