dev.c

/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_ingress.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
#include <net/udp_tunnel.h>
#include <linux/net_namespace.h>

#include "net-sysfs.h"

#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
					 struct netdev_notifier_info *info);
static struct napi_struct *napi_by_id(unsigned int napi_id);

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);
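
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the reader side of the discipline described above.  Pure readers may walk
 * the device list under RCU alone; writers must hold the RTNL plus
 * dev_base_lock for writing, as list_netdevice()/unlist_netdevice() below do.
 * The helper name is hypothetical.
 */
#if 0
static void example_walk_devices(struct net *net)
{
	struct net_device *dev;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		pr_info("%s\n", dev->name);
	rcu_read_unlock();
}
#endif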
static DEFINE_MUTEX(ifalias_mutex);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);

static seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0)
		;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */
static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */
DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] = {
	ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] = {
	"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	"_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	"_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	"_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif
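
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * how the helpers above would typically be applied once a device and one of
 * its transmit queues exist.  register_netdevice() does the real, per-queue
 * equivalent of this; the helper name here is hypothetical.
 */
#if 0
static void example_set_lockdep_classes(struct net_device *dev,
					struct netdev_queue *txq)
{
	netdev_set_xmit_lockdep_class(&txq->_xmit_lock, dev->type);
	netdev_set_addr_lockdep_class(dev);
}
#endif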
/*******************************************************************************
 *
 *		Protocol management and registration routines
 *
 *******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers that mangle input packets
 *	MUST BE last in the hash buckets, and protocol checking
 *	MUST start from the promiscuous ptype_all chain in net_bh.
 *	This is true now; do not change it.
 *	Explanation follows: if a packet-mangling protocol handler
 *	were first on the list, it could not tell that the packet
 *	is cloned and should be copied-on-write, so it would
 *	modify it in place and subsequent readers would get a broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
	else
		return pt->dev ? &pt->dev->ptype_specific :
				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that
 *	all CPUs that are in the middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */
void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
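
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a minimal ETH_P_ALL tap built on dev_add_pack()/dev_remove_pack().  The
 * handler and variable names are hypothetical; the handler owns the skb and
 * must free it.
 */
#if 0
static int example_tap_rcv(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type example_tap __read_mostly = {
	.type = htons(ETH_P_ALL),	/* lands on ptype_all, see ptype_head() */
	.func = example_tap_rcv,
};

/* dev_add_pack(&example_tap) to start tapping, dev_remove_pack(&example_tap) to stop. */
#endif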
/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);

/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&proto_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that
 *	all CPUs that are in the middle of receiving packets
 *	will see the new offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);
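
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * an offload registration keyed by EtherType.  The name and priority are
 * hypothetical, and the callbacks member is left to be filled in elsewhere;
 * a lower .priority sorts earlier in offload_base, as the insertion loop
 * above shows.
 */
#if 0
static struct packet_offload example_offload __read_mostly = {
	.type = cpu_to_be16(ETH_P_IP),
	.priority = 10,
	/* .callbacks = { ... }, */
};

/* dev_add_offload(&example_offload); ... dev_remove_offload(&example_offload); */
#endif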
  442. /**
  443. * __dev_remove_offload - remove offload handler
  444. * @po: packet offload declaration
  445. *
  446. * Remove a protocol offload handler that was previously added to the
  447. * kernel offload handlers by dev_add_offload(). The passed &offload_type
  448. * is removed from the kernel lists and can be freed or reused once this
  449. * function returns.
  450. *
  451. * The packet type might still be in use by receivers
  452. * and must not be freed until after all the CPU's have gone
  453. * through a quiescent state.
  454. */
  455. static void __dev_remove_offload(struct packet_offload *po)
  456. {
  457. struct list_head *head = &offload_base;
  458. struct packet_offload *po1;
  459. spin_lock(&offload_lock);
  460. list_for_each_entry(po1, head, list) {
  461. if (po == po1) {
  462. list_del_rcu(&po->list);
  463. goto out;
  464. }
  465. }
  466. pr_warn("dev_remove_offload: %p not found\n", po);
  467. out:
  468. spin_unlock(&offload_lock);
  469. }
  470. /**
  471. * dev_remove_offload - remove packet offload handler
  472. * @po: packet offload declaration
  473. *
  474. * Remove a packet offload handler that was previously added to the kernel
  475. * offload handlers by dev_add_offload(). The passed &offload_type is
  476. * removed from the kernel lists and can be freed or reused once this
  477. * function returns.
  478. *
479. * This call sleeps to guarantee that no CPU is looking at the packet
480. * offload handler after return.
  481. */
  482. void dev_remove_offload(struct packet_offload *po)
  483. {
  484. __dev_remove_offload(po);
  485. synchronize_net();
  486. }
  487. EXPORT_SYMBOL(dev_remove_offload);
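/*
 * Example (not part of the original file): a hedged sketch of registering
 * a packet_offload with dev_add_offload(). The EtherType and priority are
 * illustrative assumptions; a real handler would also fill in .callbacks
 * with its GRO/GSO routines.
 */
static struct packet_offload example_offload __read_mostly = {
	.type     = cpu_to_be16(0x88B5),
	.priority = 10,
};

/* dev_add_offload(&example_offload) at module init,
 * dev_remove_offload(&example_offload) at module exit.
 */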
  488. /******************************************************************************
  489. *
  490. * Device Boot-time Settings Routines
  491. *
  492. ******************************************************************************/
  493. /* Boot time configuration table */
  494. static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
  495. /**
  496. * netdev_boot_setup_add - add new setup entry
  497. * @name: name of the device
  498. * @map: configured settings for the device
  499. *
500. * Adds a new setup entry to the dev_boot_setup list. The function
501. * returns 0 on error and 1 on success. This is a generic routine for
502. * all netdevices.
  503. */
  504. static int netdev_boot_setup_add(char *name, struct ifmap *map)
  505. {
  506. struct netdev_boot_setup *s;
  507. int i;
  508. s = dev_boot_setup;
  509. for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
  510. if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
  511. memset(s[i].name, 0, sizeof(s[i].name));
  512. strlcpy(s[i].name, name, IFNAMSIZ);
  513. memcpy(&s[i].map, map, sizeof(s[i].map));
  514. break;
  515. }
  516. }
  517. return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
  518. }
  519. /**
  520. * netdev_boot_setup_check - check boot time settings
  521. * @dev: the netdevice
  522. *
523. * Check boot time settings for the device.
524. * Any settings found are applied to the device for use
525. * later during device probing.
526. * Returns 0 if no settings are found, 1 if they are.
  527. */
  528. int netdev_boot_setup_check(struct net_device *dev)
  529. {
  530. struct netdev_boot_setup *s = dev_boot_setup;
  531. int i;
  532. for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
  533. if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
  534. !strcmp(dev->name, s[i].name)) {
  535. dev->irq = s[i].map.irq;
  536. dev->base_addr = s[i].map.base_addr;
  537. dev->mem_start = s[i].map.mem_start;
  538. dev->mem_end = s[i].map.mem_end;
  539. return 1;
  540. }
  541. }
  542. return 0;
  543. }
  544. EXPORT_SYMBOL(netdev_boot_setup_check);
  545. /**
  546. * netdev_boot_base - get address from boot time settings
  547. * @prefix: prefix for network device
  548. * @unit: id for network device
  549. *
550. * Check boot time settings for the base address of the device.
551. * Any settings found are applied to the device for use
552. * later during device probing.
553. * Returns 0 if no settings are found.
  554. */
  555. unsigned long netdev_boot_base(const char *prefix, int unit)
  556. {
  557. const struct netdev_boot_setup *s = dev_boot_setup;
  558. char name[IFNAMSIZ];
  559. int i;
  560. sprintf(name, "%s%d", prefix, unit);
  561. /*
562. * If the device is already registered then return a base of 1
563. * to indicate not to probe for this interface
  564. */
  565. if (__dev_get_by_name(&init_net, name))
  566. return 1;
  567. for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
  568. if (!strcmp(name, s[i].name))
  569. return s[i].map.base_addr;
  570. return 0;
  571. }
  572. /*
  573. * Saves at boot time configured settings for any netdevice.
  574. */
  575. int __init netdev_boot_setup(char *str)
  576. {
  577. int ints[5];
  578. struct ifmap map;
  579. str = get_options(str, ARRAY_SIZE(ints), ints);
  580. if (!str || !*str)
  581. return 0;
  582. /* Save settings */
  583. memset(&map, 0, sizeof(map));
  584. if (ints[0] > 0)
  585. map.irq = ints[1];
  586. if (ints[0] > 1)
  587. map.base_addr = ints[2];
  588. if (ints[0] > 2)
  589. map.mem_start = ints[3];
  590. if (ints[0] > 3)
  591. map.mem_end = ints[4];
  592. /* Add new entry to the list */
  593. return netdev_boot_setup_add(str, &map);
  594. }
  595. __setup("netdev=", netdev_boot_setup);
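/*
 * Example (not part of the original file): with the boot parameter
 * "netdev=5,0x300,0,0,eth0" the code above saves irq=5 and base_addr=0x300
 * under the name "eth0"; a probing driver can then pick the values up.
 * example_probe() is a hypothetical illustration.
 */
static int example_probe(struct net_device *dev)
{
	if (!netdev_boot_setup_check(dev))
		return -ENODEV;
	/* dev->irq, dev->base_addr, dev->mem_start and dev->mem_end now
	 * carry the settings parsed from the "netdev=" parameter.
	 */
	return 0;
}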
  596. /*******************************************************************************
  597. *
  598. * Device Interface Subroutines
  599. *
  600. *******************************************************************************/
  601. /**
602. * dev_get_iflink - get 'iflink' value of an interface
  603. * @dev: targeted interface
  604. *
  605. * Indicates the ifindex the interface is linked to.
  606. * Physical interfaces have the same 'ifindex' and 'iflink' values.
  607. */
  608. int dev_get_iflink(const struct net_device *dev)
  609. {
  610. if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
  611. return dev->netdev_ops->ndo_get_iflink(dev);
  612. return dev->ifindex;
  613. }
  614. EXPORT_SYMBOL(dev_get_iflink);
  615. /**
  616. * dev_fill_metadata_dst - Retrieve tunnel egress information.
  617. * @dev: targeted interface
  618. * @skb: The packet.
  619. *
620. * For better visibility of tunnel traffic, OVS needs to retrieve
621. * egress tunnel information for a packet. The following API allows the
622. * user to get this info.
  623. */
  624. int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
  625. {
  626. struct ip_tunnel_info *info;
  627. if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
  628. return -EINVAL;
  629. info = skb_tunnel_info_unclone(skb);
  630. if (!info)
  631. return -ENOMEM;
  632. if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
  633. return -EINVAL;
  634. return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
  635. }
  636. EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
  637. /**
  638. * __dev_get_by_name - find a device by its name
  639. * @net: the applicable net namespace
  640. * @name: name to find
  641. *
  642. * Find an interface by name. Must be called under RTNL semaphore
  643. * or @dev_base_lock. If the name is found a pointer to the device
  644. * is returned. If the name is not found then %NULL is returned. The
  645. * reference counters are not incremented so the caller must be
  646. * careful with locks.
  647. */
  648. struct net_device *__dev_get_by_name(struct net *net, const char *name)
  649. {
  650. struct net_device *dev;
  651. struct hlist_head *head = dev_name_hash(net, name);
  652. hlist_for_each_entry(dev, head, name_hlist)
  653. if (!strncmp(dev->name, name, IFNAMSIZ))
  654. return dev;
  655. return NULL;
  656. }
  657. EXPORT_SYMBOL(__dev_get_by_name);
  658. /**
  659. * dev_get_by_name_rcu - find a device by its name
  660. * @net: the applicable net namespace
  661. * @name: name to find
  662. *
  663. * Find an interface by name.
  664. * If the name is found a pointer to the device is returned.
  665. * If the name is not found then %NULL is returned.
  666. * The reference counters are not incremented so the caller must be
  667. * careful with locks. The caller must hold RCU lock.
  668. */
  669. struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
  670. {
  671. struct net_device *dev;
  672. struct hlist_head *head = dev_name_hash(net, name);
  673. hlist_for_each_entry_rcu(dev, head, name_hlist)
  674. if (!strncmp(dev->name, name, IFNAMSIZ))
  675. return dev;
  676. return NULL;
  677. }
  678. EXPORT_SYMBOL(dev_get_by_name_rcu);
  679. /**
  680. * dev_get_by_name - find a device by its name
  681. * @net: the applicable net namespace
  682. * @name: name to find
  683. *
  684. * Find an interface by name. This can be called from any
  685. * context and does its own locking. The returned handle has
  686. * the usage count incremented and the caller must use dev_put() to
  687. * release it when it is no longer needed. %NULL is returned if no
  688. * matching device is found.
  689. */
  690. struct net_device *dev_get_by_name(struct net *net, const char *name)
  691. {
  692. struct net_device *dev;
  693. rcu_read_lock();
  694. dev = dev_get_by_name_rcu(net, name);
  695. if (dev)
  696. dev_hold(dev);
  697. rcu_read_unlock();
  698. return dev;
  699. }
  700. EXPORT_SYMBOL(dev_get_by_name);
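/*
 * Example (not part of the original file): a minimal sketch of a
 * refcounted lookup from process context. The device name "eth0" is an
 * illustrative assumption; the reference must be released with dev_put().
 */
static int example_print_mtu(struct net *net)
{
	struct net_device *dev = dev_get_by_name(net, "eth0");

	if (!dev)
		return -ENODEV;
	pr_info("%s: mtu %u\n", dev->name, dev->mtu);
	dev_put(dev);
	return 0;
}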
  701. /**
  702. * __dev_get_by_index - find a device by its ifindex
  703. * @net: the applicable net namespace
  704. * @ifindex: index of device
  705. *
  706. * Search for an interface by index. Returns %NULL if the device
  707. * is not found or a pointer to the device. The device has not
  708. * had its reference counter increased so the caller must be careful
  709. * about locking. The caller must hold either the RTNL semaphore
  710. * or @dev_base_lock.
  711. */
  712. struct net_device *__dev_get_by_index(struct net *net, int ifindex)
  713. {
  714. struct net_device *dev;
  715. struct hlist_head *head = dev_index_hash(net, ifindex);
  716. hlist_for_each_entry(dev, head, index_hlist)
  717. if (dev->ifindex == ifindex)
  718. return dev;
  719. return NULL;
  720. }
  721. EXPORT_SYMBOL(__dev_get_by_index);
  722. /**
  723. * dev_get_by_index_rcu - find a device by its ifindex
  724. * @net: the applicable net namespace
  725. * @ifindex: index of device
  726. *
  727. * Search for an interface by index. Returns %NULL if the device
  728. * is not found or a pointer to the device. The device has not
  729. * had its reference counter increased so the caller must be careful
  730. * about locking. The caller must hold RCU lock.
  731. */
  732. struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
  733. {
  734. struct net_device *dev;
  735. struct hlist_head *head = dev_index_hash(net, ifindex);
  736. hlist_for_each_entry_rcu(dev, head, index_hlist)
  737. if (dev->ifindex == ifindex)
  738. return dev;
  739. return NULL;
  740. }
  741. EXPORT_SYMBOL(dev_get_by_index_rcu);
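/*
 * Example (not part of the original file): a sketch of the RCU variant.
 * The pointer is only valid inside the read-side critical section and no
 * reference is taken, so nothing is dereferenced after rcu_read_unlock().
 */
static bool example_ifindex_is_up(struct net *net, int ifindex)
{
	struct net_device *dev;
	bool up = false;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		up = !!(dev->flags & IFF_UP);
	rcu_read_unlock();

	return up;
}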
  742. /**
  743. * dev_get_by_index - find a device by its ifindex
  744. * @net: the applicable net namespace
  745. * @ifindex: index of device
  746. *
  747. * Search for an interface by index. Returns NULL if the device
  748. * is not found or a pointer to the device. The device returned has
  749. * had a reference added and the pointer is safe until the user calls
  750. * dev_put to indicate they have finished with it.
  751. */
  752. struct net_device *dev_get_by_index(struct net *net, int ifindex)
  753. {
  754. struct net_device *dev;
  755. rcu_read_lock();
  756. dev = dev_get_by_index_rcu(net, ifindex);
  757. if (dev)
  758. dev_hold(dev);
  759. rcu_read_unlock();
  760. return dev;
  761. }
  762. EXPORT_SYMBOL(dev_get_by_index);
  763. /**
  764. * dev_get_by_napi_id - find a device by napi_id
  765. * @napi_id: ID of the NAPI struct
  766. *
  767. * Search for an interface by NAPI ID. Returns %NULL if the device
  768. * is not found or a pointer to the device. The device has not had
  769. * its reference counter increased so the caller must be careful
  770. * about locking. The caller must hold RCU lock.
  771. */
  772. struct net_device *dev_get_by_napi_id(unsigned int napi_id)
  773. {
  774. struct napi_struct *napi;
  775. WARN_ON_ONCE(!rcu_read_lock_held());
  776. if (napi_id < MIN_NAPI_ID)
  777. return NULL;
  778. napi = napi_by_id(napi_id);
  779. return napi ? napi->dev : NULL;
  780. }
  781. EXPORT_SYMBOL(dev_get_by_napi_id);
  782. /**
  783. * netdev_get_name - get a netdevice name, knowing its ifindex.
  784. * @net: network namespace
  785. * @name: a pointer to the buffer where the name will be stored.
  786. * @ifindex: the ifindex of the interface to get the name from.
  787. *
  788. * The use of raw_seqcount_begin() and cond_resched() before
  789. * retrying is required as we want to give the writers a chance
  790. * to complete when CONFIG_PREEMPT is not set.
  791. */
  792. int netdev_get_name(struct net *net, char *name, int ifindex)
  793. {
  794. struct net_device *dev;
  795. unsigned int seq;
  796. retry:
  797. seq = raw_seqcount_begin(&devnet_rename_seq);
  798. rcu_read_lock();
  799. dev = dev_get_by_index_rcu(net, ifindex);
  800. if (!dev) {
  801. rcu_read_unlock();
  802. return -ENODEV;
  803. }
  804. strcpy(name, dev->name);
  805. rcu_read_unlock();
  806. if (read_seqcount_retry(&devnet_rename_seq, seq)) {
  807. cond_resched();
  808. goto retry;
  809. }
  810. return 0;
  811. }
  812. /**
  813. * dev_getbyhwaddr_rcu - find a device by its hardware address
  814. * @net: the applicable net namespace
  815. * @type: media type of device
  816. * @ha: hardware address
  817. *
  818. * Search for an interface by MAC address. Returns NULL if the device
  819. * is not found or a pointer to the device.
  820. * The caller must hold RCU or RTNL.
  821. * The returned device has not had its ref count increased
  822. * and the caller must therefore be careful about locking
  823. *
  824. */
  825. struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
  826. const char *ha)
  827. {
  828. struct net_device *dev;
  829. for_each_netdev_rcu(net, dev)
  830. if (dev->type == type &&
  831. !memcmp(dev->dev_addr, ha, dev->addr_len))
  832. return dev;
  833. return NULL;
  834. }
  835. EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
  836. struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
  837. {
  838. struct net_device *dev;
  839. ASSERT_RTNL();
  840. for_each_netdev(net, dev)
  841. if (dev->type == type)
  842. return dev;
  843. return NULL;
  844. }
  845. EXPORT_SYMBOL(__dev_getfirstbyhwtype);
  846. struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
  847. {
  848. struct net_device *dev, *ret = NULL;
  849. rcu_read_lock();
  850. for_each_netdev_rcu(net, dev)
  851. if (dev->type == type) {
  852. dev_hold(dev);
  853. ret = dev;
  854. break;
  855. }
  856. rcu_read_unlock();
  857. return ret;
  858. }
  859. EXPORT_SYMBOL(dev_getfirstbyhwtype);
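/*
 * Example (not part of the original file): grab a reference to the first
 * Ethernet device in a namespace. ARPHRD_ETHER comes from <linux/if_arp.h>;
 * the caller drops the reference with dev_put() when finished.
 */
static struct net_device *example_first_ether(struct net *net)
{
	return dev_getfirstbyhwtype(net, ARPHRD_ETHER);
}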
  860. /**
  861. * __dev_get_by_flags - find any device with given flags
  862. * @net: the applicable net namespace
  863. * @if_flags: IFF_* values
  864. * @mask: bitmask of bits in if_flags to check
  865. *
  866. * Search for any interface with the given flags. Returns NULL if a device
  867. * is not found or a pointer to the device. Must be called inside
  868. * rtnl_lock(), and result refcount is unchanged.
  869. */
  870. struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
  871. unsigned short mask)
  872. {
  873. struct net_device *dev, *ret;
  874. ASSERT_RTNL();
  875. ret = NULL;
  876. for_each_netdev(net, dev) {
  877. if (((dev->flags ^ if_flags) & mask) == 0) {
  878. ret = dev;
  879. break;
  880. }
  881. }
  882. return ret;
  883. }
  884. EXPORT_SYMBOL(__dev_get_by_flags);
  885. /**
  886. * dev_valid_name - check if name is okay for network device
  887. * @name: name string
  888. *
889. * Network device names need to be valid file names
890. * to allow sysfs to work. We also disallow any kind of
  891. * whitespace.
  892. */
  893. bool dev_valid_name(const char *name)
  894. {
  895. if (*name == '\0')
  896. return false;
  897. if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
  898. return false;
  899. if (!strcmp(name, ".") || !strcmp(name, ".."))
  900. return false;
  901. while (*name) {
  902. if (*name == '/' || *name == ':' || isspace(*name))
  903. return false;
  904. name++;
  905. }
  906. return true;
  907. }
  908. EXPORT_SYMBOL(dev_valid_name);
  909. /**
  910. * __dev_alloc_name - allocate a name for a device
  911. * @net: network namespace to allocate the device name in
  912. * @name: name format string
  913. * @buf: scratch buffer and result name string
  914. *
915. * Passed a format string - eg "lt%d" - it will try and find a suitable
  916. * id. It scans list of devices to build up a free map, then chooses
  917. * the first empty slot. The caller must hold the dev_base or rtnl lock
  918. * while allocating the name and adding the device in order to avoid
  919. * duplicates.
  920. * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
  921. * Returns the number of the unit assigned or a negative errno code.
  922. */
  923. static int __dev_alloc_name(struct net *net, const char *name, char *buf)
  924. {
  925. int i = 0;
  926. const char *p;
  927. const int max_netdevices = 8*PAGE_SIZE;
  928. unsigned long *inuse;
  929. struct net_device *d;
  930. if (!dev_valid_name(name))
  931. return -EINVAL;
  932. p = strchr(name, '%');
  933. if (p) {
  934. /*
  935. * Verify the string as this thing may have come from
936. * the user. There must be exactly one "%d" and no other "%"
937. * characters.
  938. */
  939. if (p[1] != 'd' || strchr(p + 2, '%'))
  940. return -EINVAL;
  941. /* Use one page as a bit array of possible slots */
  942. inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
  943. if (!inuse)
  944. return -ENOMEM;
  945. for_each_netdev(net, d) {
  946. if (!sscanf(d->name, name, &i))
  947. continue;
  948. if (i < 0 || i >= max_netdevices)
  949. continue;
  950. /* avoid cases where sscanf is not exact inverse of printf */
  951. snprintf(buf, IFNAMSIZ, name, i);
  952. if (!strncmp(buf, d->name, IFNAMSIZ))
  953. set_bit(i, inuse);
  954. }
  955. i = find_first_zero_bit(inuse, max_netdevices);
  956. free_page((unsigned long) inuse);
  957. }
  958. snprintf(buf, IFNAMSIZ, name, i);
  959. if (!__dev_get_by_name(net, buf))
  960. return i;
  961. /* It is possible to run out of possible slots
  962. * when the name is long and there isn't enough space left
  963. * for the digits, or if all bits are used.
  964. */
  965. return -ENFILE;
  966. }
  967. static int dev_alloc_name_ns(struct net *net,
  968. struct net_device *dev,
  969. const char *name)
  970. {
  971. char buf[IFNAMSIZ];
  972. int ret;
  973. BUG_ON(!net);
  974. ret = __dev_alloc_name(net, name, buf);
  975. if (ret >= 0)
  976. strlcpy(dev->name, buf, IFNAMSIZ);
  977. return ret;
  978. }
  979. /**
  980. * dev_alloc_name - allocate a name for a device
  981. * @dev: device
  982. * @name: name format string
  983. *
984. * Passed a format string - eg "lt%d" - it will try and find a suitable
  985. * id. It scans list of devices to build up a free map, then chooses
  986. * the first empty slot. The caller must hold the dev_base or rtnl lock
  987. * while allocating the name and adding the device in order to avoid
  988. * duplicates.
  989. * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
  990. * Returns the number of the unit assigned or a negative errno code.
  991. */
  992. int dev_alloc_name(struct net_device *dev, const char *name)
  993. {
  994. return dev_alloc_name_ns(dev_net(dev), dev, name);
  995. }
  996. EXPORT_SYMBOL(dev_alloc_name);
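/*
 * Example (not part of the original file): a sketch of letting the core
 * pick the next free unit for a "veth%d" style name before the device is
 * registered. The format string is an illustrative assumption.
 */
static int example_pick_name(struct net_device *dev)
{
	int unit = dev_alloc_name(dev, "veth%d");

	if (unit < 0)
		return unit;	/* -EINVAL, -ENFILE, ... */
	/* dev->name now holds e.g. "veth0" */
	return 0;
}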
  997. int dev_get_valid_name(struct net *net, struct net_device *dev,
  998. const char *name)
  999. {
  1000. BUG_ON(!net);
  1001. if (!dev_valid_name(name))
  1002. return -EINVAL;
  1003. if (strchr(name, '%'))
  1004. return dev_alloc_name_ns(net, dev, name);
  1005. else if (__dev_get_by_name(net, name))
  1006. return -EEXIST;
  1007. else if (dev->name != name)
  1008. strlcpy(dev->name, name, IFNAMSIZ);
  1009. return 0;
  1010. }
  1011. EXPORT_SYMBOL(dev_get_valid_name);
  1012. /**
  1013. * dev_change_name - change name of a device
  1014. * @dev: device
  1015. * @newname: name (or format string) must be at least IFNAMSIZ
  1016. *
1017. * Change name of a device, can pass format strings "eth%d"
1018. * for wildcarding.
  1019. */
  1020. int dev_change_name(struct net_device *dev, const char *newname)
  1021. {
  1022. unsigned char old_assign_type;
  1023. char oldname[IFNAMSIZ];
  1024. int err = 0;
  1025. int ret;
  1026. struct net *net;
  1027. ASSERT_RTNL();
  1028. BUG_ON(!dev_net(dev));
  1029. net = dev_net(dev);
  1030. if (dev->flags & IFF_UP)
  1031. return -EBUSY;
  1032. write_seqcount_begin(&devnet_rename_seq);
  1033. if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
  1034. write_seqcount_end(&devnet_rename_seq);
  1035. return 0;
  1036. }
  1037. memcpy(oldname, dev->name, IFNAMSIZ);
  1038. err = dev_get_valid_name(net, dev, newname);
  1039. if (err < 0) {
  1040. write_seqcount_end(&devnet_rename_seq);
  1041. return err;
  1042. }
  1043. if (oldname[0] && !strchr(oldname, '%'))
  1044. netdev_info(dev, "renamed from %s\n", oldname);
  1045. old_assign_type = dev->name_assign_type;
  1046. dev->name_assign_type = NET_NAME_RENAMED;
  1047. rollback:
  1048. ret = device_rename(&dev->dev, dev->name);
  1049. if (ret) {
  1050. memcpy(dev->name, oldname, IFNAMSIZ);
  1051. dev->name_assign_type = old_assign_type;
  1052. write_seqcount_end(&devnet_rename_seq);
  1053. return ret;
  1054. }
  1055. write_seqcount_end(&devnet_rename_seq);
  1056. netdev_adjacent_rename_links(dev, oldname);
  1057. write_lock_bh(&dev_base_lock);
  1058. hlist_del_rcu(&dev->name_hlist);
  1059. write_unlock_bh(&dev_base_lock);
  1060. synchronize_rcu();
  1061. write_lock_bh(&dev_base_lock);
  1062. hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
  1063. write_unlock_bh(&dev_base_lock);
  1064. ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
  1065. ret = notifier_to_errno(ret);
  1066. if (ret) {
  1067. /* err >= 0 after dev_alloc_name() or stores the first errno */
  1068. if (err >= 0) {
  1069. err = ret;
  1070. write_seqcount_begin(&devnet_rename_seq);
  1071. memcpy(dev->name, oldname, IFNAMSIZ);
  1072. memcpy(oldname, newname, IFNAMSIZ);
  1073. dev->name_assign_type = old_assign_type;
  1074. old_assign_type = NET_NAME_RENAMED;
  1075. goto rollback;
  1076. } else {
  1077. pr_err("%s: name change rollback failed: %d\n",
  1078. dev->name, ret);
  1079. }
  1080. }
  1081. return err;
  1082. }
  1083. /**
  1084. * dev_set_alias - change ifalias of a device
  1085. * @dev: device
  1086. * @alias: name up to IFALIASZ
1087. * @len: limit of bytes to copy from @alias
1088. *
1089. * Set ifalias for a device.
  1090. */
  1091. int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
  1092. {
  1093. struct dev_ifalias *new_alias = NULL;
  1094. if (len >= IFALIASZ)
  1095. return -EINVAL;
  1096. if (len) {
  1097. new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
  1098. if (!new_alias)
  1099. return -ENOMEM;
  1100. memcpy(new_alias->ifalias, alias, len);
  1101. new_alias->ifalias[len] = 0;
  1102. }
  1103. mutex_lock(&ifalias_mutex);
  1104. rcu_swap_protected(dev->ifalias, new_alias,
  1105. mutex_is_locked(&ifalias_mutex));
  1106. mutex_unlock(&ifalias_mutex);
  1107. if (new_alias)
  1108. kfree_rcu(new_alias, rcuhead);
  1109. return len;
  1110. }
  1111. EXPORT_SYMBOL(dev_set_alias);
  1112. /**
  1113. * dev_get_alias - get ifalias of a device
  1114. * @dev: device
  1115. * @name: buffer to store name of ifalias
  1116. * @len: size of buffer
  1117. *
1118. * Get ifalias for a device. The caller must make sure dev cannot go
1119. * away, e.g. by holding the RCU read lock or owning a reference count to the device.
  1120. */
  1121. int dev_get_alias(const struct net_device *dev, char *name, size_t len)
  1122. {
  1123. const struct dev_ifalias *alias;
  1124. int ret = 0;
  1125. rcu_read_lock();
  1126. alias = rcu_dereference(dev->ifalias);
  1127. if (alias)
  1128. ret = snprintf(name, len, "%s", alias->ifalias);
  1129. rcu_read_unlock();
  1130. return ret;
  1131. }
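/*
 * Example (not part of the original file): a sketch of tagging a device
 * with a human-readable alias, later visible via sysfs and rtnetlink.
 * The description string is an illustrative assumption.
 */
static int example_set_alias(struct net_device *dev)
{
	static const char desc[] = "uplink to core switch";

	return dev_set_alias(dev, desc, strlen(desc));
}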
  1132. /**
  1133. * netdev_features_change - device changes features
  1134. * @dev: device to cause notification
  1135. *
  1136. * Called to indicate a device has changed features.
  1137. */
  1138. void netdev_features_change(struct net_device *dev)
  1139. {
  1140. call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
  1141. }
  1142. EXPORT_SYMBOL(netdev_features_change);
  1143. /**
  1144. * netdev_state_change - device changes state
  1145. * @dev: device to cause notification
  1146. *
  1147. * Called to indicate a device has changed state. This function calls
  1148. * the notifier chains for netdev_chain and sends a NEWLINK message
  1149. * to the routing socket.
  1150. */
  1151. void netdev_state_change(struct net_device *dev)
  1152. {
  1153. if (dev->flags & IFF_UP) {
  1154. struct netdev_notifier_change_info change_info = {
  1155. .info.dev = dev,
  1156. };
  1157. call_netdevice_notifiers_info(NETDEV_CHANGE,
  1158. &change_info.info);
  1159. rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
  1160. }
  1161. }
  1162. EXPORT_SYMBOL(netdev_state_change);
  1163. /**
  1164. * netdev_notify_peers - notify network peers about existence of @dev
  1165. * @dev: network device
  1166. *
  1167. * Generate traffic such that interested network peers are aware of
  1168. * @dev, such as by generating a gratuitous ARP. This may be used when
  1169. * a device wants to inform the rest of the network about some sort of
  1170. * reconfiguration such as a failover event or virtual machine
  1171. * migration.
  1172. */
  1173. void netdev_notify_peers(struct net_device *dev)
  1174. {
  1175. rtnl_lock();
  1176. call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
  1177. call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
  1178. rtnl_unlock();
  1179. }
  1180. EXPORT_SYMBOL(netdev_notify_peers);
  1181. static int __dev_open(struct net_device *dev)
  1182. {
  1183. const struct net_device_ops *ops = dev->netdev_ops;
  1184. int ret;
  1185. ASSERT_RTNL();
  1186. if (!netif_device_present(dev))
  1187. return -ENODEV;
  1188. /* Block netpoll from trying to do any rx path servicing.
  1189. * If we don't do this there is a chance ndo_poll_controller
  1190. * or ndo_poll may be running while we open the device
  1191. */
  1192. netpoll_poll_disable(dev);
  1193. ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
  1194. ret = notifier_to_errno(ret);
  1195. if (ret)
  1196. return ret;
  1197. set_bit(__LINK_STATE_START, &dev->state);
  1198. if (ops->ndo_validate_addr)
  1199. ret = ops->ndo_validate_addr(dev);
  1200. if (!ret && ops->ndo_open)
  1201. ret = ops->ndo_open(dev);
  1202. netpoll_poll_enable(dev);
  1203. if (ret)
  1204. clear_bit(__LINK_STATE_START, &dev->state);
  1205. else {
  1206. dev->flags |= IFF_UP;
  1207. dev_set_rx_mode(dev);
  1208. dev_activate(dev);
  1209. add_device_randomness(dev->dev_addr, dev->addr_len);
  1210. }
  1211. return ret;
  1212. }
  1213. /**
  1214. * dev_open - prepare an interface for use.
  1215. * @dev: device to open
  1216. *
  1217. * Takes a device from down to up state. The device's private open
  1218. * function is invoked and then the multicast lists are loaded. Finally
  1219. * the device is moved into the up state and a %NETDEV_UP message is
  1220. * sent to the netdev notifier chain.
  1221. *
  1222. * Calling this function on an active interface is a nop. On a failure
  1223. * a negative errno code is returned.
  1224. */
  1225. int dev_open(struct net_device *dev)
  1226. {
  1227. int ret;
  1228. if (dev->flags & IFF_UP)
  1229. return 0;
  1230. ret = __dev_open(dev);
  1231. if (ret < 0)
  1232. return ret;
  1233. rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
  1234. call_netdevice_notifiers(NETDEV_UP, dev);
  1235. return ret;
  1236. }
  1237. EXPORT_SYMBOL(dev_open);
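/*
 * Example (not part of the original file): dev_open() must run under the
 * RTNL lock, so an in-kernel caller typically wraps it as sketched below.
 */
static int example_bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_open(dev);
	rtnl_unlock();

	return err;
}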
  1238. static void __dev_close_many(struct list_head *head)
  1239. {
  1240. struct net_device *dev;
  1241. ASSERT_RTNL();
  1242. might_sleep();
  1243. list_for_each_entry(dev, head, close_list) {
  1244. /* Temporarily disable netpoll until the interface is down */
  1245. netpoll_poll_disable(dev);
  1246. call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
  1247. clear_bit(__LINK_STATE_START, &dev->state);
1248. * Synchronize to scheduled poll. We cannot touch the poll list, it
1249. * can even be on a different CPU. So just clear netif_running().
1250. *
1251. * dev->stop() will invoke napi_disable() on all of its
  1252. * napi_struct instances on this device.
  1253. */
  1254. smp_mb__after_atomic(); /* Commit netif_running(). */
  1255. }
  1256. dev_deactivate_many(head);
  1257. list_for_each_entry(dev, head, close_list) {
  1258. const struct net_device_ops *ops = dev->netdev_ops;
  1259. /*
1260. * Call the device specific close. This cannot fail.
1261. * It is only done while the device is UP.
  1262. *
  1263. * We allow it to be called even after a DETACH hot-plug
  1264. * event.
  1265. */
  1266. if (ops->ndo_stop)
  1267. ops->ndo_stop(dev);
  1268. dev->flags &= ~IFF_UP;
  1269. netpoll_poll_enable(dev);
  1270. }
  1271. }
  1272. static void __dev_close(struct net_device *dev)
  1273. {
  1274. LIST_HEAD(single);
  1275. list_add(&dev->close_list, &single);
  1276. __dev_close_many(&single);
  1277. list_del(&single);
  1278. }
  1279. void dev_close_many(struct list_head *head, bool unlink)
  1280. {
  1281. struct net_device *dev, *tmp;
  1282. /* Remove the devices that don't need to be closed */
  1283. list_for_each_entry_safe(dev, tmp, head, close_list)
  1284. if (!(dev->flags & IFF_UP))
  1285. list_del_init(&dev->close_list);
  1286. __dev_close_many(head);
  1287. list_for_each_entry_safe(dev, tmp, head, close_list) {
  1288. rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
  1289. call_netdevice_notifiers(NETDEV_DOWN, dev);
  1290. if (unlink)
  1291. list_del_init(&dev->close_list);
  1292. }
  1293. }
  1294. EXPORT_SYMBOL(dev_close_many);
  1295. /**
  1296. * dev_close - shutdown an interface.
  1297. * @dev: device to shutdown
  1298. *
  1299. * This function moves an active device into down state. A
  1300. * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
  1301. * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
  1302. * chain.
  1303. */
  1304. void dev_close(struct net_device *dev)
  1305. {
  1306. if (dev->flags & IFF_UP) {
  1307. LIST_HEAD(single);
  1308. list_add(&dev->close_list, &single);
  1309. dev_close_many(&single, true);
  1310. list_del(&single);
  1311. }
  1312. }
  1313. EXPORT_SYMBOL(dev_close);
  1314. /**
  1315. * dev_disable_lro - disable Large Receive Offload on a device
  1316. * @dev: device
  1317. *
  1318. * Disable Large Receive Offload (LRO) on a net device. Must be
  1319. * called under RTNL. This is needed if received packets may be
  1320. * forwarded to another interface.
  1321. */
  1322. void dev_disable_lro(struct net_device *dev)
  1323. {
  1324. struct net_device *lower_dev;
  1325. struct list_head *iter;
  1326. dev->wanted_features &= ~NETIF_F_LRO;
  1327. netdev_update_features(dev);
  1328. if (unlikely(dev->features & NETIF_F_LRO))
  1329. netdev_WARN(dev, "failed to disable LRO!\n");
  1330. netdev_for_each_lower_dev(dev, lower_dev, iter)
  1331. dev_disable_lro(lower_dev);
  1332. }
  1333. EXPORT_SYMBOL(dev_disable_lro);
  1334. /**
  1335. * dev_disable_gro_hw - disable HW Generic Receive Offload on a device
  1336. * @dev: device
  1337. *
  1338. * Disable HW Generic Receive Offload (GRO_HW) on a net device. Must be
  1339. * called under RTNL. This is needed if Generic XDP is installed on
  1340. * the device.
  1341. */
  1342. static void dev_disable_gro_hw(struct net_device *dev)
  1343. {
  1344. dev->wanted_features &= ~NETIF_F_GRO_HW;
  1345. netdev_update_features(dev);
  1346. if (unlikely(dev->features & NETIF_F_GRO_HW))
  1347. netdev_WARN(dev, "failed to disable GRO_HW!\n");
  1348. }
  1349. const char *netdev_cmd_to_name(enum netdev_cmd cmd)
  1350. {
  1351. #define N(val) \
  1352. case NETDEV_##val: \
  1353. return "NETDEV_" __stringify(val);
  1354. switch (cmd) {
  1355. N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
  1356. N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
  1357. N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
  1358. N(POST_INIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN) N(CHANGEUPPER)
  1359. N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA) N(BONDING_INFO)
  1360. N(PRECHANGEUPPER) N(CHANGELOWERSTATE) N(UDP_TUNNEL_PUSH_INFO)
  1361. N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
  1362. N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
  1363. N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
  1364. }
  1365. #undef N
  1366. return "UNKNOWN_NETDEV_EVENT";
  1367. }
  1368. EXPORT_SYMBOL_GPL(netdev_cmd_to_name);
  1369. static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
  1370. struct net_device *dev)
  1371. {
  1372. struct netdev_notifier_info info = {
  1373. .dev = dev,
  1374. };
  1375. return nb->notifier_call(nb, val, &info);
  1376. }
  1377. static int dev_boot_phase = 1;
  1378. /**
  1379. * register_netdevice_notifier - register a network notifier block
  1380. * @nb: notifier
  1381. *
  1382. * Register a notifier to be called when network device events occur.
  1383. * The notifier passed is linked into the kernel structures and must
  1384. * not be reused until it has been unregistered. A negative errno code
  1385. * is returned on a failure.
  1386. *
1387. * When registered, all registration and up events are replayed
1388. * to the new notifier to allow it to have a race-free
1389. * view of the network device list.
  1390. */
  1391. int register_netdevice_notifier(struct notifier_block *nb)
  1392. {
  1393. struct net_device *dev;
  1394. struct net_device *last;
  1395. struct net *net;
  1396. int err;
  1397. /* Close race with setup_net() and cleanup_net() */
  1398. down_write(&pernet_ops_rwsem);
  1399. rtnl_lock();
  1400. err = raw_notifier_chain_register(&netdev_chain, nb);
  1401. if (err)
  1402. goto unlock;
  1403. if (dev_boot_phase)
  1404. goto unlock;
  1405. for_each_net(net) {
  1406. for_each_netdev(net, dev) {
  1407. err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
  1408. err = notifier_to_errno(err);
  1409. if (err)
  1410. goto rollback;
  1411. if (!(dev->flags & IFF_UP))
  1412. continue;
  1413. call_netdevice_notifier(nb, NETDEV_UP, dev);
  1414. }
  1415. }
  1416. unlock:
  1417. rtnl_unlock();
  1418. up_write(&pernet_ops_rwsem);
  1419. return err;
  1420. rollback:
  1421. last = dev;
  1422. for_each_net(net) {
  1423. for_each_netdev(net, dev) {
  1424. if (dev == last)
  1425. goto outroll;
  1426. if (dev->flags & IFF_UP) {
  1427. call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
  1428. dev);
  1429. call_netdevice_notifier(nb, NETDEV_DOWN, dev);
  1430. }
  1431. call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
  1432. }
  1433. }
  1434. outroll:
  1435. raw_notifier_chain_unregister(&netdev_chain, nb);
  1436. goto unlock;
  1437. }
  1438. EXPORT_SYMBOL(register_netdevice_notifier);
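/*
 * Example (not part of the original file): a minimal notifier that logs
 * up/down transitions. netdev_notifier_info_to_dev() is the standard
 * accessor for the opaque @ptr; the example_* names are illustrative.
 */
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_UP:
		pr_info("%s: up\n", dev->name);
		break;
	case NETDEV_DOWN:
		pr_info("%s: down\n", dev->name);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_nb = {
	.notifier_call = example_netdev_event,
};

/* register_netdevice_notifier(&example_netdev_nb) at module init,
 * unregister_netdevice_notifier(&example_netdev_nb) at module exit.
 */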
  1439. /**
  1440. * unregister_netdevice_notifier - unregister a network notifier block
  1441. * @nb: notifier
  1442. *
  1443. * Unregister a notifier previously registered by
1444. * register_netdevice_notifier(). The notifier is unlinked from the
  1445. * kernel structures and may then be reused. A negative errno code
  1446. * is returned on a failure.
  1447. *
1448. * After unregistering, unregister and down device events are synthesized
1449. * for all devices on the device list and sent to the removed notifier, removing
1450. * the need for special-case cleanup code.
  1451. */
  1452. int unregister_netdevice_notifier(struct notifier_block *nb)
  1453. {
  1454. struct net_device *dev;
  1455. struct net *net;
  1456. int err;
  1457. /* Close race with setup_net() and cleanup_net() */
  1458. down_write(&pernet_ops_rwsem);
  1459. rtnl_lock();
  1460. err = raw_notifier_chain_unregister(&netdev_chain, nb);
  1461. if (err)
  1462. goto unlock;
  1463. for_each_net(net) {
  1464. for_each_netdev(net, dev) {
  1465. if (dev->flags & IFF_UP) {
  1466. call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
  1467. dev);
  1468. call_netdevice_notifier(nb, NETDEV_DOWN, dev);
  1469. }
  1470. call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
  1471. }
  1472. }
  1473. unlock:
  1474. rtnl_unlock();
  1475. up_write(&pernet_ops_rwsem);
  1476. return err;
  1477. }
  1478. EXPORT_SYMBOL(unregister_netdevice_notifier);
  1479. /**
  1480. * call_netdevice_notifiers_info - call all network notifier blocks
  1481. * @val: value passed unmodified to notifier function
  1482. * @info: notifier information data
  1483. *
  1484. * Call all network notifier blocks. Parameters and return value
  1485. * are as for raw_notifier_call_chain().
  1486. */
  1487. static int call_netdevice_notifiers_info(unsigned long val,
  1488. struct netdev_notifier_info *info)
  1489. {
  1490. ASSERT_RTNL();
  1491. return raw_notifier_call_chain(&netdev_chain, val, info);
  1492. }
  1493. /**
  1494. * call_netdevice_notifiers - call all network notifier blocks
  1495. * @val: value passed unmodified to notifier function
  1496. * @dev: net_device pointer passed unmodified to notifier function
  1497. *
  1498. * Call all network notifier blocks. Parameters and return value
  1499. * are as for raw_notifier_call_chain().
  1500. */
  1501. int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
  1502. {
  1503. struct netdev_notifier_info info = {
  1504. .dev = dev,
  1505. };
  1506. return call_netdevice_notifiers_info(val, &info);
  1507. }
  1508. EXPORT_SYMBOL(call_netdevice_notifiers);
  1509. /**
  1510. * call_netdevice_notifiers_mtu - call all network notifier blocks
  1511. * @val: value passed unmodified to notifier function
  1512. * @dev: net_device pointer passed unmodified to notifier function
  1513. * @arg: additional u32 argument passed to the notifier function
  1514. *
  1515. * Call all network notifier blocks. Parameters and return value
  1516. * are as for raw_notifier_call_chain().
  1517. */
  1518. static int call_netdevice_notifiers_mtu(unsigned long val,
  1519. struct net_device *dev, u32 arg)
  1520. {
  1521. struct netdev_notifier_info_ext info = {
  1522. .info.dev = dev,
  1523. .ext.mtu = arg,
  1524. };
  1525. BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);
  1526. return call_netdevice_notifiers_info(val, &info.info);
  1527. }
  1528. #ifdef CONFIG_NET_INGRESS
  1529. static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);
  1530. void net_inc_ingress_queue(void)
  1531. {
  1532. static_branch_inc(&ingress_needed_key);
  1533. }
  1534. EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
  1535. void net_dec_ingress_queue(void)
  1536. {
  1537. static_branch_dec(&ingress_needed_key);
  1538. }
  1539. EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
  1540. #endif
  1541. #ifdef CONFIG_NET_EGRESS
  1542. static DEFINE_STATIC_KEY_FALSE(egress_needed_key);
  1543. void net_inc_egress_queue(void)
  1544. {
  1545. static_branch_inc(&egress_needed_key);
  1546. }
  1547. EXPORT_SYMBOL_GPL(net_inc_egress_queue);
  1548. void net_dec_egress_queue(void)
  1549. {
  1550. static_branch_dec(&egress_needed_key);
  1551. }
  1552. EXPORT_SYMBOL_GPL(net_dec_egress_queue);
  1553. #endif
  1554. static DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
  1555. #ifdef HAVE_JUMP_LABEL
  1556. static atomic_t netstamp_needed_deferred;
  1557. static atomic_t netstamp_wanted;
  1558. static void netstamp_clear(struct work_struct *work)
  1559. {
  1560. int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
  1561. int wanted;
  1562. wanted = atomic_add_return(deferred, &netstamp_wanted);
  1563. if (wanted > 0)
  1564. static_branch_enable(&netstamp_needed_key);
  1565. else
  1566. static_branch_disable(&netstamp_needed_key);
  1567. }
  1568. static DECLARE_WORK(netstamp_work, netstamp_clear);
  1569. #endif
  1570. void net_enable_timestamp(void)
  1571. {
  1572. #ifdef HAVE_JUMP_LABEL
  1573. int wanted;
  1574. while (1) {
  1575. wanted = atomic_read(&netstamp_wanted);
  1576. if (wanted <= 0)
  1577. break;
  1578. if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
  1579. return;
  1580. }
  1581. atomic_inc(&netstamp_needed_deferred);
  1582. schedule_work(&netstamp_work);
  1583. #else
  1584. static_branch_inc(&netstamp_needed_key);
  1585. #endif
  1586. }
  1587. EXPORT_SYMBOL(net_enable_timestamp);
  1588. void net_disable_timestamp(void)
  1589. {
  1590. #ifdef HAVE_JUMP_LABEL
  1591. int wanted;
  1592. while (1) {
  1593. wanted = atomic_read(&netstamp_wanted);
  1594. if (wanted <= 1)
  1595. break;
  1596. if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
  1597. return;
  1598. }
  1599. atomic_dec(&netstamp_needed_deferred);
  1600. schedule_work(&netstamp_work);
  1601. #else
  1602. static_branch_dec(&netstamp_needed_key);
  1603. #endif
  1604. }
  1605. EXPORT_SYMBOL(net_disable_timestamp);
  1606. static inline void net_timestamp_set(struct sk_buff *skb)
  1607. {
  1608. skb->tstamp = 0;
  1609. if (static_branch_unlikely(&netstamp_needed_key))
  1610. __net_timestamp(skb);
  1611. }
  1612. #define net_timestamp_check(COND, SKB) \
  1613. if (static_branch_unlikely(&netstamp_needed_key)) { \
  1614. if ((COND) && !(SKB)->tstamp) \
  1615. __net_timestamp(SKB); \
  1616. } \
  1617. bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
  1618. {
  1619. unsigned int len;
  1620. if (!(dev->flags & IFF_UP))
  1621. return false;
  1622. len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
  1623. if (skb->len <= len)
  1624. return true;
  1625. /* if TSO is enabled, we don't care about the length as the packet
  1626. * could be forwarded without being segmented before
  1627. */
  1628. if (skb_is_gso(skb))
  1629. return true;
  1630. return false;
  1631. }
  1632. EXPORT_SYMBOL_GPL(is_skb_forwardable);
  1633. int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
  1634. {
  1635. int ret = ____dev_forward_skb(dev, skb);
  1636. if (likely(!ret)) {
  1637. skb->protocol = eth_type_trans(skb, dev);
  1638. skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
  1639. }
  1640. return ret;
  1641. }
  1642. EXPORT_SYMBOL_GPL(__dev_forward_skb);
  1643. /**
  1644. * dev_forward_skb - loopback an skb to another netif
  1645. *
  1646. * @dev: destination network device
  1647. * @skb: buffer to forward
  1648. *
  1649. * return values:
  1650. * NET_RX_SUCCESS (no congestion)
  1651. * NET_RX_DROP (packet was dropped, but freed)
  1652. *
  1653. * dev_forward_skb can be used for injecting an skb from the
  1654. * start_xmit function of one device into the receive queue
  1655. * of another device.
  1656. *
  1657. * The receiving device may be in another namespace, so
  1658. * we have to clear all information in the skb that could
  1659. * impact namespace isolation.
  1660. */
  1661. int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
  1662. {
  1663. return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
  1664. }
  1665. EXPORT_SYMBOL_GPL(dev_forward_skb);
  1666. static inline int deliver_skb(struct sk_buff *skb,
  1667. struct packet_type *pt_prev,
  1668. struct net_device *orig_dev)
  1669. {
  1670. if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
  1671. return -ENOMEM;
  1672. refcount_inc(&skb->users);
  1673. return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
  1674. }
  1675. static inline void deliver_ptype_list_skb(struct sk_buff *skb,
  1676. struct packet_type **pt,
  1677. struct net_device *orig_dev,
  1678. __be16 type,
  1679. struct list_head *ptype_list)
  1680. {
  1681. struct packet_type *ptype, *pt_prev = *pt;
  1682. list_for_each_entry_rcu(ptype, ptype_list, list) {
  1683. if (ptype->type != type)
  1684. continue;
  1685. if (pt_prev)
  1686. deliver_skb(skb, pt_prev, orig_dev);
  1687. pt_prev = ptype;
  1688. }
  1689. *pt = pt_prev;
  1690. }
  1691. static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
  1692. {
  1693. if (!ptype->af_packet_priv || !skb->sk)
  1694. return false;
  1695. if (ptype->id_match)
  1696. return ptype->id_match(ptype, skb->sk);
  1697. else if ((struct sock *)ptype->af_packet_priv == skb->sk)
  1698. return true;
  1699. return false;
  1700. }
  1701. /*
  1702. * Support routine. Sends outgoing frames to any network
  1703. * taps currently in use.
  1704. */
  1705. void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
  1706. {
  1707. struct packet_type *ptype;
  1708. struct sk_buff *skb2 = NULL;
  1709. struct packet_type *pt_prev = NULL;
  1710. struct list_head *ptype_list = &ptype_all;
  1711. rcu_read_lock();
  1712. again:
  1713. list_for_each_entry_rcu(ptype, ptype_list, list) {
  1714. /* Never send packets back to the socket
  1715. * they originated from - MvS (miquels@drinkel.ow.org)
  1716. */
  1717. if (skb_loop_sk(ptype, skb))
  1718. continue;
  1719. if (pt_prev) {
  1720. deliver_skb(skb2, pt_prev, skb->dev);
  1721. pt_prev = ptype;
  1722. continue;
  1723. }
  1724. /* need to clone skb, done only once */
  1725. skb2 = skb_clone(skb, GFP_ATOMIC);
  1726. if (!skb2)
  1727. goto out_unlock;
  1728. net_timestamp_set(skb2);
  1729. /* skb->nh should be correctly
  1730. * set by sender, so that the second statement is
  1731. * just protection against buggy protocols.
  1732. */
  1733. skb_reset_mac_header(skb2);
  1734. if (skb_network_header(skb2) < skb2->data ||
  1735. skb_network_header(skb2) > skb_tail_pointer(skb2)) {
  1736. net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
  1737. ntohs(skb2->protocol),
  1738. dev->name);
  1739. skb_reset_network_header(skb2);
  1740. }
  1741. skb2->transport_header = skb2->network_header;
  1742. skb2->pkt_type = PACKET_OUTGOING;
  1743. pt_prev = ptype;
  1744. }
  1745. if (ptype_list == &ptype_all) {
  1746. ptype_list = &dev->ptype_all;
  1747. goto again;
  1748. }
  1749. out_unlock:
  1750. if (pt_prev) {
  1751. if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
  1752. pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
  1753. else
  1754. kfree_skb(skb2);
  1755. }
  1756. rcu_read_unlock();
  1757. }
  1758. EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
  1759. /**
  1760. * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
  1761. * @dev: Network device
  1762. * @txq: number of queues available
  1763. *
1764. * If real_num_tx_queues is changed the tc mappings may no longer be
1765. * valid. To resolve this, verify that each tc mapping remains valid and, if
1766. * not, zero the mapping. With no priorities mapping to an
1767. * offset/count pair it will no longer be used. In the worst case, if TC0
1768. * is invalid nothing can be done, so priority mappings are disabled. It is
1769. * expected that drivers will fix this mapping if they can before
1770. * calling netif_set_real_num_tx_queues.
  1771. */
  1772. static void netif_setup_tc(struct net_device *dev, unsigned int txq)
  1773. {
  1774. int i;
  1775. struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
  1776. /* If TC0 is invalidated disable TC mapping */
  1777. if (tc->offset + tc->count > txq) {
  1778. pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
  1779. dev->num_tc = 0;
  1780. return;
  1781. }
  1782. /* Invalidated prio to tc mappings set to TC0 */
  1783. for (i = 1; i < TC_BITMASK + 1; i++) {
  1784. int q = netdev_get_prio_tc_map(dev, i);
  1785. tc = &dev->tc_to_txq[q];
  1786. if (tc->offset + tc->count > txq) {
  1787. pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
  1788. i, q);
  1789. netdev_set_prio_tc_map(dev, i, 0);
  1790. }
  1791. }
  1792. }
  1793. int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
  1794. {
  1795. if (dev->num_tc) {
  1796. struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
  1797. int i;
  1798. /* walk through the TCs and see if it falls into any of them */
  1799. for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
  1800. if ((txq - tc->offset) < tc->count)
  1801. return i;
  1802. }
  1803. /* didn't find it, just return -1 to indicate no match */
  1804. return -1;
  1805. }
  1806. return 0;
  1807. }
  1808. EXPORT_SYMBOL(netdev_txq_to_tc);
  1809. #ifdef CONFIG_XPS
  1810. struct static_key xps_needed __read_mostly;
  1811. EXPORT_SYMBOL(xps_needed);
  1812. struct static_key xps_rxqs_needed __read_mostly;
  1813. EXPORT_SYMBOL(xps_rxqs_needed);
  1814. static DEFINE_MUTEX(xps_map_mutex);
  1815. #define xmap_dereference(P) \
  1816. rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
  1817. static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
  1818. int tci, u16 index)
  1819. {
  1820. struct xps_map *map = NULL;
  1821. int pos;
  1822. if (dev_maps)
  1823. map = xmap_dereference(dev_maps->attr_map[tci]);
  1824. if (!map)
  1825. return false;
  1826. for (pos = map->len; pos--;) {
  1827. if (map->queues[pos] != index)
  1828. continue;
  1829. if (map->len > 1) {
  1830. map->queues[pos] = map->queues[--map->len];
  1831. break;
  1832. }
  1833. RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
  1834. kfree_rcu(map, rcu);
  1835. return false;
  1836. }
  1837. return true;
  1838. }
  1839. static bool remove_xps_queue_cpu(struct net_device *dev,
  1840. struct xps_dev_maps *dev_maps,
  1841. int cpu, u16 offset, u16 count)
  1842. {
  1843. int num_tc = dev->num_tc ? : 1;
  1844. bool active = false;
  1845. int tci;
  1846. for (tci = cpu * num_tc; num_tc--; tci++) {
  1847. int i, j;
  1848. for (i = count, j = offset; i--; j++) {
  1849. if (!remove_xps_queue(dev_maps, tci, j))
  1850. break;
  1851. }
  1852. active |= i < 0;
  1853. }
  1854. return active;
  1855. }
  1856. static void reset_xps_maps(struct net_device *dev,
  1857. struct xps_dev_maps *dev_maps,
  1858. bool is_rxqs_map)
  1859. {
  1860. if (is_rxqs_map) {
  1861. static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
  1862. RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
  1863. } else {
  1864. RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
  1865. }
  1866. static_key_slow_dec_cpuslocked(&xps_needed);
  1867. kfree_rcu(dev_maps, rcu);
  1868. }
  1869. static void clean_xps_maps(struct net_device *dev, const unsigned long *mask,
  1870. struct xps_dev_maps *dev_maps, unsigned int nr_ids,
  1871. u16 offset, u16 count, bool is_rxqs_map)
  1872. {
  1873. bool active = false;
  1874. int i, j;
  1875. for (j = -1; j = netif_attrmask_next(j, mask, nr_ids),
  1876. j < nr_ids;)
  1877. active |= remove_xps_queue_cpu(dev, dev_maps, j, offset,
  1878. count);
  1879. if (!active)
  1880. reset_xps_maps(dev, dev_maps, is_rxqs_map);
  1881. if (!is_rxqs_map) {
  1882. for (i = offset + (count - 1); count--; i--) {
  1883. netdev_queue_numa_node_write(
  1884. netdev_get_tx_queue(dev, i),
  1885. NUMA_NO_NODE);
  1886. }
  1887. }
  1888. }
  1889. static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
  1890. u16 count)
  1891. {
  1892. const unsigned long *possible_mask = NULL;
  1893. struct xps_dev_maps *dev_maps;
  1894. unsigned int nr_ids;
  1895. if (!static_key_false(&xps_needed))
  1896. return;
  1897. cpus_read_lock();
  1898. mutex_lock(&xps_map_mutex);
  1899. if (static_key_false(&xps_rxqs_needed)) {
  1900. dev_maps = xmap_dereference(dev->xps_rxqs_map);
  1901. if (dev_maps) {
  1902. nr_ids = dev->num_rx_queues;
  1903. clean_xps_maps(dev, possible_mask, dev_maps, nr_ids,
  1904. offset, count, true);
  1905. }
  1906. }
  1907. dev_maps = xmap_dereference(dev->xps_cpus_map);
  1908. if (!dev_maps)
  1909. goto out_no_maps;
  1910. if (num_possible_cpus() > 1)
  1911. possible_mask = cpumask_bits(cpu_possible_mask);
  1912. nr_ids = nr_cpu_ids;
  1913. clean_xps_maps(dev, possible_mask, dev_maps, nr_ids, offset, count,
  1914. false);
  1915. out_no_maps:
  1916. mutex_unlock(&xps_map_mutex);
  1917. cpus_read_unlock();
  1918. }
  1919. static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
  1920. {
  1921. netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
  1922. }
  1923. static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index,
  1924. u16 index, bool is_rxqs_map)
  1925. {
  1926. struct xps_map *new_map;
  1927. int alloc_len = XPS_MIN_MAP_ALLOC;
  1928. int i, pos;
  1929. for (pos = 0; map && pos < map->len; pos++) {
  1930. if (map->queues[pos] != index)
  1931. continue;
  1932. return map;
  1933. }
  1934. /* Need to add tx-queue to this CPU's/rx-queue's existing map */
  1935. if (map) {
  1936. if (pos < map->alloc_len)
  1937. return map;
  1938. alloc_len = map->alloc_len * 2;
  1939. }
  1940. /* Need to allocate new map to store tx-queue on this CPU's/rx-queue's
  1941. * map
  1942. */
  1943. if (is_rxqs_map)
  1944. new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL);
  1945. else
  1946. new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
  1947. cpu_to_node(attr_index));
  1948. if (!new_map)
  1949. return NULL;
  1950. for (i = 0; i < pos; i++)
  1951. new_map->queues[i] = map->queues[i];
  1952. new_map->alloc_len = alloc_len;
  1953. new_map->len = pos;
  1954. return new_map;
  1955. }
  1956. /* Must be called under cpus_read_lock */
  1957. int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
  1958. u16 index, bool is_rxqs_map)
  1959. {
  1960. const unsigned long *online_mask = NULL, *possible_mask = NULL;
  1961. struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
  1962. int i, j, tci, numa_node_id = -2;
  1963. int maps_sz, num_tc = 1, tc = 0;
  1964. struct xps_map *map, *new_map;
  1965. bool active = false;
  1966. unsigned int nr_ids;
  1967. if (dev->num_tc) {
  1968. /* Do not allow XPS on subordinate device directly */
  1969. num_tc = dev->num_tc;
  1970. if (num_tc < 0)
  1971. return -EINVAL;
  1972. /* If queue belongs to subordinate dev use its map */
  1973. dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
  1974. tc = netdev_txq_to_tc(dev, index);
  1975. if (tc < 0)
  1976. return -EINVAL;
  1977. }
  1978. mutex_lock(&xps_map_mutex);
  1979. if (is_rxqs_map) {
  1980. maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues);
  1981. dev_maps = xmap_dereference(dev->xps_rxqs_map);
  1982. nr_ids = dev->num_rx_queues;
  1983. } else {
  1984. maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc);
  1985. if (num_possible_cpus() > 1) {
  1986. online_mask = cpumask_bits(cpu_online_mask);
  1987. possible_mask = cpumask_bits(cpu_possible_mask);
  1988. }
  1989. dev_maps = xmap_dereference(dev->xps_cpus_map);
  1990. nr_ids = nr_cpu_ids;
  1991. }
  1992. if (maps_sz < L1_CACHE_BYTES)
  1993. maps_sz = L1_CACHE_BYTES;
  1994. /* allocate memory for queue storage */
  1995. for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids),
  1996. j < nr_ids;) {
  1997. if (!new_dev_maps)
  1998. new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
  1999. if (!new_dev_maps) {
  2000. mutex_unlock(&xps_map_mutex);
  2001. return -ENOMEM;
  2002. }
  2003. tci = j * num_tc + tc;
  2004. map = dev_maps ? xmap_dereference(dev_maps->attr_map[tci]) :
  2005. NULL;
  2006. map = expand_xps_map(map, j, index, is_rxqs_map);
  2007. if (!map)
  2008. goto error;
  2009. RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
  2010. }
  2011. if (!new_dev_maps)
  2012. goto out_no_new_maps;
  2013. if (!dev_maps) {
  2014. /* Increment static keys at most once per type */
  2015. static_key_slow_inc_cpuslocked(&xps_needed);
  2016. if (is_rxqs_map)
  2017. static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
  2018. }
  2019. for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
  2020. j < nr_ids;) {
  2021. /* copy maps belonging to foreign traffic classes */
  2022. for (i = tc, tci = j * num_tc; dev_maps && i--; tci++) {
  2023. /* fill in the new device map from the old device map */
  2024. map = xmap_dereference(dev_maps->attr_map[tci]);
  2025. RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
  2026. }
2027. * We need to explicitly update tci as the previous loop
  2028. * could break out early if dev_maps is NULL.
  2029. */
  2030. tci = j * num_tc + tc;
  2031. if (netif_attr_test_mask(j, mask, nr_ids) &&
  2032. netif_attr_test_online(j, online_mask, nr_ids)) {
  2033. /* add tx-queue to CPU/rx-queue maps */
  2034. int pos = 0;
  2035. map = xmap_dereference(new_dev_maps->attr_map[tci]);
  2036. while ((pos < map->len) && (map->queues[pos] != index))
  2037. pos++;
  2038. if (pos == map->len)
  2039. map->queues[map->len++] = index;
  2040. #ifdef CONFIG_NUMA
  2041. if (!is_rxqs_map) {
  2042. if (numa_node_id == -2)
  2043. numa_node_id = cpu_to_node(j);
  2044. else if (numa_node_id != cpu_to_node(j))
  2045. numa_node_id = -1;
  2046. }
  2047. #endif
  2048. } else if (dev_maps) {
  2049. /* fill in the new device map from the old device map */
  2050. map = xmap_dereference(dev_maps->attr_map[tci]);
  2051. RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
  2052. }
  2053. /* copy maps belonging to foreign traffic classes */
  2054. for (i = num_tc - tc, tci++; dev_maps && --i; tci++) {
  2055. /* fill in the new device map from the old device map */
  2056. map = xmap_dereference(dev_maps->attr_map[tci]);
  2057. RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
  2058. }
  2059. }
  2060. if (is_rxqs_map)
  2061. rcu_assign_pointer(dev->xps_rxqs_map, new_dev_maps);
  2062. else
  2063. rcu_assign_pointer(dev->xps_cpus_map, new_dev_maps);
  2064. /* Cleanup old maps */
  2065. if (!dev_maps)
  2066. goto out_no_old_maps;
  2067. for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
  2068. j < nr_ids;) {
  2069. for (i = num_tc, tci = j * num_tc; i--; tci++) {
  2070. new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
  2071. map = xmap_dereference(dev_maps->attr_map[tci]);
  2072. if (map && map != new_map)
  2073. kfree_rcu(map, rcu);
  2074. }
  2075. }
  2076. kfree_rcu(dev_maps, rcu);
  2077. out_no_old_maps:
  2078. dev_maps = new_dev_maps;
  2079. active = true;
  2080. out_no_new_maps:
  2081. if (!is_rxqs_map) {
  2082. /* update Tx queue numa node */
  2083. netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
  2084. (numa_node_id >= 0) ?
  2085. numa_node_id : NUMA_NO_NODE);
  2086. }
  2087. if (!dev_maps)
  2088. goto out_no_maps;
  2089. /* removes tx-queue from unused CPUs/rx-queues */
  2090. for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
  2091. j < nr_ids;) {
  2092. for (i = tc, tci = j * num_tc; i--; tci++)
  2093. active |= remove_xps_queue(dev_maps, tci, index);
  2094. if (!netif_attr_test_mask(j, mask, nr_ids) ||
  2095. !netif_attr_test_online(j, online_mask, nr_ids))
  2096. active |= remove_xps_queue(dev_maps, tci, index);
  2097. for (i = num_tc - tc, tci++; --i; tci++)
  2098. active |= remove_xps_queue(dev_maps, tci, index);
  2099. }
  2100. /* free map if not active */
  2101. if (!active)
  2102. reset_xps_maps(dev, dev_maps, is_rxqs_map);
  2103. out_no_maps:
  2104. mutex_unlock(&xps_map_mutex);
  2105. return 0;
  2106. error:
  2107. /* remove any maps that we added */
  2108. for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
  2109. j < nr_ids;) {
  2110. for (i = num_tc, tci = j * num_tc; i--; tci++) {
  2111. new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
  2112. map = dev_maps ?
  2113. xmap_dereference(dev_maps->attr_map[tci]) :
  2114. NULL;
  2115. if (new_map && new_map != map)
  2116. kfree(new_map);
  2117. }
  2118. }
  2119. mutex_unlock(&xps_map_mutex);
  2120. kfree(new_dev_maps);
  2121. return -ENOMEM;
  2122. }
  2123. EXPORT_SYMBOL_GPL(__netif_set_xps_queue);
  2124. int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
  2125. u16 index)
  2126. {
  2127. int ret;
  2128. cpus_read_lock();
  2129. ret = __netif_set_xps_queue(dev, cpumask_bits(mask), index, false);
  2130. cpus_read_unlock();
  2131. return ret;
  2132. }
  2133. EXPORT_SYMBOL(netif_set_xps_queue);
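/*
 * Illustrative sketch (editorial, not part of dev.c): a driver that wants a
 * simple 1:1 CPU-to-queue transmit affinity could program XPS like this from
 * its queue setup path.  The function name and the round-robin policy are
 * hypothetical; errors from netif_set_xps_queue() are typically non-fatal.
 */
static void example_setup_xps(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->real_num_tx_queues; i++) {
		cpumask_t mask;

		cpumask_clear(&mask);
		cpumask_set_cpu(i % num_online_cpus(), &mask);
		netif_set_xps_queue(dev, &mask, i);
	}
}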
  2134. #endif
  2135. static void netdev_unbind_all_sb_channels(struct net_device *dev)
  2136. {
  2137. struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
  2138. /* Unbind any subordinate channels */
  2139. while (txq-- != &dev->_tx[0]) {
  2140. if (txq->sb_dev)
  2141. netdev_unbind_sb_channel(dev, txq->sb_dev);
  2142. }
  2143. }
  2144. void netdev_reset_tc(struct net_device *dev)
  2145. {
  2146. #ifdef CONFIG_XPS
  2147. netif_reset_xps_queues_gt(dev, 0);
  2148. #endif
  2149. netdev_unbind_all_sb_channels(dev);
  2150. /* Reset TC configuration of device */
  2151. dev->num_tc = 0;
  2152. memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
  2153. memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
  2154. }
  2155. EXPORT_SYMBOL(netdev_reset_tc);
  2156. int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
  2157. {
  2158. if (tc >= dev->num_tc)
  2159. return -EINVAL;
  2160. #ifdef CONFIG_XPS
  2161. netif_reset_xps_queues(dev, offset, count);
  2162. #endif
  2163. dev->tc_to_txq[tc].count = count;
  2164. dev->tc_to_txq[tc].offset = offset;
  2165. return 0;
  2166. }
  2167. EXPORT_SYMBOL(netdev_set_tc_queue);
  2168. int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
  2169. {
  2170. if (num_tc > TC_MAX_QUEUE)
  2171. return -EINVAL;
  2172. #ifdef CONFIG_XPS
  2173. netif_reset_xps_queues_gt(dev, 0);
  2174. #endif
  2175. netdev_unbind_all_sb_channels(dev);
  2176. dev->num_tc = num_tc;
  2177. return 0;
  2178. }
  2179. EXPORT_SYMBOL(netdev_set_num_tc);
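/*
 * Illustrative sketch (editorial, not part of dev.c): the usual ordering when
 * a driver programs traffic classes - set the TC count first, then describe
 * which TX queue range backs each TC.  The helper name and the evenly split
 * queue layout are hypothetical.
 */
static int example_setup_tc(struct net_device *dev, u8 num_tc)
{
	u16 per_tc;
	u8 tc;
	int err;

	if (!num_tc) {
		netdev_reset_tc(dev);
		return 0;
	}

	per_tc = dev->real_num_tx_queues / num_tc;
	err = netdev_set_num_tc(dev, num_tc);
	if (err)
		return err;

	for (tc = 0; tc < num_tc; tc++) {
		err = netdev_set_tc_queue(dev, tc, per_tc, tc * per_tc);
		if (err)
			return err;
	}
	return 0;
}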
  2180. void netdev_unbind_sb_channel(struct net_device *dev,
  2181. struct net_device *sb_dev)
  2182. {
  2183. struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
  2184. #ifdef CONFIG_XPS
  2185. netif_reset_xps_queues_gt(sb_dev, 0);
  2186. #endif
  2187. memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq));
  2188. memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map));
  2189. while (txq-- != &dev->_tx[0]) {
  2190. if (txq->sb_dev == sb_dev)
  2191. txq->sb_dev = NULL;
  2192. }
  2193. }
  2194. EXPORT_SYMBOL(netdev_unbind_sb_channel);
  2195. int netdev_bind_sb_channel_queue(struct net_device *dev,
  2196. struct net_device *sb_dev,
  2197. u8 tc, u16 count, u16 offset)
  2198. {
  2199. /* Make certain the sb_dev and dev are already configured */
  2200. if (sb_dev->num_tc >= 0 || tc >= dev->num_tc)
  2201. return -EINVAL;
  2202. /* We cannot hand out queues we don't have */
  2203. if ((offset + count) > dev->real_num_tx_queues)
  2204. return -EINVAL;
  2205. /* Record the mapping */
  2206. sb_dev->tc_to_txq[tc].count = count;
  2207. sb_dev->tc_to_txq[tc].offset = offset;
  2208. /* Provide a way for Tx queue to find the tc_to_txq map or
  2209. * XPS map for itself.
  2210. */
  2211. while (count--)
  2212. netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev;
  2213. return 0;
  2214. }
  2215. EXPORT_SYMBOL(netdev_bind_sb_channel_queue);
  2216. int netdev_set_sb_channel(struct net_device *dev, u16 channel)
  2217. {
  2218. /* Do not use a multiqueue device to represent a subordinate channel */
  2219. if (netif_is_multiqueue(dev))
  2220. return -ENODEV;
  2221. /* We allow channels 1 - 32767 to be used for subordinate channels.
  2222. * Channel 0 is meant to be "native" mode and used only to represent
  2223. * the main root device. We allow writing 0 to reset the device back
  2224. * to normal mode after being used as a subordinate channel.
  2225. */
  2226. if (channel > S16_MAX)
  2227. return -EINVAL;
  2228. dev->num_tc = -channel;
  2229. return 0;
  2230. }
  2231. EXPORT_SYMBOL(netdev_set_sb_channel);
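/*
 * Illustrative sketch (editorial, not part of dev.c): the calling pattern the
 * subordinate-channel helpers above expect, loosely modelled on the macvlan
 * hardware-offload use case.  It assumes the lower device already has at
 * least one traffic class configured; the names and queue slice are
 * hypothetical.
 */
static int example_bind_sb_channel(struct net_device *lower,
				   struct net_device *upper,
				   u16 channel, u16 qcount, u16 qbase)
{
	int err;

	/* Mark the single-queue upper device as subordinate channel N */
	err = netdev_set_sb_channel(upper, channel);
	if (err)
		return err;

	/* Hand a slice of the lower device's TX queues to that channel */
	return netdev_bind_sb_channel_queue(lower, upper, 0, qcount, qbase);
}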
  2232. /*
  2233. * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2234. * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
  2235. */
  2236. int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
  2237. {
  2238. bool disabling;
  2239. int rc;
  2240. disabling = txq < dev->real_num_tx_queues;
  2241. if (txq < 1 || txq > dev->num_tx_queues)
  2242. return -EINVAL;
  2243. if (dev->reg_state == NETREG_REGISTERED ||
  2244. dev->reg_state == NETREG_UNREGISTERING) {
  2245. ASSERT_RTNL();
  2246. rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
  2247. txq);
  2248. if (rc)
  2249. return rc;
  2250. if (dev->num_tc)
  2251. netif_setup_tc(dev, txq);
  2252. dev->real_num_tx_queues = txq;
  2253. if (disabling) {
  2254. synchronize_net();
  2255. qdisc_reset_all_tx_gt(dev, txq);
  2256. #ifdef CONFIG_XPS
  2257. netif_reset_xps_queues_gt(dev, txq);
  2258. #endif
  2259. }
  2260. } else {
  2261. dev->real_num_tx_queues = txq;
  2262. }
  2263. return 0;
  2264. }
  2265. EXPORT_SYMBOL(netif_set_real_num_tx_queues);
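/*
 * Illustrative sketch (editorial, not part of dev.c): changing the number of
 * in-use TX queues on a registered device.  The helper name is hypothetical;
 * the rtnl_lock() requirement matches the ASSERT_RTNL() above.
 */
static int example_set_tx_channels(struct net_device *dev, unsigned int txq)
{
	int err;

	rtnl_lock();
	err = netif_set_real_num_tx_queues(dev, txq);
	rtnl_unlock();

	return err;
}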
  2266. #ifdef CONFIG_SYSFS
  2267. /**
  2268. * netif_set_real_num_rx_queues - set actual number of RX queues used
  2269. * @dev: Network device
  2270. * @rxq: Actual number of RX queues
  2271. *
  2272. * This must be called either with the rtnl_lock held or before
  2273. * registration of the net device. Returns 0 on success, or a
  2274. * negative error code. If called before registration, it always
  2275. * succeeds.
  2276. */
  2277. int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
  2278. {
  2279. int rc;
  2280. if (rxq < 1 || rxq > dev->num_rx_queues)
  2281. return -EINVAL;
  2282. if (dev->reg_state == NETREG_REGISTERED) {
  2283. ASSERT_RTNL();
  2284. rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
  2285. rxq);
  2286. if (rc)
  2287. return rc;
  2288. }
  2289. dev->real_num_rx_queues = rxq;
  2290. return 0;
  2291. }
  2292. EXPORT_SYMBOL(netif_set_real_num_rx_queues);
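/*
 * Illustrative sketch (editorial, not part of dev.c): a probe path can size
 * the RX queues before register_netdev(), where no rtnl_lock() is needed and
 * the call only fails if the count is out of range.  Names are hypothetical.
 */
static int example_probe_rx_queues(struct net_device *dev, unsigned int rxq)
{
	return netif_set_real_num_rx_queues(dev, rxq);
}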
  2293. #endif
  2294. /**
  2295. * netif_get_num_default_rss_queues - default number of RSS queues
  2296. *
  2297. * This routine should set an upper limit on the number of RSS queues
  2298. * used by default by multiqueue devices.
  2299. */
  2300. int netif_get_num_default_rss_queues(void)
  2301. {
  2302. return is_kdump_kernel() ?
  2303. 1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
  2304. }
  2305. EXPORT_SYMBOL(netif_get_num_default_rss_queues);
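/*
 * Illustrative sketch (editorial, not part of dev.c): drivers usually clamp
 * their hardware queue count with this helper when choosing a default.  The
 * hardware maximum of 64 is hypothetical.
 */
static unsigned int example_default_queue_count(void)
{
	return min_t(unsigned int, 64, netif_get_num_default_rss_queues());
}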
  2306. static void __netif_reschedule(struct Qdisc *q)
  2307. {
  2308. struct softnet_data *sd;
  2309. unsigned long flags;
  2310. local_irq_save(flags);
  2311. sd = this_cpu_ptr(&softnet_data);
  2312. q->next_sched = NULL;
  2313. *sd->output_queue_tailp = q;
  2314. sd->output_queue_tailp = &q->next_sched;
  2315. raise_softirq_irqoff(NET_TX_SOFTIRQ);
  2316. local_irq_restore(flags);
  2317. }
  2318. void __netif_schedule(struct Qdisc *q)
  2319. {
  2320. if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
  2321. __netif_reschedule(q);
  2322. }
  2323. EXPORT_SYMBOL(__netif_schedule);
  2324. struct dev_kfree_skb_cb {
  2325. enum skb_free_reason reason;
  2326. };
  2327. static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
  2328. {
  2329. return (struct dev_kfree_skb_cb *)skb->cb;
  2330. }
  2331. void netif_schedule_queue(struct netdev_queue *txq)
  2332. {
  2333. rcu_read_lock();
  2334. if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
  2335. struct Qdisc *q = rcu_dereference(txq->qdisc);
  2336. __netif_schedule(q);
  2337. }
  2338. rcu_read_unlock();
  2339. }
  2340. EXPORT_SYMBOL(netif_schedule_queue);
  2341. void netif_tx_wake_queue(struct netdev_queue *dev_queue)
  2342. {
  2343. if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
  2344. struct Qdisc *q;
  2345. rcu_read_lock();
  2346. q = rcu_dereference(dev_queue->qdisc);
  2347. __netif_schedule(q);
  2348. rcu_read_unlock();
  2349. }
  2350. }
  2351. EXPORT_SYMBOL(netif_tx_wake_queue);
  2352. void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
  2353. {
  2354. unsigned long flags;
  2355. if (unlikely(!skb))
  2356. return;
  2357. if (likely(refcount_read(&skb->users) == 1)) {
  2358. smp_rmb();
  2359. refcount_set(&skb->users, 0);
  2360. } else if (likely(!refcount_dec_and_test(&skb->users))) {
  2361. return;
  2362. }
  2363. get_kfree_skb_cb(skb)->reason = reason;
  2364. local_irq_save(flags);
  2365. skb->next = __this_cpu_read(softnet_data.completion_queue);
  2366. __this_cpu_write(softnet_data.completion_queue, skb);
  2367. raise_softirq_irqoff(NET_TX_SOFTIRQ);
  2368. local_irq_restore(flags);
  2369. }
  2370. EXPORT_SYMBOL(__dev_kfree_skb_irq);
  2371. void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
  2372. {
  2373. if (in_irq() || irqs_disabled())
  2374. __dev_kfree_skb_irq(skb, reason);
  2375. else
  2376. dev_kfree_skb(skb);
  2377. }
  2378. EXPORT_SYMBOL(__dev_kfree_skb_any);
  2379. /**
  2380. * netif_device_detach - mark device as removed
  2381. * @dev: network device
  2382. *
  2383. * Mark device as removed from system and therefore no longer available.
  2384. */
  2385. void netif_device_detach(struct net_device *dev)
  2386. {
  2387. if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
  2388. netif_running(dev)) {
  2389. netif_tx_stop_all_queues(dev);
  2390. }
  2391. }
  2392. EXPORT_SYMBOL(netif_device_detach);
  2393. /**
  2394. * netif_device_attach - mark device as attached
  2395. * @dev: network device
  2396. *
2397. * Mark device as attached to the system and restart if needed.
  2398. */
  2399. void netif_device_attach(struct net_device *dev)
  2400. {
  2401. if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
  2402. netif_running(dev)) {
  2403. netif_tx_wake_all_queues(dev);
  2404. __netdev_watchdog_up(dev);
  2405. }
  2406. }
  2407. EXPORT_SYMBOL(netif_device_attach);
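/*
 * Illustrative sketch (editorial, not part of dev.c): the usual power
 * management pairing of the two helpers above.  The suspend/resume hooks and
 * the hardware steps elided in comments are driver specific and hypothetical.
 */
static int example_suspend(struct net_device *dev)
{
	netif_device_detach(dev);	/* stops all TX queues if running */
	/* ... put the hardware to sleep ... */
	return 0;
}

static int example_resume(struct net_device *dev)
{
	/* ... reprogram the hardware ... */
	netif_device_attach(dev);	/* wakes queues, restarts watchdog */
	return 0;
}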
  2408. /*
2409. * Returns a Tx hash based on the given packet descriptor and the number of
2410. * Tx queues to be used as a distribution range.
  2411. */
  2412. static u16 skb_tx_hash(const struct net_device *dev,
  2413. const struct net_device *sb_dev,
  2414. struct sk_buff *skb)
  2415. {
  2416. u32 hash;
  2417. u16 qoffset = 0;
  2418. u16 qcount = dev->real_num_tx_queues;
  2419. if (dev->num_tc) {
  2420. u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
  2421. qoffset = sb_dev->tc_to_txq[tc].offset;
  2422. qcount = sb_dev->tc_to_txq[tc].count;
  2423. }
  2424. if (skb_rx_queue_recorded(skb)) {
  2425. hash = skb_get_rx_queue(skb);
  2426. while (unlikely(hash >= qcount))
  2427. hash -= qcount;
  2428. return hash + qoffset;
  2429. }
  2430. return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
  2431. }
  2432. static void skb_warn_bad_offload(const struct sk_buff *skb)
  2433. {
  2434. static const netdev_features_t null_features;
  2435. struct net_device *dev = skb->dev;
  2436. const char *name = "";
  2437. if (!net_ratelimit())
  2438. return;
  2439. if (dev) {
  2440. if (dev->dev.parent)
  2441. name = dev_driver_string(dev->dev.parent);
  2442. else
  2443. name = netdev_name(dev);
  2444. }
  2445. WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
  2446. "gso_type=%d ip_summed=%d\n",
  2447. name, dev ? &dev->features : &null_features,
  2448. skb->sk ? &skb->sk->sk_route_caps : &null_features,
  2449. skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
  2450. skb_shinfo(skb)->gso_type, skb->ip_summed);
  2451. }
  2452. /*
  2453. * Invalidate hardware checksum when packet is to be mangled, and
  2454. * complete checksum manually on outgoing path.
  2455. */
  2456. int skb_checksum_help(struct sk_buff *skb)
  2457. {
  2458. __wsum csum;
  2459. int ret = 0, offset;
  2460. if (skb->ip_summed == CHECKSUM_COMPLETE)
  2461. goto out_set_summed;
  2462. if (unlikely(skb_shinfo(skb)->gso_size)) {
  2463. skb_warn_bad_offload(skb);
  2464. return -EINVAL;
  2465. }
  2466. /* Before computing a checksum, we should make sure no frag could
2467. * be modified by an external entity: the checksum could be wrong.
  2468. */
  2469. if (skb_has_shared_frag(skb)) {
  2470. ret = __skb_linearize(skb);
  2471. if (ret)
  2472. goto out;
  2473. }
  2474. offset = skb_checksum_start_offset(skb);
  2475. BUG_ON(offset >= skb_headlen(skb));
  2476. csum = skb_checksum(skb, offset, skb->len - offset, 0);
  2477. offset += skb->csum_offset;
  2478. BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
  2479. if (skb_cloned(skb) &&
  2480. !skb_clone_writable(skb, offset + sizeof(__sum16))) {
  2481. ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
  2482. if (ret)
  2483. goto out;
  2484. }
  2485. *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
  2486. out_set_summed:
  2487. skb->ip_summed = CHECKSUM_NONE;
  2488. out:
  2489. return ret;
  2490. }
  2491. EXPORT_SYMBOL(skb_checksum_help);
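/*
 * Illustrative sketch (editorial, not part of dev.c): an xmit path that falls
 * back to software checksumming when the device cannot offload it.  The xmit
 * function and descriptor handling are hypothetical; the feature test and the
 * skb_checksum_help() call are the real pattern.
 */
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    !(dev->features & NETIF_F_HW_CSUM)) {
		if (skb_checksum_help(skb)) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
	/* ... map the buffer and queue a TX descriptor ... */
	return NETDEV_TX_OK;
}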
  2492. int skb_crc32c_csum_help(struct sk_buff *skb)
  2493. {
  2494. __le32 crc32c_csum;
  2495. int ret = 0, offset, start;
  2496. if (skb->ip_summed != CHECKSUM_PARTIAL)
  2497. goto out;
  2498. if (unlikely(skb_is_gso(skb)))
  2499. goto out;
  2500. /* Before computing a checksum, we should make sure no frag could
2501. * be modified by an external entity: the checksum could be wrong.
  2502. */
  2503. if (unlikely(skb_has_shared_frag(skb))) {
  2504. ret = __skb_linearize(skb);
  2505. if (ret)
  2506. goto out;
  2507. }
  2508. start = skb_checksum_start_offset(skb);
  2509. offset = start + offsetof(struct sctphdr, checksum);
  2510. if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
  2511. ret = -EINVAL;
  2512. goto out;
  2513. }
  2514. if (skb_cloned(skb) &&
  2515. !skb_clone_writable(skb, offset + sizeof(__le32))) {
  2516. ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
  2517. if (ret)
  2518. goto out;
  2519. }
  2520. crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
  2521. skb->len - start, ~(__u32)0,
  2522. crc32c_csum_stub));
  2523. *(__le32 *)(skb->data + offset) = crc32c_csum;
  2524. skb->ip_summed = CHECKSUM_NONE;
  2525. skb->csum_not_inet = 0;
  2526. out:
  2527. return ret;
  2528. }
  2529. __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
  2530. {
  2531. __be16 type = skb->protocol;
  2532. /* Tunnel gso handlers can set protocol to ethernet. */
  2533. if (type == htons(ETH_P_TEB)) {
  2534. struct ethhdr *eth;
  2535. if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
  2536. return 0;
  2537. eth = (struct ethhdr *)skb->data;
  2538. type = eth->h_proto;
  2539. }
  2540. return __vlan_get_protocol(skb, type, depth);
  2541. }
  2542. /**
  2543. * skb_mac_gso_segment - mac layer segmentation handler.
  2544. * @skb: buffer to segment
  2545. * @features: features for the output path (see dev->features)
  2546. */
  2547. struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
  2548. netdev_features_t features)
  2549. {
  2550. struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
  2551. struct packet_offload *ptype;
  2552. int vlan_depth = skb->mac_len;
  2553. __be16 type = skb_network_protocol(skb, &vlan_depth);
  2554. if (unlikely(!type))
  2555. return ERR_PTR(-EINVAL);
  2556. __skb_pull(skb, vlan_depth);
  2557. rcu_read_lock();
  2558. list_for_each_entry_rcu(ptype, &offload_base, list) {
  2559. if (ptype->type == type && ptype->callbacks.gso_segment) {
  2560. segs = ptype->callbacks.gso_segment(skb, features);
  2561. break;
  2562. }
  2563. }
  2564. rcu_read_unlock();
  2565. __skb_push(skb, skb->data - skb_mac_header(skb));
  2566. return segs;
  2567. }
  2568. EXPORT_SYMBOL(skb_mac_gso_segment);
  2569. /* openvswitch calls this on rx path, so we need a different check.
  2570. */
  2571. static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
  2572. {
  2573. if (tx_path)
  2574. return skb->ip_summed != CHECKSUM_PARTIAL &&
  2575. skb->ip_summed != CHECKSUM_UNNECESSARY;
  2576. return skb->ip_summed == CHECKSUM_NONE;
  2577. }
  2578. /**
  2579. * __skb_gso_segment - Perform segmentation on skb.
  2580. * @skb: buffer to segment
  2581. * @features: features for the output path (see dev->features)
  2582. * @tx_path: whether it is called in TX path
  2583. *
  2584. * This function segments the given skb and returns a list of segments.
  2585. *
  2586. * It may return NULL if the skb requires no segmentation. This is
  2587. * only possible when GSO is used for verifying header integrity.
  2588. *
  2589. * Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb.
  2590. */
  2591. struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
  2592. netdev_features_t features, bool tx_path)
  2593. {
  2594. struct sk_buff *segs;
  2595. if (unlikely(skb_needs_check(skb, tx_path))) {
  2596. int err;
  2597. /* We're going to init ->check field in TCP or UDP header */
  2598. err = skb_cow_head(skb, 0);
  2599. if (err < 0)
  2600. return ERR_PTR(err);
  2601. }
  2602. /* Only report GSO partial support if it will enable us to
  2603. * support segmentation on this frame without needing additional
  2604. * work.
  2605. */
  2606. if (features & NETIF_F_GSO_PARTIAL) {
  2607. netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
  2608. struct net_device *dev = skb->dev;
  2609. partial_features |= dev->features & dev->gso_partial_features;
  2610. if (!skb_gso_ok(skb, features | partial_features))
  2611. features &= ~NETIF_F_GSO_PARTIAL;
  2612. }
  2613. BUILD_BUG_ON(SKB_SGO_CB_OFFSET +
  2614. sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
  2615. SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
  2616. SKB_GSO_CB(skb)->encap_level = 0;
  2617. skb_reset_mac_header(skb);
  2618. skb_reset_mac_len(skb);
  2619. segs = skb_mac_gso_segment(skb, features);
  2620. if (unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
  2621. skb_warn_bad_offload(skb);
  2622. return segs;
  2623. }
  2624. EXPORT_SYMBOL(__skb_gso_segment);
  2625. /* Take action when hardware reception checksum errors are detected. */
  2626. #ifdef CONFIG_BUG
  2627. void netdev_rx_csum_fault(struct net_device *dev)
  2628. {
  2629. if (net_ratelimit()) {
  2630. pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
  2631. dump_stack();
  2632. }
  2633. }
  2634. EXPORT_SYMBOL(netdev_rx_csum_fault);
  2635. #endif
  2636. /* XXX: check that highmem exists at all on the given machine. */
  2637. static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
  2638. {
  2639. #ifdef CONFIG_HIGHMEM
  2640. int i;
  2641. if (!(dev->features & NETIF_F_HIGHDMA)) {
  2642. for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  2643. skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
  2644. if (PageHighMem(skb_frag_page(frag)))
  2645. return 1;
  2646. }
  2647. }
  2648. #endif
  2649. return 0;
  2650. }
2651. /* For an MPLS offload request, verify we are testing hardware MPLS features
  2652. * instead of standard features for the netdev.
  2653. */
  2654. #if IS_ENABLED(CONFIG_NET_MPLS_GSO)
  2655. static netdev_features_t net_mpls_features(struct sk_buff *skb,
  2656. netdev_features_t features,
  2657. __be16 type)
  2658. {
  2659. if (eth_p_mpls(type))
  2660. features &= skb->dev->mpls_features;
  2661. return features;
  2662. }
  2663. #else
  2664. static netdev_features_t net_mpls_features(struct sk_buff *skb,
  2665. netdev_features_t features,
  2666. __be16 type)
  2667. {
  2668. return features;
  2669. }
  2670. #endif
  2671. static netdev_features_t harmonize_features(struct sk_buff *skb,
  2672. netdev_features_t features)
  2673. {
  2674. int tmp;
  2675. __be16 type;
  2676. type = skb_network_protocol(skb, &tmp);
  2677. features = net_mpls_features(skb, features, type);
  2678. if (skb->ip_summed != CHECKSUM_NONE &&
  2679. !can_checksum_protocol(features, type)) {
  2680. features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
  2681. }
  2682. if (illegal_highdma(skb->dev, skb))
  2683. features &= ~NETIF_F_SG;
  2684. return features;
  2685. }
  2686. netdev_features_t passthru_features_check(struct sk_buff *skb,
  2687. struct net_device *dev,
  2688. netdev_features_t features)
  2689. {
  2690. return features;
  2691. }
  2692. EXPORT_SYMBOL(passthru_features_check);
  2693. static netdev_features_t dflt_features_check(struct sk_buff *skb,
  2694. struct net_device *dev,
  2695. netdev_features_t features)
  2696. {
  2697. return vlan_features_check(skb, features);
  2698. }
  2699. static netdev_features_t gso_features_check(const struct sk_buff *skb,
  2700. struct net_device *dev,
  2701. netdev_features_t features)
  2702. {
  2703. u16 gso_segs = skb_shinfo(skb)->gso_segs;
  2704. if (gso_segs > dev->gso_max_segs)
  2705. return features & ~NETIF_F_GSO_MASK;
  2706. /* Support for GSO partial features requires software
  2707. * intervention before we can actually process the packets
  2708. * so we need to strip support for any partial features now
  2709. * and we can pull them back in after we have partially
  2710. * segmented the frame.
  2711. */
  2712. if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
  2713. features &= ~dev->gso_partial_features;
  2714. /* Make sure to clear the IPv4 ID mangling feature if the
  2715. * IPv4 header has the potential to be fragmented.
  2716. */
  2717. if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
  2718. struct iphdr *iph = skb->encapsulation ?
  2719. inner_ip_hdr(skb) : ip_hdr(skb);
  2720. if (!(iph->frag_off & htons(IP_DF)))
  2721. features &= ~NETIF_F_TSO_MANGLEID;
  2722. }
  2723. return features;
  2724. }
  2725. netdev_features_t netif_skb_features(struct sk_buff *skb)
  2726. {
  2727. struct net_device *dev = skb->dev;
  2728. netdev_features_t features = dev->features;
  2729. if (skb_is_gso(skb))
  2730. features = gso_features_check(skb, dev, features);
2731. /* For an encapsulation offload request, verify we are testing
  2732. * hardware encapsulation features instead of standard
  2733. * features for the netdev
  2734. */
  2735. if (skb->encapsulation)
  2736. features &= dev->hw_enc_features;
  2737. if (skb_vlan_tagged(skb))
  2738. features = netdev_intersect_features(features,
  2739. dev->vlan_features |
  2740. NETIF_F_HW_VLAN_CTAG_TX |
  2741. NETIF_F_HW_VLAN_STAG_TX);
  2742. if (dev->netdev_ops->ndo_features_check)
  2743. features &= dev->netdev_ops->ndo_features_check(skb, dev,
  2744. features);
  2745. else
  2746. features &= dflt_features_check(skb, dev, features);
  2747. return harmonize_features(skb, features);
  2748. }
  2749. EXPORT_SYMBOL(netif_skb_features);
  2750. static int xmit_one(struct sk_buff *skb, struct net_device *dev,
  2751. struct netdev_queue *txq, bool more)
  2752. {
  2753. unsigned int len;
  2754. int rc;
  2755. if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
  2756. dev_queue_xmit_nit(skb, dev);
  2757. len = skb->len;
  2758. trace_net_dev_start_xmit(skb, dev);
  2759. rc = netdev_start_xmit(skb, dev, txq, more);
  2760. trace_net_dev_xmit(skb, rc, dev, len);
  2761. return rc;
  2762. }
  2763. struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
  2764. struct netdev_queue *txq, int *ret)
  2765. {
  2766. struct sk_buff *skb = first;
  2767. int rc = NETDEV_TX_OK;
  2768. while (skb) {
  2769. struct sk_buff *next = skb->next;
  2770. skb->next = NULL;
  2771. rc = xmit_one(skb, dev, txq, next != NULL);
  2772. if (unlikely(!dev_xmit_complete(rc))) {
  2773. skb->next = next;
  2774. goto out;
  2775. }
  2776. skb = next;
  2777. if (netif_xmit_stopped(txq) && skb) {
  2778. rc = NETDEV_TX_BUSY;
  2779. break;
  2780. }
  2781. }
  2782. out:
  2783. *ret = rc;
  2784. return skb;
  2785. }
  2786. static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
  2787. netdev_features_t features)
  2788. {
  2789. if (skb_vlan_tag_present(skb) &&
  2790. !vlan_hw_offload_capable(features, skb->vlan_proto))
  2791. skb = __vlan_hwaccel_push_inside(skb);
  2792. return skb;
  2793. }
  2794. int skb_csum_hwoffload_help(struct sk_buff *skb,
  2795. const netdev_features_t features)
  2796. {
  2797. if (unlikely(skb->csum_not_inet))
  2798. return !!(features & NETIF_F_SCTP_CRC) ? 0 :
  2799. skb_crc32c_csum_help(skb);
  2800. return !!(features & NETIF_F_CSUM_MASK) ? 0 : skb_checksum_help(skb);
  2801. }
  2802. EXPORT_SYMBOL(skb_csum_hwoffload_help);
  2803. static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
  2804. {
  2805. netdev_features_t features;
  2806. features = netif_skb_features(skb);
  2807. skb = validate_xmit_vlan(skb, features);
  2808. if (unlikely(!skb))
  2809. goto out_null;
  2810. skb = sk_validate_xmit_skb(skb, dev);
  2811. if (unlikely(!skb))
  2812. goto out_null;
  2813. if (netif_needs_gso(skb, features)) {
  2814. struct sk_buff *segs;
  2815. segs = skb_gso_segment(skb, features);
  2816. if (IS_ERR(segs)) {
  2817. goto out_kfree_skb;
  2818. } else if (segs) {
  2819. consume_skb(skb);
  2820. skb = segs;
  2821. }
  2822. } else {
  2823. if (skb_needs_linearize(skb, features) &&
  2824. __skb_linearize(skb))
  2825. goto out_kfree_skb;
  2826. /* If packet is not checksummed and device does not
  2827. * support checksumming for this protocol, complete
  2828. * checksumming here.
  2829. */
  2830. if (skb->ip_summed == CHECKSUM_PARTIAL) {
  2831. if (skb->encapsulation)
  2832. skb_set_inner_transport_header(skb,
  2833. skb_checksum_start_offset(skb));
  2834. else
  2835. skb_set_transport_header(skb,
  2836. skb_checksum_start_offset(skb));
  2837. if (skb_csum_hwoffload_help(skb, features))
  2838. goto out_kfree_skb;
  2839. }
  2840. }
  2841. skb = validate_xmit_xfrm(skb, features, again);
  2842. return skb;
  2843. out_kfree_skb:
  2844. kfree_skb(skb);
  2845. out_null:
  2846. atomic_long_inc(&dev->tx_dropped);
  2847. return NULL;
  2848. }
  2849. struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
  2850. {
  2851. struct sk_buff *next, *head = NULL, *tail;
  2852. for (; skb != NULL; skb = next) {
  2853. next = skb->next;
  2854. skb->next = NULL;
2855. /* in case skb won't be segmented, point to itself */
  2856. skb->prev = skb;
  2857. skb = validate_xmit_skb(skb, dev, again);
  2858. if (!skb)
  2859. continue;
  2860. if (!head)
  2861. head = skb;
  2862. else
  2863. tail->next = skb;
  2864. /* If skb was segmented, skb->prev points to
  2865. * the last segment. If not, it still contains skb.
  2866. */
  2867. tail = skb->prev;
  2868. }
  2869. return head;
  2870. }
  2871. EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
  2872. static void qdisc_pkt_len_init(struct sk_buff *skb)
  2873. {
  2874. const struct skb_shared_info *shinfo = skb_shinfo(skb);
  2875. qdisc_skb_cb(skb)->pkt_len = skb->len;
  2876. /* To get more precise estimation of bytes sent on wire,
2877. * we add to pkt_len the header size of all segments
  2878. */
  2879. if (shinfo->gso_size) {
  2880. unsigned int hdr_len;
  2881. u16 gso_segs = shinfo->gso_segs;
  2882. /* mac layer + network layer */
  2883. hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
  2884. /* + transport layer */
  2885. if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
  2886. const struct tcphdr *th;
  2887. struct tcphdr _tcphdr;
  2888. th = skb_header_pointer(skb, skb_transport_offset(skb),
  2889. sizeof(_tcphdr), &_tcphdr);
  2890. if (likely(th))
  2891. hdr_len += __tcp_hdrlen(th);
  2892. } else {
  2893. struct udphdr _udphdr;
  2894. if (skb_header_pointer(skb, skb_transport_offset(skb),
  2895. sizeof(_udphdr), &_udphdr))
  2896. hdr_len += sizeof(struct udphdr);
  2897. }
  2898. if (shinfo->gso_type & SKB_GSO_DODGY)
  2899. gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
  2900. shinfo->gso_size);
  2901. qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
  2902. }
  2903. }
  2904. static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
  2905. struct net_device *dev,
  2906. struct netdev_queue *txq)
  2907. {
  2908. spinlock_t *root_lock = qdisc_lock(q);
  2909. struct sk_buff *to_free = NULL;
  2910. bool contended;
  2911. int rc;
  2912. qdisc_calculate_pkt_len(skb, q);
  2913. if (q->flags & TCQ_F_NOLOCK) {
  2914. if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
  2915. __qdisc_drop(skb, &to_free);
  2916. rc = NET_XMIT_DROP;
  2917. } else {
  2918. rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
  2919. qdisc_run(q);
  2920. }
  2921. if (unlikely(to_free))
  2922. kfree_skb_list(to_free);
  2923. return rc;
  2924. }
  2925. /*
  2926. * Heuristic to force contended enqueues to serialize on a
  2927. * separate lock before trying to get qdisc main lock.
  2928. * This permits qdisc->running owner to get the lock more
  2929. * often and dequeue packets faster.
  2930. */
  2931. contended = qdisc_is_running(q);
  2932. if (unlikely(contended))
  2933. spin_lock(&q->busylock);
  2934. spin_lock(root_lock);
  2935. if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
  2936. __qdisc_drop(skb, &to_free);
  2937. rc = NET_XMIT_DROP;
  2938. } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
  2939. qdisc_run_begin(q)) {
  2940. /*
  2941. * This is a work-conserving queue; there are no old skbs
  2942. * waiting to be sent out; and the qdisc is not running -
  2943. * xmit the skb directly.
  2944. */
  2945. qdisc_bstats_update(q, skb);
  2946. if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
  2947. if (unlikely(contended)) {
  2948. spin_unlock(&q->busylock);
  2949. contended = false;
  2950. }
  2951. __qdisc_run(q);
  2952. }
  2953. qdisc_run_end(q);
  2954. rc = NET_XMIT_SUCCESS;
  2955. } else {
  2956. rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
  2957. if (qdisc_run_begin(q)) {
  2958. if (unlikely(contended)) {
  2959. spin_unlock(&q->busylock);
  2960. contended = false;
  2961. }
  2962. __qdisc_run(q);
  2963. qdisc_run_end(q);
  2964. }
  2965. }
  2966. spin_unlock(root_lock);
  2967. if (unlikely(to_free))
  2968. kfree_skb_list(to_free);
  2969. if (unlikely(contended))
  2970. spin_unlock(&q->busylock);
  2971. return rc;
  2972. }
  2973. #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
  2974. static void skb_update_prio(struct sk_buff *skb)
  2975. {
  2976. const struct netprio_map *map;
  2977. const struct sock *sk;
  2978. unsigned int prioidx;
  2979. if (skb->priority)
  2980. return;
  2981. map = rcu_dereference_bh(skb->dev->priomap);
  2982. if (!map)
  2983. return;
  2984. sk = skb_to_full_sk(skb);
  2985. if (!sk)
  2986. return;
  2987. prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
  2988. if (prioidx < map->priomap_len)
  2989. skb->priority = map->priomap[prioidx];
  2990. }
  2991. #else
  2992. #define skb_update_prio(skb)
  2993. #endif
  2994. DEFINE_PER_CPU(int, xmit_recursion);
  2995. EXPORT_SYMBOL(xmit_recursion);
  2996. /**
  2997. * dev_loopback_xmit - loop back @skb
  2998. * @net: network namespace this loopback is happening in
  2999. * @sk: sk needed to be a netfilter okfn
  3000. * @skb: buffer to transmit
  3001. */
  3002. int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
  3003. {
  3004. skb_reset_mac_header(skb);
  3005. __skb_pull(skb, skb_network_offset(skb));
  3006. skb->pkt_type = PACKET_LOOPBACK;
  3007. skb->ip_summed = CHECKSUM_UNNECESSARY;
  3008. WARN_ON(!skb_dst(skb));
  3009. skb_dst_force(skb);
  3010. netif_rx_ni(skb);
  3011. return 0;
  3012. }
  3013. EXPORT_SYMBOL(dev_loopback_xmit);
  3014. #ifdef CONFIG_NET_EGRESS
  3015. static struct sk_buff *
  3016. sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
  3017. {
  3018. struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress);
  3019. struct tcf_result cl_res;
  3020. if (!miniq)
  3021. return skb;
  3022. /* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
  3023. mini_qdisc_bstats_cpu_update(miniq, skb);
  3024. switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
  3025. case TC_ACT_OK:
  3026. case TC_ACT_RECLASSIFY:
  3027. skb->tc_index = TC_H_MIN(cl_res.classid);
  3028. break;
  3029. case TC_ACT_SHOT:
  3030. mini_qdisc_qstats_cpu_drop(miniq);
  3031. *ret = NET_XMIT_DROP;
  3032. kfree_skb(skb);
  3033. return NULL;
  3034. case TC_ACT_STOLEN:
  3035. case TC_ACT_QUEUED:
  3036. case TC_ACT_TRAP:
  3037. *ret = NET_XMIT_SUCCESS;
  3038. consume_skb(skb);
  3039. return NULL;
  3040. case TC_ACT_REDIRECT:
  3041. /* No need to push/pop skb's mac_header here on egress! */
  3042. skb_do_redirect(skb);
  3043. *ret = NET_XMIT_SUCCESS;
  3044. return NULL;
  3045. default:
  3046. break;
  3047. }
  3048. return skb;
  3049. }
  3050. #endif /* CONFIG_NET_EGRESS */
  3051. #ifdef CONFIG_XPS
  3052. static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
  3053. struct xps_dev_maps *dev_maps, unsigned int tci)
  3054. {
  3055. struct xps_map *map;
  3056. int queue_index = -1;
  3057. if (dev->num_tc) {
  3058. tci *= dev->num_tc;
  3059. tci += netdev_get_prio_tc_map(dev, skb->priority);
  3060. }
  3061. map = rcu_dereference(dev_maps->attr_map[tci]);
  3062. if (map) {
  3063. if (map->len == 1)
  3064. queue_index = map->queues[0];
  3065. else
  3066. queue_index = map->queues[reciprocal_scale(
  3067. skb_get_hash(skb), map->len)];
  3068. if (unlikely(queue_index >= dev->real_num_tx_queues))
  3069. queue_index = -1;
  3070. }
  3071. return queue_index;
  3072. }
  3073. #endif
  3074. static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
  3075. struct sk_buff *skb)
  3076. {
  3077. #ifdef CONFIG_XPS
  3078. struct xps_dev_maps *dev_maps;
  3079. struct sock *sk = skb->sk;
  3080. int queue_index = -1;
  3081. if (!static_key_false(&xps_needed))
  3082. return -1;
  3083. rcu_read_lock();
  3084. if (!static_key_false(&xps_rxqs_needed))
  3085. goto get_cpus_map;
  3086. dev_maps = rcu_dereference(sb_dev->xps_rxqs_map);
  3087. if (dev_maps) {
  3088. int tci = sk_rx_queue_get(sk);
  3089. if (tci >= 0 && tci < dev->num_rx_queues)
  3090. queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
  3091. tci);
  3092. }
  3093. get_cpus_map:
  3094. if (queue_index < 0) {
  3095. dev_maps = rcu_dereference(sb_dev->xps_cpus_map);
  3096. if (dev_maps) {
  3097. unsigned int tci = skb->sender_cpu - 1;
  3098. queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
  3099. tci);
  3100. }
  3101. }
  3102. rcu_read_unlock();
  3103. return queue_index;
  3104. #else
  3105. return -1;
  3106. #endif
  3107. }
  3108. u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
  3109. struct net_device *sb_dev,
  3110. select_queue_fallback_t fallback)
  3111. {
  3112. return 0;
  3113. }
  3114. EXPORT_SYMBOL(dev_pick_tx_zero);
  3115. u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
  3116. struct net_device *sb_dev,
  3117. select_queue_fallback_t fallback)
  3118. {
  3119. return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
  3120. }
  3121. EXPORT_SYMBOL(dev_pick_tx_cpu_id);
  3122. static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
  3123. struct net_device *sb_dev)
  3124. {
  3125. struct sock *sk = skb->sk;
  3126. int queue_index = sk_tx_queue_get(sk);
  3127. sb_dev = sb_dev ? : dev;
  3128. if (queue_index < 0 || skb->ooo_okay ||
  3129. queue_index >= dev->real_num_tx_queues) {
  3130. int new_index = get_xps_queue(dev, sb_dev, skb);
  3131. if (new_index < 0)
  3132. new_index = skb_tx_hash(dev, sb_dev, skb);
  3133. if (queue_index != new_index && sk &&
  3134. sk_fullsock(sk) &&
  3135. rcu_access_pointer(sk->sk_dst_cache))
  3136. sk_tx_queue_set(sk, new_index);
  3137. queue_index = new_index;
  3138. }
  3139. return queue_index;
  3140. }
  3141. struct netdev_queue *netdev_pick_tx(struct net_device *dev,
  3142. struct sk_buff *skb,
  3143. struct net_device *sb_dev)
  3144. {
  3145. int queue_index = 0;
  3146. #ifdef CONFIG_XPS
  3147. u32 sender_cpu = skb->sender_cpu - 1;
  3148. if (sender_cpu >= (u32)NR_CPUS)
  3149. skb->sender_cpu = raw_smp_processor_id() + 1;
  3150. #endif
  3151. if (dev->real_num_tx_queues != 1) {
  3152. const struct net_device_ops *ops = dev->netdev_ops;
  3153. if (ops->ndo_select_queue)
  3154. queue_index = ops->ndo_select_queue(dev, skb, sb_dev,
  3155. __netdev_pick_tx);
  3156. else
  3157. queue_index = __netdev_pick_tx(dev, skb, sb_dev);
  3158. queue_index = netdev_cap_txqueue(dev, queue_index);
  3159. }
  3160. skb_set_queue_mapping(skb, queue_index);
  3161. return netdev_get_tx_queue(dev, queue_index);
  3162. }
  3163. /**
  3164. * __dev_queue_xmit - transmit a buffer
  3165. * @skb: buffer to transmit
3166. * @sb_dev: subordinate device used for L2 forwarding offload
  3167. *
  3168. * Queue a buffer for transmission to a network device. The caller must
  3169. * have set the device and priority and built the buffer before calling
  3170. * this function. The function can be called from an interrupt.
  3171. *
  3172. * A negative errno code is returned on a failure. A success does not
  3173. * guarantee the frame will be transmitted as it may be dropped due
  3174. * to congestion or traffic shaping.
  3175. *
  3176. * -----------------------------------------------------------------------------------
  3177. * I notice this method can also return errors from the queue disciplines,
  3178. * including NET_XMIT_DROP, which is a positive value. So, errors can also
  3179. * be positive.
  3180. *
  3181. * Regardless of the return value, the skb is consumed, so it is currently
  3182. * difficult to retry a send to this method. (You can bump the ref count
  3183. * before sending to hold a reference for retry if you are careful.)
  3184. *
  3185. * When calling this method, interrupts MUST be enabled. This is because
  3186. * the BH enable code must have IRQs enabled so that it will not deadlock.
  3187. * --BLG
  3188. */
  3189. static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
  3190. {
  3191. struct net_device *dev = skb->dev;
  3192. struct netdev_queue *txq;
  3193. struct Qdisc *q;
  3194. int rc = -ENOMEM;
  3195. bool again = false;
  3196. skb_reset_mac_header(skb);
  3197. if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
  3198. __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
  3199. /* Disable soft irqs for various locks below. Also
  3200. * stops preemption for RCU.
  3201. */
  3202. rcu_read_lock_bh();
  3203. skb_update_prio(skb);
  3204. qdisc_pkt_len_init(skb);
  3205. #ifdef CONFIG_NET_CLS_ACT
  3206. skb->tc_at_ingress = 0;
  3207. # ifdef CONFIG_NET_EGRESS
  3208. if (static_branch_unlikely(&egress_needed_key)) {
  3209. skb = sch_handle_egress(skb, &rc, dev);
  3210. if (!skb)
  3211. goto out;
  3212. }
  3213. # endif
  3214. #endif
  3215. /* If device/qdisc don't need skb->dst, release it right now while
3216. * it's hot in this CPU's cache.
  3217. */
  3218. if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
  3219. skb_dst_drop(skb);
  3220. else
  3221. skb_dst_force(skb);
  3222. txq = netdev_pick_tx(dev, skb, sb_dev);
  3223. q = rcu_dereference_bh(txq->qdisc);
  3224. trace_net_dev_queue(skb);
  3225. if (q->enqueue) {
  3226. rc = __dev_xmit_skb(skb, q, dev, txq);
  3227. goto out;
  3228. }
  3229. /* The device has no queue. Common case for software devices:
  3230. * loopback, all the sorts of tunnels...
  3231. * Really, it is unlikely that netif_tx_lock protection is necessary
  3232. * here. (f.e. loopback and IP tunnels are clean ignoring statistics
  3233. * counters.)
  3234. * However, it is possible, that they rely on protection
  3235. * made by us here.
3236. * Check this and take the lock. It is not prone to deadlocks.
3237. * Alternatively, a noqueue qdisc is even simpler 8)
  3238. */
  3239. if (dev->flags & IFF_UP) {
  3240. int cpu = smp_processor_id(); /* ok because BHs are off */
  3241. if (txq->xmit_lock_owner != cpu) {
  3242. if (unlikely(__this_cpu_read(xmit_recursion) >
  3243. XMIT_RECURSION_LIMIT))
  3244. goto recursion_alert;
  3245. skb = validate_xmit_skb(skb, dev, &again);
  3246. if (!skb)
  3247. goto out;
  3248. HARD_TX_LOCK(dev, txq, cpu);
  3249. if (!netif_xmit_stopped(txq)) {
  3250. __this_cpu_inc(xmit_recursion);
  3251. skb = dev_hard_start_xmit(skb, dev, txq, &rc);
  3252. __this_cpu_dec(xmit_recursion);
  3253. if (dev_xmit_complete(rc)) {
  3254. HARD_TX_UNLOCK(dev, txq);
  3255. goto out;
  3256. }
  3257. }
  3258. HARD_TX_UNLOCK(dev, txq);
  3259. net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
  3260. dev->name);
  3261. } else {
  3262. /* Recursion is detected! It is possible,
  3263. * unfortunately
  3264. */
  3265. recursion_alert:
  3266. net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
  3267. dev->name);
  3268. }
  3269. }
  3270. rc = -ENETDOWN;
  3271. rcu_read_unlock_bh();
  3272. atomic_long_inc(&dev->tx_dropped);
  3273. kfree_skb_list(skb);
  3274. return rc;
  3275. out:
  3276. rcu_read_unlock_bh();
  3277. return rc;
  3278. }
  3279. int dev_queue_xmit(struct sk_buff *skb)
  3280. {
  3281. return __dev_queue_xmit(skb, NULL);
  3282. }
  3283. EXPORT_SYMBOL(dev_queue_xmit);
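/*
 * Illustrative sketch (editorial, not part of dev.c): the minimal caller
 * contract for dev_queue_xmit() - set skb->dev, fill in a complete frame
 * (link-layer header included), and accept that the skb is consumed whatever
 * the return value is.  The buffer layout and protocol value are hypothetical.
 */
static int example_send_frame(struct net_device *dev, const void *frame,
			      unsigned int len)
{
	struct sk_buff *skb;

	skb = alloc_skb(LL_RESERVED_SPACE(dev) + len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_put_data(skb, frame, len);	/* frame already holds the MAC header */
	skb_reset_mac_header(skb);
	skb->dev = dev;
	skb->protocol = htons(ETH_P_802_3);

	/* negative errno or a positive NET_XMIT_* code; skb is gone either way */
	return dev_queue_xmit(skb);
}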
  3284. int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev)
  3285. {
  3286. return __dev_queue_xmit(skb, sb_dev);
  3287. }
  3288. EXPORT_SYMBOL(dev_queue_xmit_accel);
  3289. int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
  3290. {
  3291. struct net_device *dev = skb->dev;
  3292. struct sk_buff *orig_skb = skb;
  3293. struct netdev_queue *txq;
  3294. int ret = NETDEV_TX_BUSY;
  3295. bool again = false;
  3296. if (unlikely(!netif_running(dev) ||
  3297. !netif_carrier_ok(dev)))
  3298. goto drop;
  3299. skb = validate_xmit_skb_list(skb, dev, &again);
  3300. if (skb != orig_skb)
  3301. goto drop;
  3302. skb_set_queue_mapping(skb, queue_id);
  3303. txq = skb_get_tx_queue(dev, skb);
  3304. local_bh_disable();
  3305. HARD_TX_LOCK(dev, txq, smp_processor_id());
  3306. if (!netif_xmit_frozen_or_drv_stopped(txq))
  3307. ret = netdev_start_xmit(skb, dev, txq, false);
  3308. HARD_TX_UNLOCK(dev, txq);
  3309. local_bh_enable();
  3310. if (!dev_xmit_complete(ret))
  3311. kfree_skb(skb);
  3312. return ret;
  3313. drop:
  3314. atomic_long_inc(&dev->tx_dropped);
  3315. kfree_skb_list(skb);
  3316. return NET_XMIT_DROP;
  3317. }
  3318. EXPORT_SYMBOL(dev_direct_xmit);
  3319. /*************************************************************************
  3320. * Receiver routines
  3321. *************************************************************************/
  3322. int netdev_max_backlog __read_mostly = 1000;
  3323. EXPORT_SYMBOL(netdev_max_backlog);
  3324. int netdev_tstamp_prequeue __read_mostly = 1;
  3325. int netdev_budget __read_mostly = 300;
  3326. unsigned int __read_mostly netdev_budget_usecs = 2000;
  3327. int weight_p __read_mostly = 64; /* old backlog weight */
  3328. int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */
  3329. int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */
  3330. int dev_rx_weight __read_mostly = 64;
  3331. int dev_tx_weight __read_mostly = 64;
  3332. /* Called with irq disabled */
  3333. static inline void ____napi_schedule(struct softnet_data *sd,
  3334. struct napi_struct *napi)
  3335. {
  3336. list_add_tail(&napi->poll_list, &sd->poll_list);
  3337. __raise_softirq_irqoff(NET_RX_SOFTIRQ);
  3338. }
  3339. #ifdef CONFIG_RPS
  3340. /* One global table that all flow-based protocols share. */
  3341. struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
  3342. EXPORT_SYMBOL(rps_sock_flow_table);
  3343. u32 rps_cpu_mask __read_mostly;
  3344. EXPORT_SYMBOL(rps_cpu_mask);
  3345. struct static_key rps_needed __read_mostly;
  3346. EXPORT_SYMBOL(rps_needed);
  3347. struct static_key rfs_needed __read_mostly;
  3348. EXPORT_SYMBOL(rfs_needed);
  3349. static struct rps_dev_flow *
  3350. set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
  3351. struct rps_dev_flow *rflow, u16 next_cpu)
  3352. {
  3353. if (next_cpu < nr_cpu_ids) {
  3354. #ifdef CONFIG_RFS_ACCEL
  3355. struct netdev_rx_queue *rxqueue;
  3356. struct rps_dev_flow_table *flow_table;
  3357. struct rps_dev_flow *old_rflow;
  3358. u32 flow_id;
  3359. u16 rxq_index;
  3360. int rc;
  3361. /* Should we steer this flow to a different hardware queue? */
  3362. if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
  3363. !(dev->features & NETIF_F_NTUPLE))
  3364. goto out;
  3365. rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
  3366. if (rxq_index == skb_get_rx_queue(skb))
  3367. goto out;
  3368. rxqueue = dev->_rx + rxq_index;
  3369. flow_table = rcu_dereference(rxqueue->rps_flow_table);
  3370. if (!flow_table)
  3371. goto out;
  3372. flow_id = skb_get_hash(skb) & flow_table->mask;
  3373. rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
  3374. rxq_index, flow_id);
  3375. if (rc < 0)
  3376. goto out;
  3377. old_rflow = rflow;
  3378. rflow = &flow_table->flows[flow_id];
  3379. rflow->filter = rc;
  3380. if (old_rflow->filter == rflow->filter)
  3381. old_rflow->filter = RPS_NO_FILTER;
  3382. out:
  3383. #endif
  3384. rflow->last_qtail =
  3385. per_cpu(softnet_data, next_cpu).input_queue_head;
  3386. }
  3387. rflow->cpu = next_cpu;
  3388. return rflow;
  3389. }
  3390. /*
  3391. * get_rps_cpu is called from netif_receive_skb and returns the target
  3392. * CPU from the RPS map of the receiving queue for a given skb.
  3393. * rcu_read_lock must be held on entry.
  3394. */
  3395. static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
  3396. struct rps_dev_flow **rflowp)
  3397. {
  3398. const struct rps_sock_flow_table *sock_flow_table;
  3399. struct netdev_rx_queue *rxqueue = dev->_rx;
  3400. struct rps_dev_flow_table *flow_table;
  3401. struct rps_map *map;
  3402. int cpu = -1;
  3403. u32 tcpu;
  3404. u32 hash;
  3405. if (skb_rx_queue_recorded(skb)) {
  3406. u16 index = skb_get_rx_queue(skb);
  3407. if (unlikely(index >= dev->real_num_rx_queues)) {
  3408. WARN_ONCE(dev->real_num_rx_queues > 1,
  3409. "%s received packet on queue %u, but number "
  3410. "of RX queues is %u\n",
  3411. dev->name, index, dev->real_num_rx_queues);
  3412. goto done;
  3413. }
  3414. rxqueue += index;
  3415. }
  3416. /* Avoid computing hash if RFS/RPS is not active for this rxqueue */
  3417. flow_table = rcu_dereference(rxqueue->rps_flow_table);
  3418. map = rcu_dereference(rxqueue->rps_map);
  3419. if (!flow_table && !map)
  3420. goto done;
  3421. skb_reset_network_header(skb);
  3422. hash = skb_get_hash(skb);
  3423. if (!hash)
  3424. goto done;
  3425. sock_flow_table = rcu_dereference(rps_sock_flow_table);
  3426. if (flow_table && sock_flow_table) {
  3427. struct rps_dev_flow *rflow;
  3428. u32 next_cpu;
  3429. u32 ident;
  3430. /* First check into global flow table if there is a match */
  3431. ident = sock_flow_table->ents[hash & sock_flow_table->mask];
  3432. if ((ident ^ hash) & ~rps_cpu_mask)
  3433. goto try_rps;
  3434. next_cpu = ident & rps_cpu_mask;
  3435. /* OK, now we know there is a match,
  3436. * we can look at the local (per receive queue) flow table
  3437. */
  3438. rflow = &flow_table->flows[hash & flow_table->mask];
  3439. tcpu = rflow->cpu;
  3440. /*
  3441. * If the desired CPU (where last recvmsg was done) is
  3442. * different from current CPU (one in the rx-queue flow
  3443. * table entry), switch if one of the following holds:
  3444. * - Current CPU is unset (>= nr_cpu_ids).
  3445. * - Current CPU is offline.
  3446. * - The current CPU's queue tail has advanced beyond the
  3447. * last packet that was enqueued using this table entry.
  3448. * This guarantees that all previous packets for the flow
  3449. * have been dequeued, thus preserving in order delivery.
  3450. */
  3451. if (unlikely(tcpu != next_cpu) &&
  3452. (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
  3453. ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
  3454. rflow->last_qtail)) >= 0)) {
  3455. tcpu = next_cpu;
  3456. rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
  3457. }
  3458. if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
  3459. *rflowp = rflow;
  3460. cpu = tcpu;
  3461. goto done;
  3462. }
  3463. }
  3464. try_rps:
  3465. if (map) {
  3466. tcpu = map->cpus[reciprocal_scale(hash, map->len)];
  3467. if (cpu_online(tcpu)) {
  3468. cpu = tcpu;
  3469. goto done;
  3470. }
  3471. }
  3472. done:
  3473. return cpu;
  3474. }
  3475. #ifdef CONFIG_RFS_ACCEL
  3476. /**
  3477. * rps_may_expire_flow - check whether an RFS hardware filter may be removed
  3478. * @dev: Device on which the filter was set
  3479. * @rxq_index: RX queue index
  3480. * @flow_id: Flow ID passed to ndo_rx_flow_steer()
  3481. * @filter_id: Filter ID returned by ndo_rx_flow_steer()
  3482. *
  3483. * Drivers that implement ndo_rx_flow_steer() should periodically call
  3484. * this function for each installed filter and remove the filters for
  3485. * which it returns %true.
  3486. */
  3487. bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
  3488. u32 flow_id, u16 filter_id)
  3489. {
  3490. struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
  3491. struct rps_dev_flow_table *flow_table;
  3492. struct rps_dev_flow *rflow;
  3493. bool expire = true;
  3494. unsigned int cpu;
  3495. rcu_read_lock();
  3496. flow_table = rcu_dereference(rxqueue->rps_flow_table);
  3497. if (flow_table && flow_id <= flow_table->mask) {
  3498. rflow = &flow_table->flows[flow_id];
  3499. cpu = READ_ONCE(rflow->cpu);
  3500. if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
  3501. ((int)(per_cpu(softnet_data, cpu).input_queue_head -
  3502. rflow->last_qtail) <
  3503. (int)(10 * flow_table->mask)))
  3504. expire = false;
  3505. }
  3506. rcu_read_unlock();
  3507. return expire;
  3508. }
  3509. EXPORT_SYMBOL(rps_may_expire_flow);
  3510. #endif /* CONFIG_RFS_ACCEL */
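/*
 * Illustrative sketch (editorial, not part of dev.c, only meaningful with
 * CONFIG_RFS_ACCEL): how a driver that implements ndo_rx_flow_steer() might
 * periodically scan its installed filters and drop the ones
 * rps_may_expire_flow() reports as stale.  The filter table layout and the
 * hardware removal step are hypothetical.
 */
struct example_ntuple_filter {
	u16 rxq_index;
	u32 flow_id;
	u16 filter_id;
	bool in_use;
};

static void example_expire_filters(struct net_device *dev,
				   struct example_ntuple_filter *tbl,
				   unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (!tbl[i].in_use)
			continue;
		if (rps_may_expire_flow(dev, tbl[i].rxq_index,
					tbl[i].flow_id, tbl[i].filter_id)) {
			/* ... tell the hardware to remove the filter ... */
			tbl[i].in_use = false;
		}
	}
}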
  3511. /* Called from hardirq (IPI) context */
  3512. static void rps_trigger_softirq(void *data)
  3513. {
  3514. struct softnet_data *sd = data;
  3515. ____napi_schedule(sd, &sd->backlog);
  3516. sd->received_rps++;
  3517. }
  3518. #endif /* CONFIG_RPS */
  3519. /*
3520. * Check if this softnet_data structure belongs to another CPU
  3521. * If yes, queue it to our IPI list and return 1
  3522. * If no, return 0
  3523. */
  3524. static int rps_ipi_queued(struct softnet_data *sd)
  3525. {
  3526. #ifdef CONFIG_RPS
  3527. struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
  3528. if (sd != mysd) {
  3529. sd->rps_ipi_next = mysd->rps_ipi_list;
  3530. mysd->rps_ipi_list = sd;
  3531. __raise_softirq_irqoff(NET_RX_SOFTIRQ);
  3532. return 1;
  3533. }
  3534. #endif /* CONFIG_RPS */
  3535. return 0;
  3536. }
  3537. #ifdef CONFIG_NET_FLOW_LIMIT
  3538. int netdev_flow_limit_table_len __read_mostly = (1 << 12);
  3539. #endif
  3540. static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
  3541. {
  3542. #ifdef CONFIG_NET_FLOW_LIMIT
  3543. struct sd_flow_limit *fl;
  3544. struct softnet_data *sd;
  3545. unsigned int old_flow, new_flow;
  3546. if (qlen < (netdev_max_backlog >> 1))
  3547. return false;
  3548. sd = this_cpu_ptr(&softnet_data);
  3549. rcu_read_lock();
  3550. fl = rcu_dereference(sd->flow_limit);
  3551. if (fl) {
  3552. new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
  3553. old_flow = fl->history[fl->history_head];
  3554. fl->history[fl->history_head] = new_flow;
  3555. fl->history_head++;
  3556. fl->history_head &= FLOW_LIMIT_HISTORY - 1;
  3557. if (likely(fl->buckets[old_flow]))
  3558. fl->buckets[old_flow]--;
  3559. if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
  3560. fl->count++;
  3561. rcu_read_unlock();
  3562. return true;
  3563. }
  3564. }
  3565. rcu_read_unlock();
  3566. #endif
  3567. return false;
  3568. }
  3569. /*
  3570. * enqueue_to_backlog is called to queue an skb to a per CPU backlog
  3571. * queue (may be a remote CPU queue).
  3572. */
  3573. static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
  3574. unsigned int *qtail)
  3575. {
  3576. struct softnet_data *sd;
  3577. unsigned long flags;
  3578. unsigned int qlen;
  3579. sd = &per_cpu(softnet_data, cpu);
  3580. local_irq_save(flags);
  3581. rps_lock(sd);
  3582. if (!netif_running(skb->dev))
  3583. goto drop;
  3584. qlen = skb_queue_len(&sd->input_pkt_queue);
  3585. if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
  3586. if (qlen) {
  3587. enqueue:
  3588. __skb_queue_tail(&sd->input_pkt_queue, skb);
  3589. input_queue_tail_incr_save(sd, qtail);
  3590. rps_unlock(sd);
  3591. local_irq_restore(flags);
  3592. return NET_RX_SUCCESS;
  3593. }
  3594. /* Schedule NAPI for backlog device
3595. * We can use a non-atomic operation since we own the queue lock
  3596. */
  3597. if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
  3598. if (!rps_ipi_queued(sd))
  3599. ____napi_schedule(sd, &sd->backlog);
  3600. }
  3601. goto enqueue;
  3602. }
  3603. drop:
  3604. sd->dropped++;
  3605. rps_unlock(sd);
  3606. local_irq_restore(flags);
  3607. atomic_long_inc(&skb->dev->rx_dropped);
  3608. kfree_skb(skb);
  3609. return NET_RX_DROP;
  3610. }
  3611. static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
  3612. {
  3613. struct net_device *dev = skb->dev;
  3614. struct netdev_rx_queue *rxqueue;
  3615. rxqueue = dev->_rx;
  3616. if (skb_rx_queue_recorded(skb)) {
  3617. u16 index = skb_get_rx_queue(skb);
  3618. if (unlikely(index >= dev->real_num_rx_queues)) {
  3619. WARN_ONCE(dev->real_num_rx_queues > 1,
  3620. "%s received packet on queue %u, but number "
  3621. "of RX queues is %u\n",
  3622. dev->name, index, dev->real_num_rx_queues);
  3623. return rxqueue; /* Return first rxqueue */
  3624. }
  3625. rxqueue += index;
  3626. }
  3627. return rxqueue;
  3628. }
  3629. static u32 netif_receive_generic_xdp(struct sk_buff *skb,
  3630. struct xdp_buff *xdp,
  3631. struct bpf_prog *xdp_prog)
  3632. {
  3633. struct netdev_rx_queue *rxqueue;
  3634. void *orig_data, *orig_data_end;
  3635. u32 metalen, act = XDP_DROP;
  3636. int hlen, off;
  3637. u32 mac_len;
  3638. /* Reinjected packets coming from act_mirred or similar should
  3639. * not get XDP generic processing.
  3640. */
  3641. if (skb_cloned(skb) || skb_is_tc_redirected(skb))
  3642. return XDP_PASS;
  3643. /* XDP packets must be linear and must have sufficient headroom
3644. * of XDP_PACKET_HEADROOM bytes. This is the same guarantee that
3645. * native XDP provides, so we need to enforce it here as well.
  3646. */
  3647. if (skb_is_nonlinear(skb) ||
  3648. skb_headroom(skb) < XDP_PACKET_HEADROOM) {
  3649. int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
  3650. int troom = skb->tail + skb->data_len - skb->end;
3651. /* In case we have to go down this path and also linearize,
3652. * then let's do the pskb_expand_head() work just once here.
  3653. */
  3654. if (pskb_expand_head(skb,
  3655. hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
  3656. troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
  3657. goto do_drop;
  3658. if (skb_linearize(skb))
  3659. goto do_drop;
  3660. }
  3661. /* The XDP program wants to see the packet starting at the MAC
  3662. * header.
  3663. */
  3664. mac_len = skb->data - skb_mac_header(skb);
  3665. hlen = skb_headlen(skb) + mac_len;
  3666. xdp->data = skb->data - mac_len;
  3667. xdp->data_meta = xdp->data;
  3668. xdp->data_end = xdp->data + hlen;
  3669. xdp->data_hard_start = skb->data - skb_headroom(skb);
  3670. orig_data_end = xdp->data_end;
  3671. orig_data = xdp->data;
  3672. rxqueue = netif_get_rxqueue(skb);
  3673. xdp->rxq = &rxqueue->xdp_rxq;
  3674. act = bpf_prog_run_xdp(xdp_prog, xdp);
  3675. off = xdp->data - orig_data;
  3676. if (off > 0)
  3677. __skb_pull(skb, off);
  3678. else if (off < 0)
  3679. __skb_push(skb, -off);
  3680. skb->mac_header += off;
3681. /* Check if bpf_xdp_adjust_tail was used. It can only "shrink"
3682. * the packet.
  3683. */
  3684. off = orig_data_end - xdp->data_end;
  3685. if (off != 0) {
  3686. skb_set_tail_pointer(skb, xdp->data_end - xdp->data);
  3687. skb->len -= off;
  3688. }
  3689. switch (act) {
  3690. case XDP_REDIRECT:
  3691. case XDP_TX:
  3692. __skb_push(skb, mac_len);
  3693. break;
  3694. case XDP_PASS:
  3695. metalen = xdp->data - xdp->data_meta;
  3696. if (metalen)
  3697. skb_metadata_set(skb, metalen);
  3698. break;
  3699. default:
  3700. bpf_warn_invalid_xdp_action(act);
  3701. /* fall through */
  3702. case XDP_ABORTED:
  3703. trace_xdp_exception(skb->dev, xdp_prog, act);
  3704. /* fall through */
  3705. case XDP_DROP:
  3706. do_drop:
  3707. kfree_skb(skb);
  3708. break;
  3709. }
  3710. return act;
  3711. }
  3712. /* When doing generic XDP we have to bypass the qdisc layer and the
  3713. * network taps in order to match in-driver-XDP behavior.
  3714. */
  3715. void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
  3716. {
  3717. struct net_device *dev = skb->dev;
  3718. struct netdev_queue *txq;
  3719. bool free_skb = true;
  3720. int cpu, rc;
  3721. txq = netdev_pick_tx(dev, skb, NULL);
  3722. cpu = smp_processor_id();
  3723. HARD_TX_LOCK(dev, txq, cpu);
  3724. if (!netif_xmit_stopped(txq)) {
  3725. rc = netdev_start_xmit(skb, dev, txq, 0);
  3726. if (dev_xmit_complete(rc))
  3727. free_skb = false;
  3728. }
  3729. HARD_TX_UNLOCK(dev, txq);
  3730. if (free_skb) {
  3731. trace_xdp_exception(dev, xdp_prog, XDP_TX);
  3732. kfree_skb(skb);
  3733. }
  3734. }
  3735. EXPORT_SYMBOL_GPL(generic_xdp_tx);
  3736. static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);
  3737. int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
  3738. {
  3739. if (xdp_prog) {
  3740. struct xdp_buff xdp;
  3741. u32 act;
  3742. int err;
  3743. act = netif_receive_generic_xdp(skb, &xdp, xdp_prog);
  3744. if (act != XDP_PASS) {
  3745. switch (act) {
  3746. case XDP_REDIRECT:
  3747. err = xdp_do_generic_redirect(skb->dev, skb,
  3748. &xdp, xdp_prog);
  3749. if (err)
  3750. goto out_redir;
  3751. break;
  3752. case XDP_TX:
  3753. generic_xdp_tx(skb, xdp_prog);
  3754. break;
  3755. }
  3756. return XDP_DROP;
  3757. }
  3758. }
  3759. return XDP_PASS;
  3760. out_redir:
  3761. kfree_skb(skb);
  3762. return XDP_DROP;
  3763. }
  3764. EXPORT_SYMBOL_GPL(do_xdp_generic);
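/* Illustrative sketch, not part of dev.c: how a caller outside this file
 * (a virtual device, for instance) might run the generic XDP hook on an
 * skb it has built, mirroring the pattern netif_rx_internal() uses below.
 * The example_* name is hypothetical.
 */
static int example_run_generic_xdp(struct sk_buff *skb)
{
	struct bpf_prog *xdp_prog;
	int act = XDP_PASS;

	preempt_disable();
	rcu_read_lock();
	xdp_prog = rcu_dereference(skb->dev->xdp_prog);
	if (xdp_prog)
		act = do_xdp_generic(xdp_prog, skb);
	rcu_read_unlock();
	preempt_enable();

	/* On anything but XDP_PASS the skb has already been consumed. */
	return act == XDP_PASS ? NET_RX_SUCCESS : NET_RX_DROP;
}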
  3765. static int netif_rx_internal(struct sk_buff *skb)
  3766. {
  3767. int ret;
  3768. net_timestamp_check(netdev_tstamp_prequeue, skb);
  3769. trace_netif_rx(skb);
  3770. if (static_branch_unlikely(&generic_xdp_needed_key)) {
  3771. int ret;
  3772. preempt_disable();
  3773. rcu_read_lock();
  3774. ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
  3775. rcu_read_unlock();
  3776. preempt_enable();
  3777. /* Consider XDP consuming the packet a success from
3778. * the netdev point of view; we do not want to count
  3779. * this as an error.
  3780. */
  3781. if (ret != XDP_PASS)
  3782. return NET_RX_SUCCESS;
  3783. }
  3784. #ifdef CONFIG_RPS
  3785. if (static_key_false(&rps_needed)) {
  3786. struct rps_dev_flow voidflow, *rflow = &voidflow;
  3787. int cpu;
  3788. preempt_disable();
  3789. rcu_read_lock();
  3790. cpu = get_rps_cpu(skb->dev, skb, &rflow);
  3791. if (cpu < 0)
  3792. cpu = smp_processor_id();
  3793. ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
  3794. rcu_read_unlock();
  3795. preempt_enable();
  3796. } else
  3797. #endif
  3798. {
  3799. unsigned int qtail;
  3800. ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
  3801. put_cpu();
  3802. }
  3803. return ret;
  3804. }
  3805. /**
  3806. * netif_rx - post buffer to the network code
  3807. * @skb: buffer to post
  3808. *
  3809. * This function receives a packet from a device driver and queues it for
  3810. * the upper (protocol) levels to process. It always succeeds. The buffer
  3811. * may be dropped during processing for congestion control or by the
  3812. * protocol layers.
  3813. *
  3814. * return values:
  3815. * NET_RX_SUCCESS (no congestion)
  3816. * NET_RX_DROP (packet was dropped)
  3817. *
  3818. */
  3819. int netif_rx(struct sk_buff *skb)
  3820. {
  3821. trace_netif_rx_entry(skb);
  3822. return netif_rx_internal(skb);
  3823. }
  3824. EXPORT_SYMBOL(netif_rx);
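/* Illustrative sketch, not part of dev.c: how a simple non-NAPI driver
 * might hand a received frame to netif_rx() from interrupt context, as the
 * kernel-doc above describes.  The example_* name and the way the frame
 * data arrives (hw_buf/len) are hypothetical.
 */
static void example_legacy_rx(struct net_device *dev, const void *hw_buf,
			      unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb_ip_align(dev, len);

	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}
	skb_put_data(skb, hw_buf, len);
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);	/* queue to the per-CPU backlog, see above */
}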
  3825. int netif_rx_ni(struct sk_buff *skb)
  3826. {
  3827. int err;
  3828. trace_netif_rx_ni_entry(skb);
  3829. preempt_disable();
  3830. err = netif_rx_internal(skb);
  3831. if (local_softirq_pending())
  3832. do_softirq();
  3833. preempt_enable();
  3834. return err;
  3835. }
  3836. EXPORT_SYMBOL(netif_rx_ni);
  3837. static __latent_entropy void net_tx_action(struct softirq_action *h)
  3838. {
  3839. struct softnet_data *sd = this_cpu_ptr(&softnet_data);
  3840. if (sd->completion_queue) {
  3841. struct sk_buff *clist;
  3842. local_irq_disable();
  3843. clist = sd->completion_queue;
  3844. sd->completion_queue = NULL;
  3845. local_irq_enable();
  3846. while (clist) {
  3847. struct sk_buff *skb = clist;
  3848. clist = clist->next;
  3849. WARN_ON(refcount_read(&skb->users));
  3850. if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
  3851. trace_consume_skb(skb);
  3852. else
  3853. trace_kfree_skb(skb, net_tx_action);
  3854. if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
  3855. __kfree_skb(skb);
  3856. else
  3857. __kfree_skb_defer(skb);
  3858. }
  3859. __kfree_skb_flush();
  3860. }
  3861. if (sd->output_queue) {
  3862. struct Qdisc *head;
  3863. local_irq_disable();
  3864. head = sd->output_queue;
  3865. sd->output_queue = NULL;
  3866. sd->output_queue_tailp = &sd->output_queue;
  3867. local_irq_enable();
  3868. while (head) {
  3869. struct Qdisc *q = head;
  3870. spinlock_t *root_lock = NULL;
  3871. head = head->next_sched;
  3872. if (!(q->flags & TCQ_F_NOLOCK)) {
  3873. root_lock = qdisc_lock(q);
  3874. spin_lock(root_lock);
  3875. }
  3876. /* We need to make sure head->next_sched is read
  3877. * before clearing __QDISC_STATE_SCHED
  3878. */
  3879. smp_mb__before_atomic();
  3880. clear_bit(__QDISC_STATE_SCHED, &q->state);
  3881. qdisc_run(q);
  3882. if (root_lock)
  3883. spin_unlock(root_lock);
  3884. }
  3885. }
  3886. xfrm_dev_backlog(sd);
  3887. }
  3888. #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
  3889. /* This hook is defined here for ATM LANE */
  3890. int (*br_fdb_test_addr_hook)(struct net_device *dev,
  3891. unsigned char *addr) __read_mostly;
  3892. EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
  3893. #endif
  3894. static inline struct sk_buff *
  3895. sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
  3896. struct net_device *orig_dev)
  3897. {
  3898. #ifdef CONFIG_NET_CLS_ACT
  3899. struct mini_Qdisc *miniq = rcu_dereference_bh(skb->dev->miniq_ingress);
  3900. struct tcf_result cl_res;
  3901. /* If there's at least one ingress present somewhere (so
  3902. * we get here via enabled static key), remaining devices
  3903. * that are not configured with an ingress qdisc will bail
  3904. * out here.
  3905. */
  3906. if (!miniq)
  3907. return skb;
  3908. if (*pt_prev) {
  3909. *ret = deliver_skb(skb, *pt_prev, orig_dev);
  3910. *pt_prev = NULL;
  3911. }
  3912. qdisc_skb_cb(skb)->pkt_len = skb->len;
  3913. skb->tc_at_ingress = 1;
  3914. mini_qdisc_bstats_cpu_update(miniq, skb);
  3915. switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
  3916. case TC_ACT_OK:
  3917. case TC_ACT_RECLASSIFY:
  3918. skb->tc_index = TC_H_MIN(cl_res.classid);
  3919. break;
  3920. case TC_ACT_SHOT:
  3921. mini_qdisc_qstats_cpu_drop(miniq);
  3922. kfree_skb(skb);
  3923. return NULL;
  3924. case TC_ACT_STOLEN:
  3925. case TC_ACT_QUEUED:
  3926. case TC_ACT_TRAP:
  3927. consume_skb(skb);
  3928. return NULL;
  3929. case TC_ACT_REDIRECT:
  3930. /* skb_mac_header check was done by cls/act_bpf, so
  3931. * we can safely push the L2 header back before
  3932. * redirecting to another netdev
  3933. */
  3934. __skb_push(skb, skb->mac_len);
  3935. skb_do_redirect(skb);
  3936. return NULL;
  3937. case TC_ACT_REINSERT:
  3938. /* this does not scrub the packet, and updates stats on error */
  3939. skb_tc_reinsert(skb, &cl_res);
  3940. return NULL;
  3941. default:
  3942. break;
  3943. }
  3944. #endif /* CONFIG_NET_CLS_ACT */
  3945. return skb;
  3946. }
  3947. /**
  3948. * netdev_is_rx_handler_busy - check if receive handler is registered
  3949. * @dev: device to check
  3950. *
  3951. * Check if a receive handler is already registered for a given device.
3952. * Return true if there is one.
  3953. *
  3954. * The caller must hold the rtnl_mutex.
  3955. */
  3956. bool netdev_is_rx_handler_busy(struct net_device *dev)
  3957. {
  3958. ASSERT_RTNL();
  3959. return dev && rtnl_dereference(dev->rx_handler);
  3960. }
  3961. EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
  3962. /**
  3963. * netdev_rx_handler_register - register receive handler
  3964. * @dev: device to register a handler for
  3965. * @rx_handler: receive handler to register
  3966. * @rx_handler_data: data pointer that is used by rx handler
  3967. *
  3968. * Register a receive handler for a device. This handler will then be
  3969. * called from __netif_receive_skb. A negative errno code is returned
  3970. * on a failure.
  3971. *
  3972. * The caller must hold the rtnl_mutex.
  3973. *
  3974. * For a general description of rx_handler, see enum rx_handler_result.
  3975. */
  3976. int netdev_rx_handler_register(struct net_device *dev,
  3977. rx_handler_func_t *rx_handler,
  3978. void *rx_handler_data)
  3979. {
  3980. if (netdev_is_rx_handler_busy(dev))
  3981. return -EBUSY;
  3982. if (dev->priv_flags & IFF_NO_RX_HANDLER)
  3983. return -EINVAL;
  3984. /* Note: rx_handler_data must be set before rx_handler */
  3985. rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
  3986. rcu_assign_pointer(dev->rx_handler, rx_handler);
  3987. return 0;
  3988. }
  3989. EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
  3990. /**
  3991. * netdev_rx_handler_unregister - unregister receive handler
  3992. * @dev: device to unregister a handler from
  3993. *
  3994. * Unregister a receive handler from a device.
  3995. *
  3996. * The caller must hold the rtnl_mutex.
  3997. */
  3998. void netdev_rx_handler_unregister(struct net_device *dev)
  3999. {
  4000. ASSERT_RTNL();
  4001. RCU_INIT_POINTER(dev->rx_handler, NULL);
4002. /* a reader seeing a non-NULL rx_handler in a rcu_read_lock()
4003. * section is guaranteed to see a non-NULL rx_handler_data
  4004. * as well.
  4005. */
  4006. synchronize_net();
  4007. RCU_INIT_POINTER(dev->rx_handler_data, NULL);
  4008. }
  4009. EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
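/* Illustrative sketch, not part of dev.c: the registration pattern the two
 * kernel-docs above describe.  Handlers like this are what bridge, bonding
 * and macvlan install; the example_* names are hypothetical.
 */
static rx_handler_result_t example_rx_handler(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	void *priv = rcu_dereference(skb->dev->rx_handler_data);

	/* steer, mangle or consume the packet using priv here */
	(void)priv;
	return RX_HANDLER_PASS;	/* let normal delivery continue */
}

static int example_attach_rx_handler(struct net_device *dev, void *priv)
{
	ASSERT_RTNL();
	return netdev_rx_handler_register(dev, example_rx_handler, priv);
	/* teardown side: netdev_rx_handler_unregister(dev); */
}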
  4010. /*
  4011. * Limit the use of PFMEMALLOC reserves to those protocols that implement
  4012. * the special handling of PFMEMALLOC skbs.
  4013. */
  4014. static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
  4015. {
  4016. switch (skb->protocol) {
  4017. case htons(ETH_P_ARP):
  4018. case htons(ETH_P_IP):
  4019. case htons(ETH_P_IPV6):
  4020. case htons(ETH_P_8021Q):
  4021. case htons(ETH_P_8021AD):
  4022. return true;
  4023. default:
  4024. return false;
  4025. }
  4026. }
  4027. static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
  4028. int *ret, struct net_device *orig_dev)
  4029. {
  4030. #ifdef CONFIG_NETFILTER_INGRESS
  4031. if (nf_hook_ingress_active(skb)) {
  4032. int ingress_retval;
  4033. if (*pt_prev) {
  4034. *ret = deliver_skb(skb, *pt_prev, orig_dev);
  4035. *pt_prev = NULL;
  4036. }
  4037. rcu_read_lock();
  4038. ingress_retval = nf_hook_ingress(skb);
  4039. rcu_read_unlock();
  4040. return ingress_retval;
  4041. }
  4042. #endif /* CONFIG_NETFILTER_INGRESS */
  4043. return 0;
  4044. }
  4045. static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc,
  4046. struct packet_type **ppt_prev)
  4047. {
  4048. struct packet_type *ptype, *pt_prev;
  4049. rx_handler_func_t *rx_handler;
  4050. struct net_device *orig_dev;
  4051. bool deliver_exact = false;
  4052. int ret = NET_RX_DROP;
  4053. __be16 type;
  4054. net_timestamp_check(!netdev_tstamp_prequeue, skb);
  4055. trace_netif_receive_skb(skb);
  4056. orig_dev = skb->dev;
  4057. skb_reset_network_header(skb);
  4058. if (!skb_transport_header_was_set(skb))
  4059. skb_reset_transport_header(skb);
  4060. skb_reset_mac_len(skb);
  4061. pt_prev = NULL;
  4062. another_round:
  4063. skb->skb_iif = skb->dev->ifindex;
  4064. __this_cpu_inc(softnet_data.processed);
  4065. if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
  4066. skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
  4067. skb = skb_vlan_untag(skb);
  4068. if (unlikely(!skb))
  4069. goto out;
  4070. }
  4071. if (skb_skip_tc_classify(skb))
  4072. goto skip_classify;
  4073. if (pfmemalloc)
  4074. goto skip_taps;
  4075. list_for_each_entry_rcu(ptype, &ptype_all, list) {
  4076. if (pt_prev)
  4077. ret = deliver_skb(skb, pt_prev, orig_dev);
  4078. pt_prev = ptype;
  4079. }
  4080. list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
  4081. if (pt_prev)
  4082. ret = deliver_skb(skb, pt_prev, orig_dev);
  4083. pt_prev = ptype;
  4084. }
  4085. skip_taps:
  4086. #ifdef CONFIG_NET_INGRESS
  4087. if (static_branch_unlikely(&ingress_needed_key)) {
  4088. skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev);
  4089. if (!skb)
  4090. goto out;
  4091. if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
  4092. goto out;
  4093. }
  4094. #endif
  4095. skb_reset_tc(skb);
  4096. skip_classify:
  4097. if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
  4098. goto drop;
  4099. if (skb_vlan_tag_present(skb)) {
  4100. if (pt_prev) {
  4101. ret = deliver_skb(skb, pt_prev, orig_dev);
  4102. pt_prev = NULL;
  4103. }
  4104. if (vlan_do_receive(&skb))
  4105. goto another_round;
  4106. else if (unlikely(!skb))
  4107. goto out;
  4108. }
  4109. rx_handler = rcu_dereference(skb->dev->rx_handler);
  4110. if (rx_handler) {
  4111. if (pt_prev) {
  4112. ret = deliver_skb(skb, pt_prev, orig_dev);
  4113. pt_prev = NULL;
  4114. }
  4115. switch (rx_handler(&skb)) {
  4116. case RX_HANDLER_CONSUMED:
  4117. ret = NET_RX_SUCCESS;
  4118. goto out;
  4119. case RX_HANDLER_ANOTHER:
  4120. goto another_round;
  4121. case RX_HANDLER_EXACT:
  4122. deliver_exact = true;
  4123. case RX_HANDLER_PASS:
  4124. break;
  4125. default:
  4126. BUG();
  4127. }
  4128. }
  4129. if (unlikely(skb_vlan_tag_present(skb))) {
  4130. if (skb_vlan_tag_get_id(skb))
  4131. skb->pkt_type = PACKET_OTHERHOST;
  4132. /* Note: we might in the future use prio bits
4133. * and set skb->priority like in vlan_do_receive().
4134. * For the time being, just ignore the Priority Code Point.
  4135. */
  4136. skb->vlan_tci = 0;
  4137. }
  4138. type = skb->protocol;
  4139. /* deliver only exact match when indicated */
  4140. if (likely(!deliver_exact)) {
  4141. deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
  4142. &ptype_base[ntohs(type) &
  4143. PTYPE_HASH_MASK]);
  4144. }
  4145. deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
  4146. &orig_dev->ptype_specific);
  4147. if (unlikely(skb->dev != orig_dev)) {
  4148. deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
  4149. &skb->dev->ptype_specific);
  4150. }
  4151. if (pt_prev) {
  4152. if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
  4153. goto drop;
  4154. *ppt_prev = pt_prev;
  4155. } else {
  4156. drop:
  4157. if (!deliver_exact)
  4158. atomic_long_inc(&skb->dev->rx_dropped);
  4159. else
  4160. atomic_long_inc(&skb->dev->rx_nohandler);
  4161. kfree_skb(skb);
4162. /* Jamal, now you will not be able to escape explaining
4163. * to me how you were going to use this. :-)
  4164. */
  4165. ret = NET_RX_DROP;
  4166. }
  4167. out:
  4168. return ret;
  4169. }
  4170. static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
  4171. {
  4172. struct net_device *orig_dev = skb->dev;
  4173. struct packet_type *pt_prev = NULL;
  4174. int ret;
  4175. ret = __netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
  4176. if (pt_prev)
  4177. ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
  4178. return ret;
  4179. }
  4180. /**
  4181. * netif_receive_skb_core - special purpose version of netif_receive_skb
  4182. * @skb: buffer to process
  4183. *
  4184. * More direct receive version of netif_receive_skb(). It should
4185. * only be used by callers that need to skip RPS and generic XDP.
  4186. * Caller must also take care of handling if (page_is_)pfmemalloc.
  4187. *
  4188. * This function may only be called from softirq context and interrupts
  4189. * should be enabled.
  4190. *
  4191. * Return values (usually ignored):
  4192. * NET_RX_SUCCESS: no congestion
  4193. * NET_RX_DROP: packet was dropped
  4194. */
  4195. int netif_receive_skb_core(struct sk_buff *skb)
  4196. {
  4197. int ret;
  4198. rcu_read_lock();
  4199. ret = __netif_receive_skb_one_core(skb, false);
  4200. rcu_read_unlock();
  4201. return ret;
  4202. }
  4203. EXPORT_SYMBOL(netif_receive_skb_core);
  4204. static inline void __netif_receive_skb_list_ptype(struct list_head *head,
  4205. struct packet_type *pt_prev,
  4206. struct net_device *orig_dev)
  4207. {
  4208. struct sk_buff *skb, *next;
  4209. if (!pt_prev)
  4210. return;
  4211. if (list_empty(head))
  4212. return;
  4213. if (pt_prev->list_func != NULL)
  4214. pt_prev->list_func(head, pt_prev, orig_dev);
  4215. else
  4216. list_for_each_entry_safe(skb, next, head, list) {
  4217. skb_list_del_init(skb);
  4218. pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
  4219. }
  4220. }
  4221. static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
  4222. {
  4223. /* Fast-path assumptions:
  4224. * - There is no RX handler.
  4225. * - Only one packet_type matches.
  4226. * If either of these fails, we will end up doing some per-packet
  4227. * processing in-line, then handling the 'last ptype' for the whole
  4228. * sublist. This can't cause out-of-order delivery to any single ptype,
  4229. * because the 'last ptype' must be constant across the sublist, and all
  4230. * other ptypes are handled per-packet.
  4231. */
  4232. /* Current (common) ptype of sublist */
  4233. struct packet_type *pt_curr = NULL;
  4234. /* Current (common) orig_dev of sublist */
  4235. struct net_device *od_curr = NULL;
  4236. struct list_head sublist;
  4237. struct sk_buff *skb, *next;
  4238. INIT_LIST_HEAD(&sublist);
  4239. list_for_each_entry_safe(skb, next, head, list) {
  4240. struct net_device *orig_dev = skb->dev;
  4241. struct packet_type *pt_prev = NULL;
  4242. skb_list_del_init(skb);
  4243. __netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
  4244. if (!pt_prev)
  4245. continue;
  4246. if (pt_curr != pt_prev || od_curr != orig_dev) {
  4247. /* dispatch old sublist */
  4248. __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
  4249. /* start new sublist */
  4250. INIT_LIST_HEAD(&sublist);
  4251. pt_curr = pt_prev;
  4252. od_curr = orig_dev;
  4253. }
  4254. list_add_tail(&skb->list, &sublist);
  4255. }
  4256. /* dispatch final sublist */
  4257. __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
  4258. }
  4259. static int __netif_receive_skb(struct sk_buff *skb)
  4260. {
  4261. int ret;
  4262. if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
  4263. unsigned int noreclaim_flag;
  4264. /*
  4265. * PFMEMALLOC skbs are special, they should
  4266. * - be delivered to SOCK_MEMALLOC sockets only
  4267. * - stay away from userspace
  4268. * - have bounded memory usage
  4269. *
  4270. * Use PF_MEMALLOC as this saves us from propagating the allocation
  4271. * context down to all allocation sites.
  4272. */
  4273. noreclaim_flag = memalloc_noreclaim_save();
  4274. ret = __netif_receive_skb_one_core(skb, true);
  4275. memalloc_noreclaim_restore(noreclaim_flag);
  4276. } else
  4277. ret = __netif_receive_skb_one_core(skb, false);
  4278. return ret;
  4279. }
  4280. static void __netif_receive_skb_list(struct list_head *head)
  4281. {
  4282. unsigned long noreclaim_flag = 0;
  4283. struct sk_buff *skb, *next;
  4284. bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? */
  4285. list_for_each_entry_safe(skb, next, head, list) {
  4286. if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) {
  4287. struct list_head sublist;
  4288. /* Handle the previous sublist */
  4289. list_cut_before(&sublist, head, &skb->list);
  4290. if (!list_empty(&sublist))
  4291. __netif_receive_skb_list_core(&sublist, pfmemalloc);
  4292. pfmemalloc = !pfmemalloc;
  4293. /* See comments in __netif_receive_skb */
  4294. if (pfmemalloc)
  4295. noreclaim_flag = memalloc_noreclaim_save();
  4296. else
  4297. memalloc_noreclaim_restore(noreclaim_flag);
  4298. }
  4299. }
  4300. /* Handle the remaining sublist */
  4301. if (!list_empty(head))
  4302. __netif_receive_skb_list_core(head, pfmemalloc);
  4303. /* Restore pflags */
  4304. if (pfmemalloc)
  4305. memalloc_noreclaim_restore(noreclaim_flag);
  4306. }
  4307. static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
  4308. {
  4309. struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
  4310. struct bpf_prog *new = xdp->prog;
  4311. int ret = 0;
  4312. switch (xdp->command) {
  4313. case XDP_SETUP_PROG:
  4314. rcu_assign_pointer(dev->xdp_prog, new);
  4315. if (old)
  4316. bpf_prog_put(old);
  4317. if (old && !new) {
  4318. static_branch_dec(&generic_xdp_needed_key);
  4319. } else if (new && !old) {
  4320. static_branch_inc(&generic_xdp_needed_key);
  4321. dev_disable_lro(dev);
  4322. dev_disable_gro_hw(dev);
  4323. }
  4324. break;
  4325. case XDP_QUERY_PROG:
  4326. xdp->prog_id = old ? old->aux->id : 0;
  4327. break;
  4328. default:
  4329. ret = -EINVAL;
  4330. break;
  4331. }
  4332. return ret;
  4333. }
  4334. static int netif_receive_skb_internal(struct sk_buff *skb)
  4335. {
  4336. int ret;
  4337. net_timestamp_check(netdev_tstamp_prequeue, skb);
  4338. if (skb_defer_rx_timestamp(skb))
  4339. return NET_RX_SUCCESS;
  4340. if (static_branch_unlikely(&generic_xdp_needed_key)) {
  4341. int ret;
  4342. preempt_disable();
  4343. rcu_read_lock();
  4344. ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
  4345. rcu_read_unlock();
  4346. preempt_enable();
  4347. if (ret != XDP_PASS)
  4348. return NET_RX_DROP;
  4349. }
  4350. rcu_read_lock();
  4351. #ifdef CONFIG_RPS
  4352. if (static_key_false(&rps_needed)) {
  4353. struct rps_dev_flow voidflow, *rflow = &voidflow;
  4354. int cpu = get_rps_cpu(skb->dev, skb, &rflow);
  4355. if (cpu >= 0) {
  4356. ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
  4357. rcu_read_unlock();
  4358. return ret;
  4359. }
  4360. }
  4361. #endif
  4362. ret = __netif_receive_skb(skb);
  4363. rcu_read_unlock();
  4364. return ret;
  4365. }
  4366. static void netif_receive_skb_list_internal(struct list_head *head)
  4367. {
  4368. struct bpf_prog *xdp_prog = NULL;
  4369. struct sk_buff *skb, *next;
  4370. struct list_head sublist;
  4371. INIT_LIST_HEAD(&sublist);
  4372. list_for_each_entry_safe(skb, next, head, list) {
  4373. net_timestamp_check(netdev_tstamp_prequeue, skb);
  4374. skb_list_del_init(skb);
  4375. if (!skb_defer_rx_timestamp(skb))
  4376. list_add_tail(&skb->list, &sublist);
  4377. }
  4378. list_splice_init(&sublist, head);
  4379. if (static_branch_unlikely(&generic_xdp_needed_key)) {
  4380. preempt_disable();
  4381. rcu_read_lock();
  4382. list_for_each_entry_safe(skb, next, head, list) {
  4383. xdp_prog = rcu_dereference(skb->dev->xdp_prog);
  4384. skb_list_del_init(skb);
  4385. if (do_xdp_generic(xdp_prog, skb) == XDP_PASS)
  4386. list_add_tail(&skb->list, &sublist);
  4387. }
  4388. rcu_read_unlock();
  4389. preempt_enable();
  4390. /* Put passed packets back on main list */
  4391. list_splice_init(&sublist, head);
  4392. }
  4393. rcu_read_lock();
  4394. #ifdef CONFIG_RPS
  4395. if (static_key_false(&rps_needed)) {
  4396. list_for_each_entry_safe(skb, next, head, list) {
  4397. struct rps_dev_flow voidflow, *rflow = &voidflow;
  4398. int cpu = get_rps_cpu(skb->dev, skb, &rflow);
  4399. if (cpu >= 0) {
  4400. /* Will be handled, remove from list */
  4401. skb_list_del_init(skb);
  4402. enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
  4403. }
  4404. }
  4405. }
  4406. #endif
  4407. __netif_receive_skb_list(head);
  4408. rcu_read_unlock();
  4409. }
  4410. /**
  4411. * netif_receive_skb - process receive buffer from network
  4412. * @skb: buffer to process
  4413. *
  4414. * netif_receive_skb() is the main receive data processing function.
  4415. * It always succeeds. The buffer may be dropped during processing
  4416. * for congestion control or by the protocol layers.
  4417. *
  4418. * This function may only be called from softirq context and interrupts
  4419. * should be enabled.
  4420. *
  4421. * Return values (usually ignored):
  4422. * NET_RX_SUCCESS: no congestion
  4423. * NET_RX_DROP: packet was dropped
  4424. */
  4425. int netif_receive_skb(struct sk_buff *skb)
  4426. {
  4427. trace_netif_receive_skb_entry(skb);
  4428. return netif_receive_skb_internal(skb);
  4429. }
  4430. EXPORT_SYMBOL(netif_receive_skb);
  4431. /**
  4432. * netif_receive_skb_list - process many receive buffers from network
  4433. * @head: list of skbs to process.
  4434. *
4435. * Since the return value of netif_receive_skb() is normally ignored, and
  4436. * wouldn't be meaningful for a list, this function returns void.
  4437. *
  4438. * This function may only be called from softirq context and interrupts
  4439. * should be enabled.
  4440. */
  4441. void netif_receive_skb_list(struct list_head *head)
  4442. {
  4443. struct sk_buff *skb;
  4444. if (list_empty(head))
  4445. return;
  4446. list_for_each_entry(skb, head, list)
  4447. trace_netif_receive_skb_list_entry(skb);
  4448. netif_receive_skb_list_internal(head);
  4449. }
  4450. EXPORT_SYMBOL(netif_receive_skb_list);
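/* Illustrative sketch, not part of dev.c: batching skbs on a list and
 * delivering them with a single call, per the kernel-doc above.  The
 * example_* name and the skbs[] array are hypothetical.
 */
static void example_deliver_batch(struct sk_buff **skbs, unsigned int count)
{
	LIST_HEAD(rx_list);
	unsigned int i;

	for (i = 0; i < count; i++)
		list_add_tail(&skbs[i]->list, &rx_list);

	netif_receive_skb_list(&rx_list);	/* consumes every skb */
}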
  4451. DEFINE_PER_CPU(struct work_struct, flush_works);
  4452. /* Network device is going away, flush any packets still pending */
  4453. static void flush_backlog(struct work_struct *work)
  4454. {
  4455. struct sk_buff *skb, *tmp;
  4456. struct softnet_data *sd;
  4457. local_bh_disable();
  4458. sd = this_cpu_ptr(&softnet_data);
  4459. local_irq_disable();
  4460. rps_lock(sd);
  4461. skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
  4462. if (skb->dev->reg_state == NETREG_UNREGISTERING) {
  4463. __skb_unlink(skb, &sd->input_pkt_queue);
  4464. kfree_skb(skb);
  4465. input_queue_head_incr(sd);
  4466. }
  4467. }
  4468. rps_unlock(sd);
  4469. local_irq_enable();
  4470. skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
  4471. if (skb->dev->reg_state == NETREG_UNREGISTERING) {
  4472. __skb_unlink(skb, &sd->process_queue);
  4473. kfree_skb(skb);
  4474. input_queue_head_incr(sd);
  4475. }
  4476. }
  4477. local_bh_enable();
  4478. }
  4479. static void flush_all_backlogs(void)
  4480. {
  4481. unsigned int cpu;
  4482. get_online_cpus();
  4483. for_each_online_cpu(cpu)
  4484. queue_work_on(cpu, system_highpri_wq,
  4485. per_cpu_ptr(&flush_works, cpu));
  4486. for_each_online_cpu(cpu)
  4487. flush_work(per_cpu_ptr(&flush_works, cpu));
  4488. put_online_cpus();
  4489. }
  4490. static int napi_gro_complete(struct sk_buff *skb)
  4491. {
  4492. struct packet_offload *ptype;
  4493. __be16 type = skb->protocol;
  4494. struct list_head *head = &offload_base;
  4495. int err = -ENOENT;
  4496. BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
  4497. if (NAPI_GRO_CB(skb)->count == 1) {
  4498. skb_shinfo(skb)->gso_size = 0;
  4499. goto out;
  4500. }
  4501. rcu_read_lock();
  4502. list_for_each_entry_rcu(ptype, head, list) {
  4503. if (ptype->type != type || !ptype->callbacks.gro_complete)
  4504. continue;
  4505. err = ptype->callbacks.gro_complete(skb, 0);
  4506. break;
  4507. }
  4508. rcu_read_unlock();
  4509. if (err) {
  4510. WARN_ON(&ptype->list == head);
  4511. kfree_skb(skb);
  4512. return NET_RX_SUCCESS;
  4513. }
  4514. out:
  4515. return netif_receive_skb_internal(skb);
  4516. }
  4517. static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
  4518. bool flush_old)
  4519. {
  4520. struct list_head *head = &napi->gro_hash[index].list;
  4521. struct sk_buff *skb, *p;
  4522. list_for_each_entry_safe_reverse(skb, p, head, list) {
  4523. if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
  4524. return;
  4525. list_del(&skb->list);
  4526. skb->next = NULL;
  4527. napi_gro_complete(skb);
  4528. napi->gro_hash[index].count--;
  4529. }
  4530. if (!napi->gro_hash[index].count)
  4531. __clear_bit(index, &napi->gro_bitmask);
  4532. }
  4533. /* napi->gro_hash[].list contains packets ordered by age.
4534. * The youngest packets are at its head.
  4535. * Complete skbs in reverse order to reduce latencies.
  4536. */
  4537. void napi_gro_flush(struct napi_struct *napi, bool flush_old)
  4538. {
  4539. u32 i;
  4540. for (i = 0; i < GRO_HASH_BUCKETS; i++) {
  4541. if (test_bit(i, &napi->gro_bitmask))
  4542. __napi_gro_flush_chain(napi, i, flush_old);
  4543. }
  4544. }
  4545. EXPORT_SYMBOL(napi_gro_flush);
  4546. static struct list_head *gro_list_prepare(struct napi_struct *napi,
  4547. struct sk_buff *skb)
  4548. {
  4549. unsigned int maclen = skb->dev->hard_header_len;
  4550. u32 hash = skb_get_hash_raw(skb);
  4551. struct list_head *head;
  4552. struct sk_buff *p;
  4553. head = &napi->gro_hash[hash & (GRO_HASH_BUCKETS - 1)].list;
  4554. list_for_each_entry(p, head, list) {
  4555. unsigned long diffs;
  4556. NAPI_GRO_CB(p)->flush = 0;
  4557. if (hash != skb_get_hash_raw(p)) {
  4558. NAPI_GRO_CB(p)->same_flow = 0;
  4559. continue;
  4560. }
  4561. diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
  4562. diffs |= p->vlan_tci ^ skb->vlan_tci;
  4563. diffs |= skb_metadata_dst_cmp(p, skb);
  4564. diffs |= skb_metadata_differs(p, skb);
  4565. if (maclen == ETH_HLEN)
  4566. diffs |= compare_ether_header(skb_mac_header(p),
  4567. skb_mac_header(skb));
  4568. else if (!diffs)
  4569. diffs = memcmp(skb_mac_header(p),
  4570. skb_mac_header(skb),
  4571. maclen);
  4572. NAPI_GRO_CB(p)->same_flow = !diffs;
  4573. }
  4574. return head;
  4575. }
  4576. static void skb_gro_reset_offset(struct sk_buff *skb)
  4577. {
  4578. const struct skb_shared_info *pinfo = skb_shinfo(skb);
  4579. const skb_frag_t *frag0 = &pinfo->frags[0];
  4580. NAPI_GRO_CB(skb)->data_offset = 0;
  4581. NAPI_GRO_CB(skb)->frag0 = NULL;
  4582. NAPI_GRO_CB(skb)->frag0_len = 0;
  4583. if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
  4584. pinfo->nr_frags &&
  4585. !PageHighMem(skb_frag_page(frag0))) {
  4586. NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
  4587. NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
  4588. skb_frag_size(frag0),
  4589. skb->end - skb->tail);
  4590. }
  4591. }
  4592. static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
  4593. {
  4594. struct skb_shared_info *pinfo = skb_shinfo(skb);
  4595. BUG_ON(skb->end - skb->tail < grow);
  4596. memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
  4597. skb->data_len -= grow;
  4598. skb->tail += grow;
  4599. pinfo->frags[0].page_offset += grow;
  4600. skb_frag_size_sub(&pinfo->frags[0], grow);
  4601. if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
  4602. skb_frag_unref(skb, 0);
  4603. memmove(pinfo->frags, pinfo->frags + 1,
  4604. --pinfo->nr_frags * sizeof(pinfo->frags[0]));
  4605. }
  4606. }
  4607. static void gro_flush_oldest(struct list_head *head)
  4608. {
  4609. struct sk_buff *oldest;
  4610. oldest = list_last_entry(head, struct sk_buff, list);
4611. /* We are called with head length >= MAX_GRO_SKBS, so a NULL
4612. * oldest entry is impossible.
  4613. */
  4614. if (WARN_ON_ONCE(!oldest))
  4615. return;
  4616. /* Do not adjust napi->gro_hash[].count, caller is adding a new
  4617. * SKB to the chain.
  4618. */
  4619. list_del(&oldest->list);
  4620. oldest->next = NULL;
  4621. napi_gro_complete(oldest);
  4622. }
  4623. static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
  4624. {
  4625. u32 hash = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
  4626. struct list_head *head = &offload_base;
  4627. struct packet_offload *ptype;
  4628. __be16 type = skb->protocol;
  4629. struct list_head *gro_head;
  4630. struct sk_buff *pp = NULL;
  4631. enum gro_result ret;
  4632. int same_flow;
  4633. int grow;
  4634. if (netif_elide_gro(skb->dev))
  4635. goto normal;
  4636. gro_head = gro_list_prepare(napi, skb);
  4637. rcu_read_lock();
  4638. list_for_each_entry_rcu(ptype, head, list) {
  4639. if (ptype->type != type || !ptype->callbacks.gro_receive)
  4640. continue;
  4641. skb_set_network_header(skb, skb_gro_offset(skb));
  4642. skb_reset_mac_len(skb);
  4643. NAPI_GRO_CB(skb)->same_flow = 0;
  4644. NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
  4645. NAPI_GRO_CB(skb)->free = 0;
  4646. NAPI_GRO_CB(skb)->encap_mark = 0;
  4647. NAPI_GRO_CB(skb)->recursion_counter = 0;
  4648. NAPI_GRO_CB(skb)->is_fou = 0;
  4649. NAPI_GRO_CB(skb)->is_atomic = 1;
  4650. NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
  4651. /* Setup for GRO checksum validation */
  4652. switch (skb->ip_summed) {
  4653. case CHECKSUM_COMPLETE:
  4654. NAPI_GRO_CB(skb)->csum = skb->csum;
  4655. NAPI_GRO_CB(skb)->csum_valid = 1;
  4656. NAPI_GRO_CB(skb)->csum_cnt = 0;
  4657. break;
  4658. case CHECKSUM_UNNECESSARY:
  4659. NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
  4660. NAPI_GRO_CB(skb)->csum_valid = 0;
  4661. break;
  4662. default:
  4663. NAPI_GRO_CB(skb)->csum_cnt = 0;
  4664. NAPI_GRO_CB(skb)->csum_valid = 0;
  4665. }
  4666. pp = ptype->callbacks.gro_receive(gro_head, skb);
  4667. break;
  4668. }
  4669. rcu_read_unlock();
  4670. if (&ptype->list == head)
  4671. goto normal;
  4672. if (IS_ERR(pp) && PTR_ERR(pp) == -EINPROGRESS) {
  4673. ret = GRO_CONSUMED;
  4674. goto ok;
  4675. }
  4676. same_flow = NAPI_GRO_CB(skb)->same_flow;
  4677. ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
  4678. if (pp) {
  4679. list_del(&pp->list);
  4680. pp->next = NULL;
  4681. napi_gro_complete(pp);
  4682. napi->gro_hash[hash].count--;
  4683. }
  4684. if (same_flow)
  4685. goto ok;
  4686. if (NAPI_GRO_CB(skb)->flush)
  4687. goto normal;
  4688. if (unlikely(napi->gro_hash[hash].count >= MAX_GRO_SKBS)) {
  4689. gro_flush_oldest(gro_head);
  4690. } else {
  4691. napi->gro_hash[hash].count++;
  4692. }
  4693. NAPI_GRO_CB(skb)->count = 1;
  4694. NAPI_GRO_CB(skb)->age = jiffies;
  4695. NAPI_GRO_CB(skb)->last = skb;
  4696. skb_shinfo(skb)->gso_size = skb_gro_len(skb);
  4697. list_add(&skb->list, gro_head);
  4698. ret = GRO_HELD;
  4699. pull:
  4700. grow = skb_gro_offset(skb) - skb_headlen(skb);
  4701. if (grow > 0)
  4702. gro_pull_from_frag0(skb, grow);
  4703. ok:
  4704. if (napi->gro_hash[hash].count) {
  4705. if (!test_bit(hash, &napi->gro_bitmask))
  4706. __set_bit(hash, &napi->gro_bitmask);
  4707. } else if (test_bit(hash, &napi->gro_bitmask)) {
  4708. __clear_bit(hash, &napi->gro_bitmask);
  4709. }
  4710. return ret;
  4711. normal:
  4712. ret = GRO_NORMAL;
  4713. goto pull;
  4714. }
  4715. struct packet_offload *gro_find_receive_by_type(__be16 type)
  4716. {
  4717. struct list_head *offload_head = &offload_base;
  4718. struct packet_offload *ptype;
  4719. list_for_each_entry_rcu(ptype, offload_head, list) {
  4720. if (ptype->type != type || !ptype->callbacks.gro_receive)
  4721. continue;
  4722. return ptype;
  4723. }
  4724. return NULL;
  4725. }
  4726. EXPORT_SYMBOL(gro_find_receive_by_type);
  4727. struct packet_offload *gro_find_complete_by_type(__be16 type)
  4728. {
  4729. struct list_head *offload_head = &offload_base;
  4730. struct packet_offload *ptype;
  4731. list_for_each_entry_rcu(ptype, offload_head, list) {
  4732. if (ptype->type != type || !ptype->callbacks.gro_complete)
  4733. continue;
  4734. return ptype;
  4735. }
  4736. return NULL;
  4737. }
  4738. EXPORT_SYMBOL(gro_find_complete_by_type);
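/* Illustrative sketch, not part of dev.c: registering the gro_receive and
 * gro_complete callbacks that dev_gro_receive() and napi_gro_complete()
 * above look up via skb->protocol.  The example_* names are hypothetical;
 * 0x88b5 is the IEEE local-experimental ethertype.
 */
static struct sk_buff *example_gro_receive(struct list_head *head,
					   struct sk_buff *skb)
{
	/* compare against held skbs on @head, merge or mark for flush */
	return NULL;
}

static int example_gro_complete(struct sk_buff *skb, int nhoff)
{
	/* finalise headers of the merged packet starting at @nhoff */
	return 0;
}

static struct packet_offload example_offload __read_mostly = {
	.type = cpu_to_be16(0x88b5),
	.callbacks = {
		.gro_receive	= example_gro_receive,
		.gro_complete	= example_gro_complete,
	},
};

static void example_offload_register(void)
{
	dev_add_offload(&example_offload);
}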
  4739. static void napi_skb_free_stolen_head(struct sk_buff *skb)
  4740. {
  4741. skb_dst_drop(skb);
  4742. secpath_reset(skb);
  4743. kmem_cache_free(skbuff_head_cache, skb);
  4744. }
  4745. static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
  4746. {
  4747. switch (ret) {
  4748. case GRO_NORMAL:
  4749. if (netif_receive_skb_internal(skb))
  4750. ret = GRO_DROP;
  4751. break;
  4752. case GRO_DROP:
  4753. kfree_skb(skb);
  4754. break;
  4755. case GRO_MERGED_FREE:
  4756. if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
  4757. napi_skb_free_stolen_head(skb);
  4758. else
  4759. __kfree_skb(skb);
  4760. break;
  4761. case GRO_HELD:
  4762. case GRO_MERGED:
  4763. case GRO_CONSUMED:
  4764. break;
  4765. }
  4766. return ret;
  4767. }
  4768. gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
  4769. {
  4770. skb_mark_napi_id(skb, napi);
  4771. trace_napi_gro_receive_entry(skb);
  4772. skb_gro_reset_offset(skb);
  4773. return napi_skb_finish(dev_gro_receive(napi, skb), skb);
  4774. }
  4775. EXPORT_SYMBOL(napi_gro_receive);
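/* Illustrative sketch, not part of dev.c: a NAPI driver handing a freshly
 * built skb to GRO rather than straight to netif_receive_skb().  The
 * example_* name and the frame/len source are hypothetical.
 */
static void example_gro_rx_one(struct napi_struct *napi, const void *frame,
			       unsigned int len)
{
	struct sk_buff *skb = napi_alloc_skb(napi, len);

	if (!skb)
		return;
	skb_put_data(skb, frame, len);
	skb->protocol = eth_type_trans(skb, napi->dev);
	napi_gro_receive(napi, skb);	/* may merge into napi->gro_hash */
}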
  4776. static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
  4777. {
  4778. if (unlikely(skb->pfmemalloc)) {
  4779. consume_skb(skb);
  4780. return;
  4781. }
  4782. __skb_pull(skb, skb_headlen(skb));
  4783. /* restore the reserve we had after netdev_alloc_skb_ip_align() */
  4784. skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
  4785. skb->vlan_tci = 0;
  4786. skb->dev = napi->dev;
  4787. skb->skb_iif = 0;
  4788. /* eth_type_trans() assumes pkt_type is PACKET_HOST */
  4789. skb->pkt_type = PACKET_HOST;
  4790. skb->encapsulation = 0;
  4791. skb_shinfo(skb)->gso_type = 0;
  4792. skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
  4793. secpath_reset(skb);
  4794. napi->skb = skb;
  4795. }
  4796. struct sk_buff *napi_get_frags(struct napi_struct *napi)
  4797. {
  4798. struct sk_buff *skb = napi->skb;
  4799. if (!skb) {
  4800. skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
  4801. if (skb) {
  4802. napi->skb = skb;
  4803. skb_mark_napi_id(skb, napi);
  4804. }
  4805. }
  4806. return skb;
  4807. }
  4808. EXPORT_SYMBOL(napi_get_frags);
  4809. static gro_result_t napi_frags_finish(struct napi_struct *napi,
  4810. struct sk_buff *skb,
  4811. gro_result_t ret)
  4812. {
  4813. switch (ret) {
  4814. case GRO_NORMAL:
  4815. case GRO_HELD:
  4816. __skb_push(skb, ETH_HLEN);
  4817. skb->protocol = eth_type_trans(skb, skb->dev);
  4818. if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
  4819. ret = GRO_DROP;
  4820. break;
  4821. case GRO_DROP:
  4822. napi_reuse_skb(napi, skb);
  4823. break;
  4824. case GRO_MERGED_FREE:
  4825. if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
  4826. napi_skb_free_stolen_head(skb);
  4827. else
  4828. napi_reuse_skb(napi, skb);
  4829. break;
  4830. case GRO_MERGED:
  4831. case GRO_CONSUMED:
  4832. break;
  4833. }
  4834. return ret;
  4835. }
4836. /* The upper GRO stack assumes the network header starts at gro_offset=0.
4837. * Drivers could call both napi_gro_frags() and napi_gro_receive().
4838. * We copy the Ethernet header into skb->data to have a common layout.
  4839. */
  4840. static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
  4841. {
  4842. struct sk_buff *skb = napi->skb;
  4843. const struct ethhdr *eth;
  4844. unsigned int hlen = sizeof(*eth);
  4845. napi->skb = NULL;
  4846. skb_reset_mac_header(skb);
  4847. skb_gro_reset_offset(skb);
  4848. eth = skb_gro_header_fast(skb, 0);
  4849. if (unlikely(skb_gro_header_hard(skb, hlen))) {
  4850. eth = skb_gro_header_slow(skb, hlen, 0);
  4851. if (unlikely(!eth)) {
  4852. net_warn_ratelimited("%s: dropping impossible skb from %s\n",
  4853. __func__, napi->dev->name);
  4854. napi_reuse_skb(napi, skb);
  4855. return NULL;
  4856. }
  4857. } else {
  4858. gro_pull_from_frag0(skb, hlen);
  4859. NAPI_GRO_CB(skb)->frag0 += hlen;
  4860. NAPI_GRO_CB(skb)->frag0_len -= hlen;
  4861. }
  4862. __skb_pull(skb, hlen);
  4863. /*
  4864. * This works because the only protocols we care about don't require
  4865. * special handling.
  4866. * We'll fix it up properly in napi_frags_finish()
  4867. */
  4868. skb->protocol = eth->h_proto;
  4869. return skb;
  4870. }
  4871. gro_result_t napi_gro_frags(struct napi_struct *napi)
  4872. {
  4873. struct sk_buff *skb = napi_frags_skb(napi);
  4874. if (!skb)
  4875. return GRO_DROP;
  4876. trace_napi_gro_frags_entry(skb);
  4877. return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
  4878. }
  4879. EXPORT_SYMBOL(napi_gro_frags);
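/* Illustrative sketch, not part of dev.c: the page-fragment receive path
 * served by napi_get_frags()/napi_gro_frags() above.  @page, @offset and
 * @len describe a hypothetical RX buffer the driver owns; the example_*
 * name is hypothetical.
 */
static void example_gro_frags_rx(struct napi_struct *napi, struct page *page,
				 unsigned int offset, unsigned int len)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (!skb) {
		put_page(page);
		return;
	}
	skb_fill_page_desc(skb, 0, page, offset, len);
	skb->len += len;
	skb->data_len += len;
	skb->truesize += PAGE_SIZE;

	/* napi_frags_skb() will pull the Ethernet header out of frag0 */
	napi_gro_frags(napi);
}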
  4880. /* Compute the checksum from gro_offset and return the folded value
  4881. * after adding in any pseudo checksum.
  4882. */
  4883. __sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
  4884. {
  4885. __wsum wsum;
  4886. __sum16 sum;
  4887. wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
  4888. /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
  4889. sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
  4890. if (likely(!sum)) {
  4891. if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
  4892. !skb->csum_complete_sw)
  4893. netdev_rx_csum_fault(skb->dev);
  4894. }
  4895. NAPI_GRO_CB(skb)->csum = wsum;
  4896. NAPI_GRO_CB(skb)->csum_valid = 1;
  4897. return sum;
  4898. }
  4899. EXPORT_SYMBOL(__skb_gro_checksum_complete);
  4900. static void net_rps_send_ipi(struct softnet_data *remsd)
  4901. {
  4902. #ifdef CONFIG_RPS
  4903. while (remsd) {
  4904. struct softnet_data *next = remsd->rps_ipi_next;
  4905. if (cpu_online(remsd->cpu))
  4906. smp_call_function_single_async(remsd->cpu, &remsd->csd);
  4907. remsd = next;
  4908. }
  4909. #endif
  4910. }
  4911. /*
4912. * net_rps_action_and_irq_enable sends any pending IPIs for RPS.
  4913. * Note: called with local irq disabled, but exits with local irq enabled.
  4914. */
  4915. static void net_rps_action_and_irq_enable(struct softnet_data *sd)
  4916. {
  4917. #ifdef CONFIG_RPS
  4918. struct softnet_data *remsd = sd->rps_ipi_list;
  4919. if (remsd) {
  4920. sd->rps_ipi_list = NULL;
  4921. local_irq_enable();
4922. /* Send pending IPIs to kick RPS processing on remote CPUs. */
  4923. net_rps_send_ipi(remsd);
  4924. } else
  4925. #endif
  4926. local_irq_enable();
  4927. }
  4928. static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
  4929. {
  4930. #ifdef CONFIG_RPS
  4931. return sd->rps_ipi_list != NULL;
  4932. #else
  4933. return false;
  4934. #endif
  4935. }
  4936. static int process_backlog(struct napi_struct *napi, int quota)
  4937. {
  4938. struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
  4939. bool again = true;
  4940. int work = 0;
4941. /* Check if we have pending IPIs; it's better to send them now
4942. * rather than waiting for net_rx_action() to end.
  4943. */
  4944. if (sd_has_rps_ipi_waiting(sd)) {
  4945. local_irq_disable();
  4946. net_rps_action_and_irq_enable(sd);
  4947. }
  4948. napi->weight = dev_rx_weight;
  4949. while (again) {
  4950. struct sk_buff *skb;
  4951. while ((skb = __skb_dequeue(&sd->process_queue))) {
  4952. rcu_read_lock();
  4953. __netif_receive_skb(skb);
  4954. rcu_read_unlock();
  4955. input_queue_head_incr(sd);
  4956. if (++work >= quota)
  4957. return work;
  4958. }
  4959. local_irq_disable();
  4960. rps_lock(sd);
  4961. if (skb_queue_empty(&sd->input_pkt_queue)) {
  4962. /*
  4963. * Inline a custom version of __napi_complete().
4964. * Only the current CPU owns and manipulates this napi,
  4965. * and NAPI_STATE_SCHED is the only possible flag set
  4966. * on backlog.
  4967. * We can use a plain write instead of clear_bit(),
4968. * and we don't need an smp_mb() memory barrier.
  4969. */
  4970. napi->state = 0;
  4971. again = false;
  4972. } else {
  4973. skb_queue_splice_tail_init(&sd->input_pkt_queue,
  4974. &sd->process_queue);
  4975. }
  4976. rps_unlock(sd);
  4977. local_irq_enable();
  4978. }
  4979. return work;
  4980. }
  4981. /**
  4982. * __napi_schedule - schedule for receive
  4983. * @n: entry to schedule
  4984. *
  4985. * The entry's receive function will be scheduled to run.
  4986. * Consider using __napi_schedule_irqoff() if hard irqs are masked.
  4987. */
  4988. void __napi_schedule(struct napi_struct *n)
  4989. {
  4990. unsigned long flags;
  4991. local_irq_save(flags);
  4992. ____napi_schedule(this_cpu_ptr(&softnet_data), n);
  4993. local_irq_restore(flags);
  4994. }
  4995. EXPORT_SYMBOL(__napi_schedule);
  4996. /**
  4997. * napi_schedule_prep - check if napi can be scheduled
  4998. * @n: napi context
  4999. *
  5000. * Test if NAPI routine is already running, and if not mark
  5001. * it as running. This is used as a condition variable
5002. * to ensure only one NAPI poll instance runs. We also make
  5003. * sure there is no pending NAPI disable.
  5004. */
  5005. bool napi_schedule_prep(struct napi_struct *n)
  5006. {
  5007. unsigned long val, new;
  5008. do {
  5009. val = READ_ONCE(n->state);
  5010. if (unlikely(val & NAPIF_STATE_DISABLE))
  5011. return false;
  5012. new = val | NAPIF_STATE_SCHED;
5013. /* Sets the STATE_MISSED bit if STATE_SCHED was already set.
5014. * This was suggested by Alexander Duyck, as the compiler
5015. * emits better code than:
  5016. * if (val & NAPIF_STATE_SCHED)
  5017. * new |= NAPIF_STATE_MISSED;
  5018. */
  5019. new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
  5020. NAPIF_STATE_MISSED;
  5021. } while (cmpxchg(&n->state, val, new) != val);
  5022. return !(val & NAPIF_STATE_SCHED);
  5023. }
  5024. EXPORT_SYMBOL(napi_schedule_prep);
  5025. /**
  5026. * __napi_schedule_irqoff - schedule for receive
  5027. * @n: entry to schedule
  5028. *
  5029. * Variant of __napi_schedule() assuming hard irqs are masked
  5030. */
  5031. void __napi_schedule_irqoff(struct napi_struct *n)
  5032. {
  5033. ____napi_schedule(this_cpu_ptr(&softnet_data), n);
  5034. }
  5035. EXPORT_SYMBOL(__napi_schedule_irqoff);
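/* Illustrative sketch, not part of dev.c: the hard-interrupt side of NAPI
 * scheduling using the two helpers above.  struct example_irq_priv and
 * example_rx_interrupt() are hypothetical; a real driver would also mask
 * its RX interrupt in hardware before scheduling.
 */
struct example_irq_priv {
	struct napi_struct napi;
};

/* called from the device's hard interrupt handler, hard irqs masked */
static void example_rx_interrupt(struct example_irq_priv *priv)
{
	if (napi_schedule_prep(&priv->napi))
		__napi_schedule_irqoff(&priv->napi);
}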
  5036. bool napi_complete_done(struct napi_struct *n, int work_done)
  5037. {
  5038. unsigned long flags, val, new;
  5039. /*
  5040. * 1) Don't let napi dequeue from the cpu poll list
5041. * just in case it's running on a different CPU.
  5042. * 2) If we are busy polling, do nothing here, we have
  5043. * the guarantee we will be called later.
  5044. */
  5045. if (unlikely(n->state & (NAPIF_STATE_NPSVC |
  5046. NAPIF_STATE_IN_BUSY_POLL)))
  5047. return false;
  5048. if (n->gro_bitmask) {
  5049. unsigned long timeout = 0;
  5050. if (work_done)
  5051. timeout = n->dev->gro_flush_timeout;
  5052. /* When the NAPI instance uses a timeout and keeps postponing
5053. * it, we need to somehow bound the time packets are kept in
5054. * the GRO layer.
  5055. */
  5056. napi_gro_flush(n, !!timeout);
  5057. if (timeout)
  5058. hrtimer_start(&n->timer, ns_to_ktime(timeout),
  5059. HRTIMER_MODE_REL_PINNED);
  5060. }
  5061. if (unlikely(!list_empty(&n->poll_list))) {
  5062. /* If n->poll_list is not empty, we need to mask irqs */
  5063. local_irq_save(flags);
  5064. list_del_init(&n->poll_list);
  5065. local_irq_restore(flags);
  5066. }
  5067. do {
  5068. val = READ_ONCE(n->state);
  5069. WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));
  5070. new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED);
  5071. /* If STATE_MISSED was set, leave STATE_SCHED set,
  5072. * because we will call napi->poll() one more time.
  5073. * This C code was suggested by Alexander Duyck to help gcc.
  5074. */
  5075. new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
  5076. NAPIF_STATE_SCHED;
  5077. } while (cmpxchg(&n->state, val, new) != val);
  5078. if (unlikely(val & NAPIF_STATE_MISSED)) {
  5079. __napi_schedule(n);
  5080. return false;
  5081. }
  5082. return true;
  5083. }
  5084. EXPORT_SYMBOL(napi_complete_done);
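/* Illustrative sketch, not part of dev.c: the canonical poll() shape that
 * pairs with napi_complete_done() above.  struct example_poll_priv,
 * example_clean_rx() and the interrupt re-enable step are hypothetical.
 */
struct example_poll_priv {
	struct napi_struct napi;
};

static int example_clean_rx(struct example_poll_priv *priv, int budget)
{
	/* hypothetical: pull up to @budget frames off the RX ring and feed
	 * each one to napi_gro_receive(&priv->napi, skb)
	 */
	return 0;
}

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_poll_priv *priv =
		container_of(napi, struct example_poll_priv, napi);
	int work_done = example_clean_rx(priv, budget);

	/* Only complete when the budget was not exhausted; otherwise stay
	 * scheduled so net_rx_action() keeps calling us.
	 */
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		/* hypothetical: re-enable the device RX interrupt here */
	}
	return work_done;
}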
  5085. /* must be called under rcu_read_lock(), as we dont take a reference */
  5086. static struct napi_struct *napi_by_id(unsigned int napi_id)
  5087. {
  5088. unsigned int hash = napi_id % HASH_SIZE(napi_hash);
  5089. struct napi_struct *napi;
  5090. hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
  5091. if (napi->napi_id == napi_id)
  5092. return napi;
  5093. return NULL;
  5094. }
  5095. #if defined(CONFIG_NET_RX_BUSY_POLL)
  5096. #define BUSY_POLL_BUDGET 8
  5097. static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
  5098. {
  5099. int rc;
  5100. /* Busy polling means there is a high chance device driver hard irq
  5101. * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
  5102. * set in napi_schedule_prep().
  5103. * Since we are about to call napi->poll() once more, we can safely
  5104. * clear NAPI_STATE_MISSED.
  5105. *
  5106. * Note: x86 could use a single "lock and ..." instruction
  5107. * to perform these two clear_bit()
  5108. */
  5109. clear_bit(NAPI_STATE_MISSED, &napi->state);
  5110. clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
  5111. local_bh_disable();
  5112. /* All we really want here is to re-enable device interrupts.
  5113. * Ideally, a new ndo_busy_poll_stop() could avoid another round.
  5114. */
  5115. rc = napi->poll(napi, BUSY_POLL_BUDGET);
  5116. trace_napi_poll(napi, rc, BUSY_POLL_BUDGET);
  5117. netpoll_poll_unlock(have_poll_lock);
  5118. if (rc == BUSY_POLL_BUDGET)
  5119. __napi_schedule(napi);
  5120. local_bh_enable();
  5121. }
  5122. void napi_busy_loop(unsigned int napi_id,
  5123. bool (*loop_end)(void *, unsigned long),
  5124. void *loop_end_arg)
  5125. {
  5126. unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
  5127. int (*napi_poll)(struct napi_struct *napi, int budget);
  5128. void *have_poll_lock = NULL;
  5129. struct napi_struct *napi;
  5130. restart:
  5131. napi_poll = NULL;
  5132. rcu_read_lock();
  5133. napi = napi_by_id(napi_id);
  5134. if (!napi)
  5135. goto out;
  5136. preempt_disable();
  5137. for (;;) {
  5138. int work = 0;
  5139. local_bh_disable();
  5140. if (!napi_poll) {
  5141. unsigned long val = READ_ONCE(napi->state);
  5142. /* If multiple threads are competing for this napi,
  5143. * we avoid dirtying napi->state as much as we can.
  5144. */
  5145. if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
  5146. NAPIF_STATE_IN_BUSY_POLL))
  5147. goto count;
  5148. if (cmpxchg(&napi->state, val,
  5149. val | NAPIF_STATE_IN_BUSY_POLL |
  5150. NAPIF_STATE_SCHED) != val)
  5151. goto count;
  5152. have_poll_lock = netpoll_poll_lock(napi);
  5153. napi_poll = napi->poll;
  5154. }
  5155. work = napi_poll(napi, BUSY_POLL_BUDGET);
  5156. trace_napi_poll(napi, work, BUSY_POLL_BUDGET);
  5157. count:
  5158. if (work > 0)
  5159. __NET_ADD_STATS(dev_net(napi->dev),
  5160. LINUX_MIB_BUSYPOLLRXPACKETS, work);
  5161. local_bh_enable();
  5162. if (!loop_end || loop_end(loop_end_arg, start_time))
  5163. break;
  5164. if (unlikely(need_resched())) {
  5165. if (napi_poll)
  5166. busy_poll_stop(napi, have_poll_lock);
  5167. preempt_enable();
  5168. rcu_read_unlock();
  5169. cond_resched();
  5170. if (loop_end(loop_end_arg, start_time))
  5171. return;
  5172. goto restart;
  5173. }
  5174. cpu_relax();
  5175. }
  5176. if (napi_poll)
  5177. busy_poll_stop(napi, have_poll_lock);
  5178. preempt_enable();
  5179. out:
  5180. rcu_read_unlock();
  5181. }
  5182. EXPORT_SYMBOL(napi_busy_loop);
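/* Illustrative sketch, not part of dev.c: driving napi_busy_loop() with a
 * loop_end callback, as sk_busy_loop() does.  The example_* names and the
 * fixed round budget are hypothetical; @napi_id would normally come from
 * sk->sk_napi_id / skb_mark_napi_id().
 */
struct example_busy_budget {
	unsigned int rounds_left;
};

static bool example_loop_end(void *arg, unsigned long start_time)
{
	struct example_busy_budget *b = arg;

	return --b->rounds_left == 0;	/* stop after a fixed number of polls */
}

static void example_busy_poll(unsigned int napi_id)
{
	struct example_busy_budget budget = { .rounds_left = 16 };

	napi_busy_loop(napi_id, example_loop_end, &budget);
}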
  5183. #endif /* CONFIG_NET_RX_BUSY_POLL */
  5184. static void napi_hash_add(struct napi_struct *napi)
  5185. {
  5186. if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) ||
  5187. test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
  5188. return;
  5189. spin_lock(&napi_hash_lock);
  5190. /* 0..NR_CPUS range is reserved for sender_cpu use */
  5191. do {
  5192. if (unlikely(++napi_gen_id < MIN_NAPI_ID))
  5193. napi_gen_id = MIN_NAPI_ID;
  5194. } while (napi_by_id(napi_gen_id));
  5195. napi->napi_id = napi_gen_id;
  5196. hlist_add_head_rcu(&napi->napi_hash_node,
  5197. &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
  5198. spin_unlock(&napi_hash_lock);
  5199. }
5200. /* Warning: the caller is responsible for making sure an RCU grace period
5201. * is respected before freeing the memory containing @napi.
  5202. */
  5203. bool napi_hash_del(struct napi_struct *napi)
  5204. {
  5205. bool rcu_sync_needed = false;
  5206. spin_lock(&napi_hash_lock);
  5207. if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) {
  5208. rcu_sync_needed = true;
  5209. hlist_del_rcu(&napi->napi_hash_node);
  5210. }
  5211. spin_unlock(&napi_hash_lock);
  5212. return rcu_sync_needed;
  5213. }
  5214. EXPORT_SYMBOL_GPL(napi_hash_del);
  5215. static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
  5216. {
  5217. struct napi_struct *napi;
  5218. napi = container_of(timer, struct napi_struct, timer);
  5219. /* Note : we use a relaxed variant of napi_schedule_prep() not setting
  5220. * NAPI_STATE_MISSED, since we do not react to a device IRQ.
  5221. */
  5222. if (napi->gro_bitmask && !napi_disable_pending(napi) &&
  5223. !test_and_set_bit(NAPI_STATE_SCHED, &napi->state))
  5224. __napi_schedule_irqoff(napi);
  5225. return HRTIMER_NORESTART;
  5226. }
  5227. static void init_gro_hash(struct napi_struct *napi)
  5228. {
  5229. int i;
  5230. for (i = 0; i < GRO_HASH_BUCKETS; i++) {
  5231. INIT_LIST_HEAD(&napi->gro_hash[i].list);
  5232. napi->gro_hash[i].count = 0;
  5233. }
  5234. napi->gro_bitmask = 0;
  5235. }
  5236. void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
  5237. int (*poll)(struct napi_struct *, int), int weight)
  5238. {
  5239. INIT_LIST_HEAD(&napi->poll_list);
  5240. hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
  5241. napi->timer.function = napi_watchdog;
  5242. init_gro_hash(napi);
  5243. napi->skb = NULL;
  5244. napi->poll = poll;
  5245. if (weight > NAPI_POLL_WEIGHT)
  5246. pr_err_once("netif_napi_add() called with weight %d on device %s\n",
  5247. weight, dev->name);
  5248. napi->weight = weight;
  5249. list_add(&napi->dev_list, &dev->napi_list);
  5250. napi->dev = dev;
  5251. #ifdef CONFIG_NETPOLL
  5252. napi->poll_owner = -1;
  5253. #endif
  5254. set_bit(NAPI_STATE_SCHED, &napi->state);
  5255. napi_hash_add(napi);
  5256. }
  5257. EXPORT_SYMBOL(netif_napi_add);
  5258. void napi_disable(struct napi_struct *n)
  5259. {
  5260. might_sleep();
  5261. set_bit(NAPI_STATE_DISABLE, &n->state);
  5262. while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
  5263. msleep(1);
  5264. while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
  5265. msleep(1);
  5266. hrtimer_cancel(&n->timer);
  5267. clear_bit(NAPI_STATE_DISABLE, &n->state);
  5268. }
  5269. EXPORT_SYMBOL(napi_disable);
  5270. static void flush_gro_hash(struct napi_struct *napi)
  5271. {
  5272. int i;
  5273. for (i = 0; i < GRO_HASH_BUCKETS; i++) {
  5274. struct sk_buff *skb, *n;
  5275. list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list)
  5276. kfree_skb(skb);
  5277. napi->gro_hash[i].count = 0;
  5278. }
  5279. }
  5280. /* Must be called in process context */
  5281. void netif_napi_del(struct napi_struct *napi)
  5282. {
  5283. might_sleep();
  5284. if (napi_hash_del(napi))
  5285. synchronize_net();
  5286. list_del_init(&napi->dev_list);
  5287. napi_free_frags(napi);
  5288. flush_gro_hash(napi);
  5289. napi->gro_bitmask = 0;
  5290. }
  5291. EXPORT_SYMBOL(netif_napi_del);
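/* Run one poll round for @n on behalf of net_rx_action(): call ->poll()
 * with the instance weight as budget and, if the whole budget was consumed,
 * put the instance back on @repoll so it gets polled again.
 */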
  5292. static int napi_poll(struct napi_struct *n, struct list_head *repoll)
  5293. {
  5294. void *have;
  5295. int work, weight;
  5296. list_del_init(&n->poll_list);
  5297. have = netpoll_poll_lock(n);
  5298. weight = n->weight;
  5299. /* This NAPI_STATE_SCHED test is for avoiding a race
  5300. * with netpoll's poll_napi(). Only the entity which
  5301. * obtains the lock and sees NAPI_STATE_SCHED set will
  5302. * actually make the ->poll() call. Therefore we avoid
  5303. * accidentally calling ->poll() when NAPI is not scheduled.
  5304. */
  5305. work = 0;
  5306. if (test_bit(NAPI_STATE_SCHED, &n->state)) {
  5307. work = n->poll(n, weight);
  5308. trace_napi_poll(n, work, weight);
  5309. }
  5310. WARN_ON_ONCE(work > weight);
  5311. if (likely(work < weight))
  5312. goto out_unlock;
  5313. /* Drivers must not modify the NAPI state if they
  5314. * consume the entire weight. In such cases this code
  5315. * still "owns" the NAPI instance and therefore can
  5316. * move the instance around on the list at-will.
  5317. */
  5318. if (unlikely(napi_disable_pending(n))) {
  5319. napi_complete(n);
  5320. goto out_unlock;
  5321. }
  5322. if (n->gro_bitmask) {
  5323. /* flush too old packets
  5324. * If HZ < 1000, flush all packets.
  5325. */
  5326. napi_gro_flush(n, HZ >= 1000);
  5327. }
  5328. /* Some drivers may have called napi_schedule
  5329. * prior to exhausting their budget.
  5330. */
  5331. if (unlikely(!list_empty(&n->poll_list))) {
  5332. pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
  5333. n->dev ? n->dev->name : "backlog");
  5334. goto out_unlock;
  5335. }
  5336. list_add_tail(&n->poll_list, repoll);
  5337. out_unlock:
  5338. netpoll_poll_unlock(have);
  5339. return work;
  5340. }
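/* NET_RX_SOFTIRQ handler: poll the NAPI instances queued on this CPU's
 * softnet_data, bounded by netdev_budget packets and netdev_budget_usecs of
 * elapsed time; anything left over is rescheduled for a later softirq.
 */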
  5341. static __latent_entropy void net_rx_action(struct softirq_action *h)
  5342. {
  5343. struct softnet_data *sd = this_cpu_ptr(&softnet_data);
  5344. unsigned long time_limit = jiffies +
  5345. usecs_to_jiffies(netdev_budget_usecs);
  5346. int budget = netdev_budget;
  5347. LIST_HEAD(list);
  5348. LIST_HEAD(repoll);
  5349. local_irq_disable();
  5350. list_splice_init(&sd->poll_list, &list);
  5351. local_irq_enable();
  5352. for (;;) {
  5353. struct napi_struct *n;
  5354. if (list_empty(&list)) {
  5355. if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
  5356. goto out;
  5357. break;
  5358. }
  5359. n = list_first_entry(&list, struct napi_struct, poll_list);
  5360. budget -= napi_poll(n, &repoll);
  5361. /* If softirq window is exhausted then punt.
5362. * Allow this to run for 2 jiffies, which allows an average
5363. * latency of 1.5/HZ.
  5364. */
  5365. if (unlikely(budget <= 0 ||
  5366. time_after_eq(jiffies, time_limit))) {
  5367. sd->time_squeeze++;
  5368. break;
  5369. }
  5370. }
  5371. local_irq_disable();
  5372. list_splice_tail_init(&sd->poll_list, &list);
  5373. list_splice_tail(&repoll, &list);
  5374. list_splice(&list, &sd->poll_list);
  5375. if (!list_empty(&sd->poll_list))
  5376. __raise_softirq_irqoff(NET_RX_SOFTIRQ);
  5377. net_rps_action_and_irq_enable(sd);
  5378. out:
  5379. __kfree_skb_flush();
  5380. }
  5381. struct netdev_adjacent {
  5382. struct net_device *dev;
  5383. /* upper master flag, there can only be one master device per list */
  5384. bool master;
  5385. /* counter for the number of times this device was added to us */
  5386. u16 ref_nr;
  5387. /* private field for the users */
  5388. void *private;
  5389. struct list_head list;
  5390. struct rcu_head rcu;
  5391. };
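/* Linear search of an adjacency list for the entry describing @adj_dev. */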
  5392. static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
  5393. struct list_head *adj_list)
  5394. {
  5395. struct netdev_adjacent *adj;
  5396. list_for_each_entry(adj, adj_list, list) {
  5397. if (adj->dev == adj_dev)
  5398. return adj;
  5399. }
  5400. return NULL;
  5401. }
  5402. static int __netdev_has_upper_dev(struct net_device *upper_dev, void *data)
  5403. {
  5404. struct net_device *dev = data;
  5405. return upper_dev == dev;
  5406. }
  5407. /**
  5408. * netdev_has_upper_dev - Check if device is linked to an upper device
  5409. * @dev: device
  5410. * @upper_dev: upper device to check
  5411. *
5412. * Find out if a device is linked to the specified upper device and return true
5413. * in case it is. Note that this checks the whole upper device chain,
5414. * not only the immediate upper device. The caller must hold the RTNL lock.
  5415. */
  5416. bool netdev_has_upper_dev(struct net_device *dev,
  5417. struct net_device *upper_dev)
  5418. {
  5419. ASSERT_RTNL();
  5420. return netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
  5421. upper_dev);
  5422. }
  5423. EXPORT_SYMBOL(netdev_has_upper_dev);
  5424. /**
5425. * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
  5426. * @dev: device
  5427. * @upper_dev: upper device to check
  5428. *
5429. * Find out if a device is linked to the specified upper device and return true
5430. * in case it is. Note that this checks the entire upper device chain.
5431. * The caller must hold the RCU read lock.
  5432. */
  5433. bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
  5434. struct net_device *upper_dev)
  5435. {
  5436. return !!netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
  5437. upper_dev);
  5438. }
  5439. EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
  5440. /**
  5441. * netdev_has_any_upper_dev - Check if device is linked to some device
  5442. * @dev: device
  5443. *
  5444. * Find out if a device is linked to an upper device and return true in case
  5445. * it is. The caller must hold the RTNL lock.
  5446. */
  5447. bool netdev_has_any_upper_dev(struct net_device *dev)
  5448. {
  5449. ASSERT_RTNL();
  5450. return !list_empty(&dev->adj_list.upper);
  5451. }
  5452. EXPORT_SYMBOL(netdev_has_any_upper_dev);
  5453. /**
  5454. * netdev_master_upper_dev_get - Get master upper device
  5455. * @dev: device
  5456. *
  5457. * Find a master upper device and return pointer to it or NULL in case
  5458. * it's not there. The caller must hold the RTNL lock.
  5459. */
  5460. struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
  5461. {
  5462. struct netdev_adjacent *upper;
  5463. ASSERT_RTNL();
  5464. if (list_empty(&dev->adj_list.upper))
  5465. return NULL;
  5466. upper = list_first_entry(&dev->adj_list.upper,
  5467. struct netdev_adjacent, list);
  5468. if (likely(upper->master))
  5469. return upper->dev;
  5470. return NULL;
  5471. }
  5472. EXPORT_SYMBOL(netdev_master_upper_dev_get);
  5473. /**
  5474. * netdev_has_any_lower_dev - Check if device is linked to some device
  5475. * @dev: device
  5476. *
  5477. * Find out if a device is linked to a lower device and return true in case
  5478. * it is. The caller must hold the RTNL lock.
  5479. */
  5480. static bool netdev_has_any_lower_dev(struct net_device *dev)
  5481. {
  5482. ASSERT_RTNL();
  5483. return !list_empty(&dev->adj_list.lower);
  5484. }
  5485. void *netdev_adjacent_get_private(struct list_head *adj_list)
  5486. {
  5487. struct netdev_adjacent *adj;
  5488. adj = list_entry(adj_list, struct netdev_adjacent, list);
  5489. return adj->private;
  5490. }
  5491. EXPORT_SYMBOL(netdev_adjacent_get_private);
  5492. /**
  5493. * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
  5494. * @dev: device
  5495. * @iter: list_head ** of the current position
  5496. *
  5497. * Gets the next device from the dev's upper list, starting from iter
  5498. * position. The caller must hold RCU read lock.
  5499. */
  5500. struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
  5501. struct list_head **iter)
  5502. {
  5503. struct netdev_adjacent *upper;
  5504. WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
  5505. upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
  5506. if (&upper->list == &dev->adj_list.upper)
  5507. return NULL;
  5508. *iter = &upper->list;
  5509. return upper->dev;
  5510. }
  5511. EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
  5512. static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
  5513. struct list_head **iter)
  5514. {
  5515. struct netdev_adjacent *upper;
  5516. WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
  5517. upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
  5518. if (&upper->list == &dev->adj_list.upper)
  5519. return NULL;
  5520. *iter = &upper->list;
  5521. return upper->dev;
  5522. }
  5523. int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
  5524. int (*fn)(struct net_device *dev,
  5525. void *data),
  5526. void *data)
  5527. {
  5528. struct net_device *udev;
  5529. struct list_head *iter;
  5530. int ret;
  5531. for (iter = &dev->adj_list.upper,
  5532. udev = netdev_next_upper_dev_rcu(dev, &iter);
  5533. udev;
  5534. udev = netdev_next_upper_dev_rcu(dev, &iter)) {
  5535. /* first is the upper device itself */
  5536. ret = fn(udev, data);
  5537. if (ret)
  5538. return ret;
  5539. /* then look at all of its upper devices */
  5540. ret = netdev_walk_all_upper_dev_rcu(udev, fn, data);
  5541. if (ret)
  5542. return ret;
  5543. }
  5544. return 0;
  5545. }
  5546. EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
  5547. /**
  5548. * netdev_lower_get_next_private - Get the next ->private from the
  5549. * lower neighbour list
  5550. * @dev: device
  5551. * @iter: list_head ** of the current position
  5552. *
  5553. * Gets the next netdev_adjacent->private from the dev's lower neighbour
5554. * list, starting from iter position. The caller must either hold the
  5555. * RTNL lock or its own locking that guarantees that the neighbour lower
  5556. * list will remain unchanged.
  5557. */
  5558. void *netdev_lower_get_next_private(struct net_device *dev,
  5559. struct list_head **iter)
  5560. {
  5561. struct netdev_adjacent *lower;
  5562. lower = list_entry(*iter, struct netdev_adjacent, list);
  5563. if (&lower->list == &dev->adj_list.lower)
  5564. return NULL;
  5565. *iter = lower->list.next;
  5566. return lower->private;
  5567. }
  5568. EXPORT_SYMBOL(netdev_lower_get_next_private);
  5569. /**
  5570. * netdev_lower_get_next_private_rcu - Get the next ->private from the
  5571. * lower neighbour list, RCU
  5572. * variant
  5573. * @dev: device
  5574. * @iter: list_head ** of the current position
  5575. *
  5576. * Gets the next netdev_adjacent->private from the dev's lower neighbour
  5577. * list, starting from iter position. The caller must hold RCU read lock.
  5578. */
  5579. void *netdev_lower_get_next_private_rcu(struct net_device *dev,
  5580. struct list_head **iter)
  5581. {
  5582. struct netdev_adjacent *lower;
  5583. WARN_ON_ONCE(!rcu_read_lock_held());
  5584. lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
  5585. if (&lower->list == &dev->adj_list.lower)
  5586. return NULL;
  5587. *iter = &lower->list;
  5588. return lower->private;
  5589. }
  5590. EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
  5591. /**
  5592. * netdev_lower_get_next - Get the next device from the lower neighbour
  5593. * list
  5594. * @dev: device
  5595. * @iter: list_head ** of the current position
  5596. *
  5597. * Gets the next netdev_adjacent from the dev's lower neighbour
  5598. * list, starting from iter position. The caller must hold RTNL lock or
  5599. * its own locking that guarantees that the neighbour lower
  5600. * list will remain unchanged.
  5601. */
  5602. void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
  5603. {
  5604. struct netdev_adjacent *lower;
  5605. lower = list_entry(*iter, struct netdev_adjacent, list);
  5606. if (&lower->list == &dev->adj_list.lower)
  5607. return NULL;
  5608. *iter = lower->list.next;
  5609. return lower->dev;
  5610. }
  5611. EXPORT_SYMBOL(netdev_lower_get_next);
  5612. static struct net_device *netdev_next_lower_dev(struct net_device *dev,
  5613. struct list_head **iter)
  5614. {
  5615. struct netdev_adjacent *lower;
  5616. lower = list_entry((*iter)->next, struct netdev_adjacent, list);
  5617. if (&lower->list == &dev->adj_list.lower)
  5618. return NULL;
  5619. *iter = &lower->list;
  5620. return lower->dev;
  5621. }
  5622. int netdev_walk_all_lower_dev(struct net_device *dev,
  5623. int (*fn)(struct net_device *dev,
  5624. void *data),
  5625. void *data)
  5626. {
  5627. struct net_device *ldev;
  5628. struct list_head *iter;
  5629. int ret;
  5630. for (iter = &dev->adj_list.lower,
  5631. ldev = netdev_next_lower_dev(dev, &iter);
  5632. ldev;
  5633. ldev = netdev_next_lower_dev(dev, &iter)) {
  5634. /* first is the lower device itself */
  5635. ret = fn(ldev, data);
  5636. if (ret)
  5637. return ret;
  5638. /* then look at all of its lower devices */
  5639. ret = netdev_walk_all_lower_dev(ldev, fn, data);
  5640. if (ret)
  5641. return ret;
  5642. }
  5643. return 0;
  5644. }
  5645. EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);
  5646. static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
  5647. struct list_head **iter)
  5648. {
  5649. struct netdev_adjacent *lower;
  5650. lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
  5651. if (&lower->list == &dev->adj_list.lower)
  5652. return NULL;
  5653. *iter = &lower->list;
  5654. return lower->dev;
  5655. }
  5656. int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
  5657. int (*fn)(struct net_device *dev,
  5658. void *data),
  5659. void *data)
  5660. {
  5661. struct net_device *ldev;
  5662. struct list_head *iter;
  5663. int ret;
  5664. for (iter = &dev->adj_list.lower,
  5665. ldev = netdev_next_lower_dev_rcu(dev, &iter);
  5666. ldev;
  5667. ldev = netdev_next_lower_dev_rcu(dev, &iter)) {
  5668. /* first is the lower device itself */
  5669. ret = fn(ldev, data);
  5670. if (ret)
  5671. return ret;
  5672. /* then look at all of its lower devices */
  5673. ret = netdev_walk_all_lower_dev_rcu(ldev, fn, data);
  5674. if (ret)
  5675. return ret;
  5676. }
  5677. return 0;
  5678. }
  5679. EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);
  5680. /**
  5681. * netdev_lower_get_first_private_rcu - Get the first ->private from the
  5682. * lower neighbour list, RCU
  5683. * variant
  5684. * @dev: device
  5685. *
  5686. * Gets the first netdev_adjacent->private from the dev's lower neighbour
  5687. * list. The caller must hold RCU read lock.
  5688. */
  5689. void *netdev_lower_get_first_private_rcu(struct net_device *dev)
  5690. {
  5691. struct netdev_adjacent *lower;
  5692. lower = list_first_or_null_rcu(&dev->adj_list.lower,
  5693. struct netdev_adjacent, list);
  5694. if (lower)
  5695. return lower->private;
  5696. return NULL;
  5697. }
  5698. EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
  5699. /**
  5700. * netdev_master_upper_dev_get_rcu - Get master upper device
  5701. * @dev: device
  5702. *
  5703. * Find a master upper device and return pointer to it or NULL in case
  5704. * it's not there. The caller must hold the RCU read lock.
  5705. */
  5706. struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
  5707. {
  5708. struct netdev_adjacent *upper;
  5709. upper = list_first_or_null_rcu(&dev->adj_list.upper,
  5710. struct netdev_adjacent, list);
  5711. if (upper && likely(upper->master))
  5712. return upper->dev;
  5713. return NULL;
  5714. }
  5715. EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
  5716. static int netdev_adjacent_sysfs_add(struct net_device *dev,
  5717. struct net_device *adj_dev,
  5718. struct list_head *dev_list)
  5719. {
  5720. char linkname[IFNAMSIZ+7];
  5721. sprintf(linkname, dev_list == &dev->adj_list.upper ?
  5722. "upper_%s" : "lower_%s", adj_dev->name);
  5723. return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
  5724. linkname);
  5725. }
  5726. static void netdev_adjacent_sysfs_del(struct net_device *dev,
  5727. char *name,
  5728. struct list_head *dev_list)
  5729. {
  5730. char linkname[IFNAMSIZ+7];
  5731. sprintf(linkname, dev_list == &dev->adj_list.upper ?
  5732. "upper_%s" : "lower_%s", name);
  5733. sysfs_remove_link(&(dev->dev.kobj), linkname);
  5734. }
  5735. static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
  5736. struct net_device *adj_dev,
  5737. struct list_head *dev_list)
  5738. {
  5739. return (dev_list == &dev->adj_list.upper ||
  5740. dev_list == &dev->adj_list.lower) &&
  5741. net_eq(dev_net(dev), dev_net(adj_dev));
  5742. }
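/* Add @adj_dev to @dev_list (or just bump ref_nr if it is already there),
 * take a reference on @adj_dev and, for direct neighbours in the same netns,
 * create the "upper_<name>"/"lower_<name>" sysfs links. A master link is
 * additionally exposed as a "master" symlink and kept first in the list.
 */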
  5743. static int __netdev_adjacent_dev_insert(struct net_device *dev,
  5744. struct net_device *adj_dev,
  5745. struct list_head *dev_list,
  5746. void *private, bool master)
  5747. {
  5748. struct netdev_adjacent *adj;
  5749. int ret;
  5750. adj = __netdev_find_adj(adj_dev, dev_list);
  5751. if (adj) {
  5752. adj->ref_nr += 1;
  5753. pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
  5754. dev->name, adj_dev->name, adj->ref_nr);
  5755. return 0;
  5756. }
  5757. adj = kmalloc(sizeof(*adj), GFP_KERNEL);
  5758. if (!adj)
  5759. return -ENOMEM;
  5760. adj->dev = adj_dev;
  5761. adj->master = master;
  5762. adj->ref_nr = 1;
  5763. adj->private = private;
  5764. dev_hold(adj_dev);
  5765. pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
  5766. dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);
  5767. if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
  5768. ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
  5769. if (ret)
  5770. goto free_adj;
  5771. }
  5772. /* Ensure that master link is always the first item in list. */
  5773. if (master) {
  5774. ret = sysfs_create_link(&(dev->dev.kobj),
  5775. &(adj_dev->dev.kobj), "master");
  5776. if (ret)
  5777. goto remove_symlinks;
  5778. list_add_rcu(&adj->list, dev_list);
  5779. } else {
  5780. list_add_tail_rcu(&adj->list, dev_list);
  5781. }
  5782. return 0;
  5783. remove_symlinks:
  5784. if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
  5785. netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
  5786. free_adj:
  5787. kfree(adj);
  5788. dev_put(adj_dev);
  5789. return ret;
  5790. }
  5791. static void __netdev_adjacent_dev_remove(struct net_device *dev,
  5792. struct net_device *adj_dev,
  5793. u16 ref_nr,
  5794. struct list_head *dev_list)
  5795. {
  5796. struct netdev_adjacent *adj;
  5797. pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
  5798. dev->name, adj_dev->name, ref_nr);
  5799. adj = __netdev_find_adj(adj_dev, dev_list);
  5800. if (!adj) {
  5801. pr_err("Adjacency does not exist for device %s from %s\n",
  5802. dev->name, adj_dev->name);
  5803. WARN_ON(1);
  5804. return;
  5805. }
  5806. if (adj->ref_nr > ref_nr) {
  5807. pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
  5808. dev->name, adj_dev->name, ref_nr,
  5809. adj->ref_nr - ref_nr);
  5810. adj->ref_nr -= ref_nr;
  5811. return;
  5812. }
  5813. if (adj->master)
  5814. sysfs_remove_link(&(dev->dev.kobj), "master");
  5815. if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
  5816. netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
  5817. list_del_rcu(&adj->list);
  5818. pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
  5819. adj_dev->name, dev->name, adj_dev->name);
  5820. dev_put(adj_dev);
  5821. kfree_rcu(adj, rcu);
  5822. }
  5823. static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
  5824. struct net_device *upper_dev,
  5825. struct list_head *up_list,
  5826. struct list_head *down_list,
  5827. void *private, bool master)
  5828. {
  5829. int ret;
  5830. ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
  5831. private, master);
  5832. if (ret)
  5833. return ret;
  5834. ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
  5835. private, false);
  5836. if (ret) {
  5837. __netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
  5838. return ret;
  5839. }
  5840. return 0;
  5841. }
  5842. static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
  5843. struct net_device *upper_dev,
  5844. u16 ref_nr,
  5845. struct list_head *up_list,
  5846. struct list_head *down_list)
  5847. {
  5848. __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
  5849. __netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
  5850. }
  5851. static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
  5852. struct net_device *upper_dev,
  5853. void *private, bool master)
  5854. {
  5855. return __netdev_adjacent_dev_link_lists(dev, upper_dev,
  5856. &dev->adj_list.upper,
  5857. &upper_dev->adj_list.lower,
  5858. private, master);
  5859. }
  5860. static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
  5861. struct net_device *upper_dev)
  5862. {
  5863. __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
  5864. &dev->adj_list.upper,
  5865. &upper_dev->adj_list.lower);
  5866. }
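/* Core of upper/lower linking: reject loops and duplicate or conflicting
 * master links, give NETDEV_PRECHANGEUPPER listeners a chance to veto,
 * wire up both adjacency lists, then send NETDEV_CHANGEUPPER and roll the
 * link back if a notifier returns an error.
 */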
  5867. static int __netdev_upper_dev_link(struct net_device *dev,
  5868. struct net_device *upper_dev, bool master,
  5869. void *upper_priv, void *upper_info,
  5870. struct netlink_ext_ack *extack)
  5871. {
  5872. struct netdev_notifier_changeupper_info changeupper_info = {
  5873. .info = {
  5874. .dev = dev,
  5875. .extack = extack,
  5876. },
  5877. .upper_dev = upper_dev,
  5878. .master = master,
  5879. .linking = true,
  5880. .upper_info = upper_info,
  5881. };
  5882. struct net_device *master_dev;
  5883. int ret = 0;
  5884. ASSERT_RTNL();
  5885. if (dev == upper_dev)
  5886. return -EBUSY;
  5887. /* To prevent loops, check if dev is not upper device to upper_dev. */
  5888. if (netdev_has_upper_dev(upper_dev, dev))
  5889. return -EBUSY;
  5890. if (!master) {
  5891. if (netdev_has_upper_dev(dev, upper_dev))
  5892. return -EEXIST;
  5893. } else {
  5894. master_dev = netdev_master_upper_dev_get(dev);
  5895. if (master_dev)
  5896. return master_dev == upper_dev ? -EEXIST : -EBUSY;
  5897. }
  5898. ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
  5899. &changeupper_info.info);
  5900. ret = notifier_to_errno(ret);
  5901. if (ret)
  5902. return ret;
  5903. ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
  5904. master);
  5905. if (ret)
  5906. return ret;
  5907. ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
  5908. &changeupper_info.info);
  5909. ret = notifier_to_errno(ret);
  5910. if (ret)
  5911. goto rollback;
  5912. return 0;
  5913. rollback:
  5914. __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
  5915. return ret;
  5916. }
  5917. /**
  5918. * netdev_upper_dev_link - Add a link to the upper device
  5919. * @dev: device
  5920. * @upper_dev: new upper device
  5921. * @extack: netlink extended ack
  5922. *
5923. * Adds a link to a device which is upper to this one. The caller must hold
  5924. * the RTNL lock. On a failure a negative errno code is returned.
  5925. * On success the reference counts are adjusted and the function
  5926. * returns zero.
  5927. */
  5928. int netdev_upper_dev_link(struct net_device *dev,
  5929. struct net_device *upper_dev,
  5930. struct netlink_ext_ack *extack)
  5931. {
  5932. return __netdev_upper_dev_link(dev, upper_dev, false,
  5933. NULL, NULL, extack);
  5934. }
  5935. EXPORT_SYMBOL(netdev_upper_dev_link);
  5936. /**
  5937. * netdev_master_upper_dev_link - Add a master link to the upper device
  5938. * @dev: device
  5939. * @upper_dev: new upper device
  5940. * @upper_priv: upper device private
  5941. * @upper_info: upper info to be passed down via notifier
  5942. * @extack: netlink extended ack
  5943. *
5944. * Adds a link to a device which is upper to this one. In this case, only
  5945. * one master upper device can be linked, although other non-master devices
  5946. * might be linked as well. The caller must hold the RTNL lock.
  5947. * On a failure a negative errno code is returned. On success the reference
  5948. * counts are adjusted and the function returns zero.
  5949. */
  5950. int netdev_master_upper_dev_link(struct net_device *dev,
  5951. struct net_device *upper_dev,
  5952. void *upper_priv, void *upper_info,
  5953. struct netlink_ext_ack *extack)
  5954. {
  5955. return __netdev_upper_dev_link(dev, upper_dev, true,
  5956. upper_priv, upper_info, extack);
  5957. }
  5958. EXPORT_SYMBOL(netdev_master_upper_dev_link);
  5959. /**
  5960. * netdev_upper_dev_unlink - Removes a link to upper device
  5961. * @dev: device
5962. * @upper_dev: upper device to unlink
  5963. *
5964. * Removes a link to a device which is upper to this one. The caller must hold
  5965. * the RTNL lock.
  5966. */
  5967. void netdev_upper_dev_unlink(struct net_device *dev,
  5968. struct net_device *upper_dev)
  5969. {
  5970. struct netdev_notifier_changeupper_info changeupper_info = {
  5971. .info = {
  5972. .dev = dev,
  5973. },
  5974. .upper_dev = upper_dev,
  5975. .linking = false,
  5976. };
  5977. ASSERT_RTNL();
  5978. changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
  5979. call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
  5980. &changeupper_info.info);
  5981. __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
  5982. call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
  5983. &changeupper_info.info);
  5984. }
  5985. EXPORT_SYMBOL(netdev_upper_dev_unlink);
  5986. /**
  5987. * netdev_bonding_info_change - Dispatch event about slave change
  5988. * @dev: device
  5989. * @bonding_info: info to dispatch
  5990. *
  5991. * Send NETDEV_BONDING_INFO to netdev notifiers with info.
  5992. * The caller must hold the RTNL lock.
  5993. */
  5994. void netdev_bonding_info_change(struct net_device *dev,
  5995. struct netdev_bonding_info *bonding_info)
  5996. {
  5997. struct netdev_notifier_bonding_info info = {
  5998. .info.dev = dev,
  5999. };
  6000. memcpy(&info.bonding_info, bonding_info,
  6001. sizeof(struct netdev_bonding_info));
  6002. call_netdevice_notifiers_info(NETDEV_BONDING_INFO,
  6003. &info.info);
  6004. }
  6005. EXPORT_SYMBOL(netdev_bonding_info_change);
  6006. static void netdev_adjacent_add_links(struct net_device *dev)
  6007. {
  6008. struct netdev_adjacent *iter;
  6009. struct net *net = dev_net(dev);
  6010. list_for_each_entry(iter, &dev->adj_list.upper, list) {
  6011. if (!net_eq(net, dev_net(iter->dev)))
  6012. continue;
  6013. netdev_adjacent_sysfs_add(iter->dev, dev,
  6014. &iter->dev->adj_list.lower);
  6015. netdev_adjacent_sysfs_add(dev, iter->dev,
  6016. &dev->adj_list.upper);
  6017. }
  6018. list_for_each_entry(iter, &dev->adj_list.lower, list) {
  6019. if (!net_eq(net, dev_net(iter->dev)))
  6020. continue;
  6021. netdev_adjacent_sysfs_add(iter->dev, dev,
  6022. &iter->dev->adj_list.upper);
  6023. netdev_adjacent_sysfs_add(dev, iter->dev,
  6024. &dev->adj_list.lower);
  6025. }
  6026. }
  6027. static void netdev_adjacent_del_links(struct net_device *dev)
  6028. {
  6029. struct netdev_adjacent *iter;
  6030. struct net *net = dev_net(dev);
  6031. list_for_each_entry(iter, &dev->adj_list.upper, list) {
  6032. if (!net_eq(net, dev_net(iter->dev)))
  6033. continue;
  6034. netdev_adjacent_sysfs_del(iter->dev, dev->name,
  6035. &iter->dev->adj_list.lower);
  6036. netdev_adjacent_sysfs_del(dev, iter->dev->name,
  6037. &dev->adj_list.upper);
  6038. }
  6039. list_for_each_entry(iter, &dev->adj_list.lower, list) {
  6040. if (!net_eq(net, dev_net(iter->dev)))
  6041. continue;
  6042. netdev_adjacent_sysfs_del(iter->dev, dev->name,
  6043. &iter->dev->adj_list.upper);
  6044. netdev_adjacent_sysfs_del(dev, iter->dev->name,
  6045. &dev->adj_list.lower);
  6046. }
  6047. }
  6048. void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
  6049. {
  6050. struct netdev_adjacent *iter;
  6051. struct net *net = dev_net(dev);
  6052. list_for_each_entry(iter, &dev->adj_list.upper, list) {
  6053. if (!net_eq(net, dev_net(iter->dev)))
  6054. continue;
  6055. netdev_adjacent_sysfs_del(iter->dev, oldname,
  6056. &iter->dev->adj_list.lower);
  6057. netdev_adjacent_sysfs_add(iter->dev, dev,
  6058. &iter->dev->adj_list.lower);
  6059. }
  6060. list_for_each_entry(iter, &dev->adj_list.lower, list) {
  6061. if (!net_eq(net, dev_net(iter->dev)))
  6062. continue;
  6063. netdev_adjacent_sysfs_del(iter->dev, oldname,
  6064. &iter->dev->adj_list.upper);
  6065. netdev_adjacent_sysfs_add(iter->dev, dev,
  6066. &iter->dev->adj_list.upper);
  6067. }
  6068. }
  6069. void *netdev_lower_dev_get_private(struct net_device *dev,
  6070. struct net_device *lower_dev)
  6071. {
  6072. struct netdev_adjacent *lower;
  6073. if (!lower_dev)
  6074. return NULL;
  6075. lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
  6076. if (!lower)
  6077. return NULL;
  6078. return lower->private;
  6079. }
  6080. EXPORT_SYMBOL(netdev_lower_dev_get_private);
  6081. int dev_get_nest_level(struct net_device *dev)
  6082. {
  6083. struct net_device *lower = NULL;
  6084. struct list_head *iter;
  6085. int max_nest = -1;
  6086. int nest;
  6087. ASSERT_RTNL();
  6088. netdev_for_each_lower_dev(dev, lower, iter) {
  6089. nest = dev_get_nest_level(lower);
  6090. if (max_nest < nest)
  6091. max_nest = nest;
  6092. }
  6093. return max_nest + 1;
  6094. }
  6095. EXPORT_SYMBOL(dev_get_nest_level);
  6096. /**
6097. * netdev_lower_state_changed - Dispatch event about lower device state change
  6098. * @lower_dev: device
  6099. * @lower_state_info: state to dispatch
  6100. *
  6101. * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
  6102. * The caller must hold the RTNL lock.
  6103. */
  6104. void netdev_lower_state_changed(struct net_device *lower_dev,
  6105. void *lower_state_info)
  6106. {
  6107. struct netdev_notifier_changelowerstate_info changelowerstate_info = {
  6108. .info.dev = lower_dev,
  6109. };
  6110. ASSERT_RTNL();
  6111. changelowerstate_info.lower_state_info = lower_state_info;
  6112. call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE,
  6113. &changelowerstate_info.info);
  6114. }
  6115. EXPORT_SYMBOL(netdev_lower_state_changed);
  6116. static void dev_change_rx_flags(struct net_device *dev, int flags)
  6117. {
  6118. const struct net_device_ops *ops = dev->netdev_ops;
  6119. if (ops->ndo_change_rx_flags)
  6120. ops->ndo_change_rx_flags(dev, flags);
  6121. }
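/* Counted promiscuous mode: @inc adjusts dev->promiscuity and IFF_PROMISC
 * stays set while the count is non-zero. Transitions are logged, audited
 * and passed to the driver through ndo_change_rx_flags().
 */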
  6122. static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
  6123. {
  6124. unsigned int old_flags = dev->flags;
  6125. kuid_t uid;
  6126. kgid_t gid;
  6127. ASSERT_RTNL();
  6128. dev->flags |= IFF_PROMISC;
  6129. dev->promiscuity += inc;
  6130. if (dev->promiscuity == 0) {
  6131. /*
  6132. * Avoid overflow.
  6133. * If inc causes overflow, untouch promisc and return error.
  6134. */
  6135. if (inc < 0)
  6136. dev->flags &= ~IFF_PROMISC;
  6137. else {
  6138. dev->promiscuity -= inc;
  6139. pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
  6140. dev->name);
  6141. return -EOVERFLOW;
  6142. }
  6143. }
  6144. if (dev->flags != old_flags) {
  6145. pr_info("device %s %s promiscuous mode\n",
  6146. dev->name,
  6147. dev->flags & IFF_PROMISC ? "entered" : "left");
  6148. if (audit_enabled) {
  6149. current_uid_gid(&uid, &gid);
  6150. audit_log(audit_context(), GFP_ATOMIC,
  6151. AUDIT_ANOM_PROMISCUOUS,
  6152. "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
  6153. dev->name, (dev->flags & IFF_PROMISC),
  6154. (old_flags & IFF_PROMISC),
  6155. from_kuid(&init_user_ns, audit_get_loginuid(current)),
  6156. from_kuid(&init_user_ns, uid),
  6157. from_kgid(&init_user_ns, gid),
  6158. audit_get_sessionid(current));
  6159. }
  6160. dev_change_rx_flags(dev, IFF_PROMISC);
  6161. }
  6162. if (notify)
  6163. __dev_notify_flags(dev, old_flags, IFF_PROMISC);
  6164. return 0;
  6165. }
  6166. /**
  6167. * dev_set_promiscuity - update promiscuity count on a device
  6168. * @dev: device
  6169. * @inc: modifier
  6170. *
  6171. * Add or remove promiscuity from a device. While the count in the device
  6172. * remains above zero the interface remains promiscuous. Once it hits zero
6173. * the device reverts to normal filtering operation. A negative @inc
  6174. * value is used to drop promiscuity on the device.
  6175. * Return 0 if successful or a negative errno code on error.
  6176. */
  6177. int dev_set_promiscuity(struct net_device *dev, int inc)
  6178. {
  6179. unsigned int old_flags = dev->flags;
  6180. int err;
  6181. err = __dev_set_promiscuity(dev, inc, true);
  6182. if (err < 0)
  6183. return err;
  6184. if (dev->flags != old_flags)
  6185. dev_set_rx_mode(dev);
  6186. return err;
  6187. }
  6188. EXPORT_SYMBOL(dev_set_promiscuity);
  6189. static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
  6190. {
  6191. unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
  6192. ASSERT_RTNL();
  6193. dev->flags |= IFF_ALLMULTI;
  6194. dev->allmulti += inc;
  6195. if (dev->allmulti == 0) {
  6196. /*
  6197. * Avoid overflow.
  6198. * If inc causes overflow, untouch allmulti and return error.
  6199. */
  6200. if (inc < 0)
  6201. dev->flags &= ~IFF_ALLMULTI;
  6202. else {
  6203. dev->allmulti -= inc;
  6204. pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
  6205. dev->name);
  6206. return -EOVERFLOW;
  6207. }
  6208. }
  6209. if (dev->flags ^ old_flags) {
  6210. dev_change_rx_flags(dev, IFF_ALLMULTI);
  6211. dev_set_rx_mode(dev);
  6212. if (notify)
  6213. __dev_notify_flags(dev, old_flags,
  6214. dev->gflags ^ old_gflags);
  6215. }
  6216. return 0;
  6217. }
  6218. /**
  6219. * dev_set_allmulti - update allmulti count on a device
  6220. * @dev: device
  6221. * @inc: modifier
  6222. *
6223. * Add or remove reception of all multicast frames on a device. While the
6224. * count in the device remains above zero the interface remains listening
6225. * to all multicast frames. Once it hits zero the device reverts to normal
  6226. * filtering operation. A negative @inc value is used to drop the counter
  6227. * when releasing a resource needing all multicasts.
  6228. * Return 0 if successful or a negative errno code on error.
  6229. */
  6230. int dev_set_allmulti(struct net_device *dev, int inc)
  6231. {
  6232. return __dev_set_allmulti(dev, inc, true);
  6233. }
  6234. EXPORT_SYMBOL(dev_set_allmulti);
  6235. /*
  6236. * Upload unicast and multicast address lists to device and
  6237. * configure RX filtering. When the device doesn't support unicast
  6238. * filtering it is put in promiscuous mode while unicast addresses
  6239. * are present.
  6240. */
  6241. void __dev_set_rx_mode(struct net_device *dev)
  6242. {
  6243. const struct net_device_ops *ops = dev->netdev_ops;
  6244. /* dev_open will call this function so the list will stay sane. */
  6245. if (!(dev->flags&IFF_UP))
  6246. return;
  6247. if (!netif_device_present(dev))
  6248. return;
  6249. if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
6250. /* Unicast address changes may only happen under the rtnl,
  6251. * therefore calling __dev_set_promiscuity here is safe.
  6252. */
  6253. if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
  6254. __dev_set_promiscuity(dev, 1, false);
  6255. dev->uc_promisc = true;
  6256. } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
  6257. __dev_set_promiscuity(dev, -1, false);
  6258. dev->uc_promisc = false;
  6259. }
  6260. }
  6261. if (ops->ndo_set_rx_mode)
  6262. ops->ndo_set_rx_mode(dev);
  6263. }
  6264. void dev_set_rx_mode(struct net_device *dev)
  6265. {
  6266. netif_addr_lock_bh(dev);
  6267. __dev_set_rx_mode(dev);
  6268. netif_addr_unlock_bh(dev);
  6269. }
  6270. /**
  6271. * dev_get_flags - get flags reported to userspace
  6272. * @dev: device
  6273. *
  6274. * Get the combination of flag bits exported through APIs to userspace.
  6275. */
  6276. unsigned int dev_get_flags(const struct net_device *dev)
  6277. {
  6278. unsigned int flags;
  6279. flags = (dev->flags & ~(IFF_PROMISC |
  6280. IFF_ALLMULTI |
  6281. IFF_RUNNING |
  6282. IFF_LOWER_UP |
  6283. IFF_DORMANT)) |
  6284. (dev->gflags & (IFF_PROMISC |
  6285. IFF_ALLMULTI));
  6286. if (netif_running(dev)) {
  6287. if (netif_oper_up(dev))
  6288. flags |= IFF_RUNNING;
  6289. if (netif_carrier_ok(dev))
  6290. flags |= IFF_LOWER_UP;
  6291. if (netif_dormant(dev))
  6292. flags |= IFF_DORMANT;
  6293. }
  6294. return flags;
  6295. }
  6296. EXPORT_SYMBOL(dev_get_flags);
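/* Apply a userspace flag word to @dev: directly settable flags are copied,
 * an IFF_UP transition opens or closes the device, and IFF_PROMISC /
 * IFF_ALLMULTI requests are mapped onto the counted helpers via gflags.
 */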
  6297. int __dev_change_flags(struct net_device *dev, unsigned int flags)
  6298. {
  6299. unsigned int old_flags = dev->flags;
  6300. int ret;
  6301. ASSERT_RTNL();
  6302. /*
  6303. * Set the flags on our device.
  6304. */
  6305. dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
  6306. IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
  6307. IFF_AUTOMEDIA)) |
  6308. (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
  6309. IFF_ALLMULTI));
  6310. /*
  6311. * Load in the correct multicast list now the flags have changed.
  6312. */
  6313. if ((old_flags ^ flags) & IFF_MULTICAST)
  6314. dev_change_rx_flags(dev, IFF_MULTICAST);
  6315. dev_set_rx_mode(dev);
  6316. /*
6317. * Have we downed the interface? We handle IFF_UP ourselves
  6318. * according to user attempts to set it, rather than blindly
  6319. * setting it.
  6320. */
  6321. ret = 0;
  6322. if ((old_flags ^ flags) & IFF_UP) {
  6323. if (old_flags & IFF_UP)
  6324. __dev_close(dev);
  6325. else
  6326. ret = __dev_open(dev);
  6327. }
  6328. if ((flags ^ dev->gflags) & IFF_PROMISC) {
  6329. int inc = (flags & IFF_PROMISC) ? 1 : -1;
  6330. unsigned int old_flags = dev->flags;
  6331. dev->gflags ^= IFF_PROMISC;
  6332. if (__dev_set_promiscuity(dev, inc, false) >= 0)
  6333. if (dev->flags != old_flags)
  6334. dev_set_rx_mode(dev);
  6335. }
  6336. /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
6337. * is important. Some (broken) drivers set IFF_PROMISC when
6338. * IFF_ALLMULTI is requested, without asking us and without reporting it.
  6339. */
  6340. if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
  6341. int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
  6342. dev->gflags ^= IFF_ALLMULTI;
  6343. __dev_set_allmulti(dev, inc, false);
  6344. }
  6345. return ret;
  6346. }
  6347. void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
  6348. unsigned int gchanges)
  6349. {
  6350. unsigned int changes = dev->flags ^ old_flags;
  6351. if (gchanges)
  6352. rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
  6353. if (changes & IFF_UP) {
  6354. if (dev->flags & IFF_UP)
  6355. call_netdevice_notifiers(NETDEV_UP, dev);
  6356. else
  6357. call_netdevice_notifiers(NETDEV_DOWN, dev);
  6358. }
  6359. if (dev->flags & IFF_UP &&
  6360. (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
  6361. struct netdev_notifier_change_info change_info = {
  6362. .info = {
  6363. .dev = dev,
  6364. },
  6365. .flags_changed = changes,
  6366. };
  6367. call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info);
  6368. }
  6369. }
  6370. /**
  6371. * dev_change_flags - change device settings
  6372. * @dev: device
  6373. * @flags: device state flags
  6374. *
  6375. * Change settings on device based state flags. The flags are
  6376. * in the userspace exported format.
  6377. */
  6378. int dev_change_flags(struct net_device *dev, unsigned int flags)
  6379. {
  6380. int ret;
  6381. unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
  6382. ret = __dev_change_flags(dev, flags);
  6383. if (ret < 0)
  6384. return ret;
  6385. changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
  6386. __dev_notify_flags(dev, old_flags, changes);
  6387. return ret;
  6388. }
  6389. EXPORT_SYMBOL(dev_change_flags);
  6390. int __dev_set_mtu(struct net_device *dev, int new_mtu)
  6391. {
  6392. const struct net_device_ops *ops = dev->netdev_ops;
  6393. if (ops->ndo_change_mtu)
  6394. return ops->ndo_change_mtu(dev, new_mtu);
  6395. dev->mtu = new_mtu;
  6396. return 0;
  6397. }
  6398. EXPORT_SYMBOL(__dev_set_mtu);
  6399. /**
  6400. * dev_set_mtu_ext - Change maximum transfer unit
  6401. * @dev: device
  6402. * @new_mtu: new transfer unit
  6403. * @extack: netlink extended ack
  6404. *
  6405. * Change the maximum transfer size of the network device.
  6406. */
  6407. int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
  6408. struct netlink_ext_ack *extack)
  6409. {
  6410. int err, orig_mtu;
  6411. if (new_mtu == dev->mtu)
  6412. return 0;
  6413. /* MTU must be positive, and in range */
  6414. if (new_mtu < 0 || new_mtu < dev->min_mtu) {
  6415. NL_SET_ERR_MSG(extack, "mtu less than device minimum");
  6416. return -EINVAL;
  6417. }
  6418. if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
  6419. NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
  6420. return -EINVAL;
  6421. }
  6422. if (!netif_device_present(dev))
  6423. return -ENODEV;
  6424. err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
  6425. err = notifier_to_errno(err);
  6426. if (err)
  6427. return err;
  6428. orig_mtu = dev->mtu;
  6429. err = __dev_set_mtu(dev, new_mtu);
  6430. if (!err) {
  6431. err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
  6432. orig_mtu);
  6433. err = notifier_to_errno(err);
  6434. if (err) {
  6435. /* setting mtu back and notifying everyone again,
  6436. * so that they have a chance to revert changes.
  6437. */
  6438. __dev_set_mtu(dev, orig_mtu);
  6439. call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
  6440. new_mtu);
  6441. }
  6442. }
  6443. return err;
  6444. }
  6445. int dev_set_mtu(struct net_device *dev, int new_mtu)
  6446. {
  6447. struct netlink_ext_ack extack;
  6448. int err;
  6449. memset(&extack, 0, sizeof(extack));
  6450. err = dev_set_mtu_ext(dev, new_mtu, &extack);
  6451. if (err && extack._msg)
  6452. net_err_ratelimited("%s: %s\n", dev->name, extack._msg);
  6453. return err;
  6454. }
  6455. EXPORT_SYMBOL(dev_set_mtu);
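/* Illustrative in-kernel usage (a sketch; assumes process context and a
 * valid, held "dev"):
 *
 *	rtnl_lock();
 *	err = dev_set_mtu(dev, new_mtu);
 *	rtnl_unlock();
 *
 * RTNL must be held because the MTU change fires netdev notifiers and may
 * call into the driver's ndo_change_mtu().
 */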
  6456. /**
  6457. * dev_change_tx_queue_len - Change TX queue length of a netdevice
  6458. * @dev: device
  6459. * @new_len: new tx queue length
  6460. */
  6461. int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
  6462. {
  6463. unsigned int orig_len = dev->tx_queue_len;
  6464. int res;
  6465. if (new_len != (unsigned int)new_len)
  6466. return -ERANGE;
  6467. if (new_len != orig_len) {
  6468. dev->tx_queue_len = new_len;
  6469. res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
  6470. res = notifier_to_errno(res);
  6471. if (res)
  6472. goto err_rollback;
  6473. res = dev_qdisc_change_tx_queue_len(dev);
  6474. if (res)
  6475. goto err_rollback;
  6476. }
  6477. return 0;
  6478. err_rollback:
  6479. netdev_err(dev, "refused to change device tx_queue_len\n");
  6480. dev->tx_queue_len = orig_len;
  6481. return res;
  6482. }
  6483. /**
  6484. * dev_set_group - Change group this device belongs to
  6485. * @dev: device
  6486. * @new_group: group this device should belong to
  6487. */
  6488. void dev_set_group(struct net_device *dev, int new_group)
  6489. {
  6490. dev->group = new_group;
  6491. }
  6492. EXPORT_SYMBOL(dev_set_group);
  6493. /**
  6494. * dev_set_mac_address - Change Media Access Control Address
  6495. * @dev: device
  6496. * @sa: new address
  6497. *
  6498. * Change the hardware (MAC) address of the device
  6499. */
  6500. int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
  6501. {
  6502. const struct net_device_ops *ops = dev->netdev_ops;
  6503. int err;
  6504. if (!ops->ndo_set_mac_address)
  6505. return -EOPNOTSUPP;
  6506. if (sa->sa_family != dev->type)
  6507. return -EINVAL;
  6508. if (!netif_device_present(dev))
  6509. return -ENODEV;
  6510. err = ops->ndo_set_mac_address(dev, sa);
  6511. if (err)
  6512. return err;
  6513. dev->addr_assign_type = NET_ADDR_SET;
  6514. call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
  6515. add_device_randomness(dev->dev_addr, dev->addr_len);
  6516. return 0;
  6517. }
  6518. EXPORT_SYMBOL(dev_set_mac_address);
  6519. /**
  6520. * dev_change_carrier - Change device carrier
  6521. * @dev: device
  6522. * @new_carrier: new value
  6523. *
  6524. * Change device carrier
  6525. */
  6526. int dev_change_carrier(struct net_device *dev, bool new_carrier)
  6527. {
  6528. const struct net_device_ops *ops = dev->netdev_ops;
  6529. if (!ops->ndo_change_carrier)
  6530. return -EOPNOTSUPP;
  6531. if (!netif_device_present(dev))
  6532. return -ENODEV;
  6533. return ops->ndo_change_carrier(dev, new_carrier);
  6534. }
  6535. EXPORT_SYMBOL(dev_change_carrier);
  6536. /**
  6537. * dev_get_phys_port_id - Get device physical port ID
  6538. * @dev: device
  6539. * @ppid: port ID
  6540. *
  6541. * Get device physical port ID
  6542. */
  6543. int dev_get_phys_port_id(struct net_device *dev,
  6544. struct netdev_phys_item_id *ppid)
  6545. {
  6546. const struct net_device_ops *ops = dev->netdev_ops;
  6547. if (!ops->ndo_get_phys_port_id)
  6548. return -EOPNOTSUPP;
  6549. return ops->ndo_get_phys_port_id(dev, ppid);
  6550. }
  6551. EXPORT_SYMBOL(dev_get_phys_port_id);
  6552. /**
  6553. * dev_get_phys_port_name - Get device physical port name
  6554. * @dev: device
  6555. * @name: port name
  6556. * @len: limit of bytes to copy to name
  6557. *
  6558. * Get device physical port name
  6559. */
  6560. int dev_get_phys_port_name(struct net_device *dev,
  6561. char *name, size_t len)
  6562. {
  6563. const struct net_device_ops *ops = dev->netdev_ops;
  6564. if (!ops->ndo_get_phys_port_name)
  6565. return -EOPNOTSUPP;
  6566. return ops->ndo_get_phys_port_name(dev, name, len);
  6567. }
  6568. EXPORT_SYMBOL(dev_get_phys_port_name);
  6569. /**
  6570. * dev_change_proto_down - update protocol port state information
  6571. * @dev: device
  6572. * @proto_down: new value
  6573. *
  6574. * This info can be used by switch drivers to set the phys state of the
  6575. * port.
  6576. */
  6577. int dev_change_proto_down(struct net_device *dev, bool proto_down)
  6578. {
  6579. const struct net_device_ops *ops = dev->netdev_ops;
  6580. if (!ops->ndo_change_proto_down)
  6581. return -EOPNOTSUPP;
  6582. if (!netif_device_present(dev))
  6583. return -ENODEV;
  6584. return ops->ndo_change_proto_down(dev, proto_down);
  6585. }
  6586. EXPORT_SYMBOL(dev_change_proto_down);
  6587. u32 __dev_xdp_query(struct net_device *dev, bpf_op_t bpf_op,
  6588. enum bpf_netdev_command cmd)
  6589. {
  6590. struct netdev_bpf xdp;
  6591. if (!bpf_op)
  6592. return 0;
  6593. memset(&xdp, 0, sizeof(xdp));
  6594. xdp.command = cmd;
  6595. /* Query must always succeed. */
  6596. WARN_ON(bpf_op(dev, &xdp) < 0 && cmd == XDP_QUERY_PROG);
  6597. return xdp.prog_id;
  6598. }
  6599. static int dev_xdp_install(struct net_device *dev, bpf_op_t bpf_op,
  6600. struct netlink_ext_ack *extack, u32 flags,
  6601. struct bpf_prog *prog)
  6602. {
  6603. struct netdev_bpf xdp;
  6604. memset(&xdp, 0, sizeof(xdp));
  6605. if (flags & XDP_FLAGS_HW_MODE)
  6606. xdp.command = XDP_SETUP_PROG_HW;
  6607. else
  6608. xdp.command = XDP_SETUP_PROG;
  6609. xdp.extack = extack;
  6610. xdp.flags = flags;
  6611. xdp.prog = prog;
  6612. return bpf_op(dev, &xdp);
  6613. }
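/* Detach every XDP program from @dev: the generic (skb-mode) hook first,
 * then any driver-attached program, then any hardware-offloaded program.
 */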
  6614. static void dev_xdp_uninstall(struct net_device *dev)
  6615. {
  6616. struct netdev_bpf xdp;
  6617. bpf_op_t ndo_bpf;
  6618. /* Remove generic XDP */
  6619. WARN_ON(dev_xdp_install(dev, generic_xdp_install, NULL, 0, NULL));
  6620. /* Remove from the driver */
  6621. ndo_bpf = dev->netdev_ops->ndo_bpf;
  6622. if (!ndo_bpf)
  6623. return;
  6624. memset(&xdp, 0, sizeof(xdp));
  6625. xdp.command = XDP_QUERY_PROG;
  6626. WARN_ON(ndo_bpf(dev, &xdp));
  6627. if (xdp.prog_id)
  6628. WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags,
  6629. NULL));
  6630. /* Remove HW offload */
  6631. memset(&xdp, 0, sizeof(xdp));
  6632. xdp.command = XDP_QUERY_PROG_HW;
  6633. if (!ndo_bpf(dev, &xdp) && xdp.prog_id)
  6634. WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags,
  6635. NULL));
  6636. }
  6637. /**
  6638. * dev_change_xdp_fd - set or clear a bpf program for a device rx path
  6639. * @dev: device
  6640. * @extack: netlink extended ack
  6641. * @fd: new program fd or negative value to clear
  6642. * @flags: xdp-related flags
  6643. *
  6644. * Set or clear a bpf program for a device
  6645. */
  6646. int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
  6647. int fd, u32 flags)
  6648. {
  6649. const struct net_device_ops *ops = dev->netdev_ops;
  6650. enum bpf_netdev_command query;
  6651. struct bpf_prog *prog = NULL;
  6652. bpf_op_t bpf_op, bpf_chk;
  6653. int err;
  6654. ASSERT_RTNL();
  6655. query = flags & XDP_FLAGS_HW_MODE ? XDP_QUERY_PROG_HW : XDP_QUERY_PROG;
  6656. bpf_op = bpf_chk = ops->ndo_bpf;
  6657. if (!bpf_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE)))
  6658. return -EOPNOTSUPP;
  6659. if (!bpf_op || (flags & XDP_FLAGS_SKB_MODE))
  6660. bpf_op = generic_xdp_install;
  6661. if (bpf_op == bpf_chk)
  6662. bpf_chk = generic_xdp_install;
  6663. if (fd >= 0) {
  6664. if (__dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG) ||
  6665. __dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG_HW))
  6666. return -EEXIST;
  6667. if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) &&
  6668. __dev_xdp_query(dev, bpf_op, query))
  6669. return -EBUSY;
  6670. prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
  6671. bpf_op == ops->ndo_bpf);
  6672. if (IS_ERR(prog))
  6673. return PTR_ERR(prog);
  6674. if (!(flags & XDP_FLAGS_HW_MODE) &&
  6675. bpf_prog_is_dev_bound(prog->aux)) {
  6676. NL_SET_ERR_MSG(extack, "using device-bound program without HW_MODE flag is not supported");
  6677. bpf_prog_put(prog);
  6678. return -EINVAL;
  6679. }
  6680. }
  6681. err = dev_xdp_install(dev, bpf_op, extack, flags, prog);
  6682. if (err < 0 && prog)
  6683. bpf_prog_put(prog);
  6684. return err;
  6685. }
  6686. /**
  6687. * dev_new_index - allocate an ifindex
  6688. * @net: the applicable net namespace
  6689. *
  6690. * Returns a suitable unique value for a new device interface
  6691. * number. The caller must hold the rtnl semaphore or the
  6692. * dev_base_lock to be sure it remains unique.
  6693. */
  6694. static int dev_new_index(struct net *net)
  6695. {
  6696. int ifindex = net->ifindex;
  6697. for (;;) {
  6698. if (++ifindex <= 0)
  6699. ifindex = 1;
  6700. if (!__dev_get_by_index(net, ifindex))
  6701. return net->ifindex = ifindex;
  6702. }
  6703. }
6704. /* Delayed registration/unregistration */
  6705. static LIST_HEAD(net_todo_list);
  6706. DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
  6707. static void net_set_todo(struct net_device *dev)
  6708. {
  6709. list_add_tail(&dev->todo_list, &net_todo_list);
  6710. dev_net(dev)->dev_unreg_count++;
  6711. }
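/* Tear down a list of devices being unregistered: close them, unlink them
 * from the device chain, flush backlogs, send NETDEV_UNREGISTER, flush
 * their address lists, remove kobject/sysfs state and drop a device
 * reference.
 */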
  6712. static void rollback_registered_many(struct list_head *head)
  6713. {
  6714. struct net_device *dev, *tmp;
  6715. LIST_HEAD(close_head);
  6716. BUG_ON(dev_boot_phase);
  6717. ASSERT_RTNL();
  6718. list_for_each_entry_safe(dev, tmp, head, unreg_list) {
6719. /* Some devices get unregistered here without ever having been
6720. * registered, as part of initialization unwind. Remove those
6721. * devices and proceed with the remaining.
  6722. */
  6723. if (dev->reg_state == NETREG_UNINITIALIZED) {
  6724. pr_debug("unregister_netdevice: device %s/%p never was registered\n",
  6725. dev->name, dev);
  6726. WARN_ON(1);
  6727. list_del(&dev->unreg_list);
  6728. continue;
  6729. }
  6730. dev->dismantle = true;
  6731. BUG_ON(dev->reg_state != NETREG_REGISTERED);
  6732. }
  6733. /* If device is running, close it first. */
  6734. list_for_each_entry(dev, head, unreg_list)
  6735. list_add_tail(&dev->close_list, &close_head);
  6736. dev_close_many(&close_head, true);
  6737. list_for_each_entry(dev, head, unreg_list) {
  6738. /* And unlink it from device chain. */
  6739. unlist_netdevice(dev);
  6740. dev->reg_state = NETREG_UNREGISTERING;
  6741. }
  6742. flush_all_backlogs();
  6743. synchronize_net();
  6744. list_for_each_entry(dev, head, unreg_list) {
  6745. struct sk_buff *skb = NULL;
  6746. /* Shutdown queueing discipline. */
  6747. dev_shutdown(dev);
  6748. dev_xdp_uninstall(dev);
6749. /* Notify protocols that we are about to destroy
6750. * this device, so they can clean up all their state.
  6751. */
  6752. call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
  6753. if (!dev->rtnl_link_ops ||
  6754. dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
  6755. skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
  6756. GFP_KERNEL, NULL, 0);
  6757. /*
  6758. * Flush the unicast and multicast chains
  6759. */
  6760. dev_uc_flush(dev);
  6761. dev_mc_flush(dev);
  6762. if (dev->netdev_ops->ndo_uninit)
  6763. dev->netdev_ops->ndo_uninit(dev);
  6764. if (skb)
  6765. rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
6766. /* The notifier chain MUST have detached all upper/lower devices from us. */
  6767. WARN_ON(netdev_has_any_upper_dev(dev));
  6768. WARN_ON(netdev_has_any_lower_dev(dev));
  6769. /* Remove entries from kobject tree */
  6770. netdev_unregister_kobject(dev);
  6771. #ifdef CONFIG_XPS
  6772. /* Remove XPS queueing entries */
  6773. netif_reset_xps_queues_gt(dev, 0);
  6774. #endif
  6775. }
  6776. synchronize_net();
  6777. list_for_each_entry(dev, head, unreg_list)
  6778. dev_put(dev);
  6779. }
  6780. static void rollback_registered(struct net_device *dev)
  6781. {
  6782. LIST_HEAD(single);
  6783. list_add(&dev->unreg_list, &single);
  6784. rollback_registered_many(&single);
  6785. list_del(&single);
  6786. }
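/* For features in NETIF_F_UPPER_DISABLES: an upper device that has such a
 * feature turned off forces it off on @lower as well; returns the reduced
 * feature mask.
 */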
  6787. static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
  6788. struct net_device *upper, netdev_features_t features)
  6789. {
  6790. netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
  6791. netdev_features_t feature;
  6792. int feature_bit;
  6793. for_each_netdev_feature(upper_disables, feature_bit) {
  6794. feature = __NETIF_F_BIT(feature_bit);
  6795. if (!(upper->wanted_features & feature)
  6796. && (features & feature)) {
  6797. netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
  6798. &feature, upper->name);
  6799. features &= ~feature;
  6800. }
  6801. }
  6802. return features;
  6803. }
  6804. static void netdev_sync_lower_features(struct net_device *upper,
  6805. struct net_device *lower, netdev_features_t features)
  6806. {
  6807. netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
  6808. netdev_features_t feature;
  6809. int feature_bit;
  6810. for_each_netdev_feature(upper_disables, feature_bit) {
  6811. feature = __NETIF_F_BIT(feature_bit);
  6812. if (!(features & feature) && (lower->features & feature)) {
  6813. netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
  6814. &feature, lower->name);
  6815. lower->wanted_features &= ~feature;
  6816. netdev_update_features(lower);
  6817. if (unlikely(lower->features & feature))
  6818. netdev_WARN(upper, "failed to disable %pNF on %s!\n",
  6819. &feature, lower->name);
  6820. }
  6821. }
  6822. }
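/* Sanitize a feature mask: drop combinations that cannot work together,
 * e.g. TSO without SG or checksum offload, GRO_HW without RXCSUM, or
 * LRO/GRO_HW together with RX-FCS.
 */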
  6823. static netdev_features_t netdev_fix_features(struct net_device *dev,
  6824. netdev_features_t features)
  6825. {
  6826. /* Fix illegal checksum combinations */
  6827. if ((features & NETIF_F_HW_CSUM) &&
  6828. (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
  6829. netdev_warn(dev, "mixed HW and IP checksum settings.\n");
  6830. features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
  6831. }
  6832. /* TSO requires that SG is present as well. */
  6833. if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
  6834. netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
  6835. features &= ~NETIF_F_ALL_TSO;
  6836. }
  6837. if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
  6838. !(features & NETIF_F_IP_CSUM)) {
  6839. netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
  6840. features &= ~NETIF_F_TSO;
  6841. features &= ~NETIF_F_TSO_ECN;
  6842. }
  6843. if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
  6844. !(features & NETIF_F_IPV6_CSUM)) {
  6845. netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
  6846. features &= ~NETIF_F_TSO6;
  6847. }
  6848. /* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
  6849. if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
  6850. features &= ~NETIF_F_TSO_MANGLEID;
  6851. /* TSO ECN requires that TSO is present as well. */
  6852. if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
  6853. features &= ~NETIF_F_TSO_ECN;
  6854. /* Software GSO depends on SG. */
  6855. if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
  6856. netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
  6857. features &= ~NETIF_F_GSO;
  6858. }
  6859. /* GSO partial features require GSO partial be set */
  6860. if ((features & dev->gso_partial_features) &&
  6861. !(features & NETIF_F_GSO_PARTIAL)) {
  6862. netdev_dbg(dev,
  6863. "Dropping partially supported GSO features since no GSO partial.\n");
  6864. features &= ~dev->gso_partial_features;
  6865. }
  6866. if (!(features & NETIF_F_RXCSUM)) {
  6867. /* NETIF_F_GRO_HW implies doing RXCSUM since every packet
  6868. * successfully merged by hardware must also have the
  6869. * checksum verified by hardware. If the user does not
  6870. * want to enable RXCSUM, logically, we should disable GRO_HW.
  6871. */
  6872. if (features & NETIF_F_GRO_HW) {
  6873. netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n");
  6874. features &= ~NETIF_F_GRO_HW;
  6875. }
  6876. }
  6877. /* LRO/HW-GRO features cannot be combined with RX-FCS */
  6878. if (features & NETIF_F_RXFCS) {
  6879. if (features & NETIF_F_LRO) {
  6880. netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n");
  6881. features &= ~NETIF_F_LRO;
  6882. }
  6883. if (features & NETIF_F_GRO_HW) {
  6884. netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n");
  6885. features &= ~NETIF_F_GRO_HW;
  6886. }
  6887. }
  6888. return features;
  6889. }
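/*
 * Usage sketch (illustrative, not part of dev.c): a driver-level
 * .ndo_fix_features callback enforcing a device-specific dependency that
 * the generic netdev_fix_features() above cannot know about.  The "foo"
 * names and the TSO-needs-RXCSUM rule are hypothetical.
 */
static netdev_features_t foo_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	/* assumed hardware restriction: TSO only works with RX checksumming */
	if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_RXCSUM)) {
		netdev_dbg(dev, "dropping TSO: RXCSUM is disabled\n");
		features &= ~NETIF_F_ALL_TSO;
	}
	return features;
}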
  6890. int __netdev_update_features(struct net_device *dev)
  6891. {
  6892. struct net_device *upper, *lower;
  6893. netdev_features_t features;
  6894. struct list_head *iter;
  6895. int err = -1;
  6896. ASSERT_RTNL();
  6897. features = netdev_get_wanted_features(dev);
  6898. if (dev->netdev_ops->ndo_fix_features)
  6899. features = dev->netdev_ops->ndo_fix_features(dev, features);
  6900. /* driver might be less strict about feature dependencies */
  6901. features = netdev_fix_features(dev, features);
6902. /* some features can't be enabled if they're off on an upper device */
  6903. netdev_for_each_upper_dev_rcu(dev, upper, iter)
  6904. features = netdev_sync_upper_features(dev, upper, features);
  6905. if (dev->features == features)
  6906. goto sync_lower;
  6907. netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
  6908. &dev->features, &features);
  6909. if (dev->netdev_ops->ndo_set_features)
  6910. err = dev->netdev_ops->ndo_set_features(dev, features);
  6911. else
  6912. err = 0;
  6913. if (unlikely(err < 0)) {
  6914. netdev_err(dev,
  6915. "set_features() failed (%d); wanted %pNF, left %pNF\n",
  6916. err, &features, &dev->features);
  6917. /* return non-0 since some features might have changed and
  6918. * it's better to fire a spurious notification than miss it
  6919. */
  6920. return -1;
  6921. }
  6922. sync_lower:
  6923. /* some features must be disabled on lower devices when disabled
  6924. * on an upper device (think: bonding master or bridge)
  6925. */
  6926. netdev_for_each_lower_dev(dev, lower, iter)
  6927. netdev_sync_lower_features(dev, lower, features);
  6928. if (!err) {
  6929. netdev_features_t diff = features ^ dev->features;
  6930. if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) {
  6931. /* udp_tunnel_{get,drop}_rx_info both need
  6932. * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the
  6933. * device, or they won't do anything.
  6934. * Thus we need to update dev->features
  6935. * *before* calling udp_tunnel_get_rx_info,
  6936. * but *after* calling udp_tunnel_drop_rx_info.
  6937. */
  6938. if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
  6939. dev->features = features;
  6940. udp_tunnel_get_rx_info(dev);
  6941. } else {
  6942. udp_tunnel_drop_rx_info(dev);
  6943. }
  6944. }
  6945. if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) {
  6946. if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
  6947. dev->features = features;
  6948. err |= vlan_get_rx_ctag_filter_info(dev);
  6949. } else {
  6950. vlan_drop_rx_ctag_filter_info(dev);
  6951. }
  6952. }
  6953. if (diff & NETIF_F_HW_VLAN_STAG_FILTER) {
  6954. if (features & NETIF_F_HW_VLAN_STAG_FILTER) {
  6955. dev->features = features;
  6956. err |= vlan_get_rx_stag_filter_info(dev);
  6957. } else {
  6958. vlan_drop_rx_stag_filter_info(dev);
  6959. }
  6960. }
  6961. dev->features = features;
  6962. }
  6963. return err < 0 ? 0 : 1;
  6964. }
  6965. /**
  6966. * netdev_update_features - recalculate device features
  6967. * @dev: the device to check
  6968. *
  6969. * Recalculate dev->features set and send notifications if it
  6970. * has changed. Should be called after driver or hardware dependent
  6971. * conditions might have changed that influence the features.
  6972. */
  6973. void netdev_update_features(struct net_device *dev)
  6974. {
  6975. if (__netdev_update_features(dev))
  6976. netdev_features_change(dev);
  6977. }
  6978. EXPORT_SYMBOL(netdev_update_features);
  6979. /**
  6980. * netdev_change_features - recalculate device features
  6981. * @dev: the device to check
  6982. *
  6983. * Recalculate dev->features set and send notifications even
  6984. * if they have not changed. Should be called instead of
  6985. * netdev_update_features() if also dev->vlan_features might
  6986. * have changed to allow the changes to be propagated to stacked
  6987. * VLAN devices.
  6988. */
  6989. void netdev_change_features(struct net_device *dev)
  6990. {
  6991. __netdev_update_features(dev);
  6992. netdev_features_change(dev);
  6993. }
  6994. EXPORT_SYMBOL(netdev_change_features);
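/*
 * Usage sketch (illustrative, not part of dev.c): how a driver asks the
 * core to recompute dev->features after a hardware or firmware event has
 * changed what the device can offload.  Must run under rtnl_lock(); the
 * "foo" name and the lost-TSO scenario are hypothetical.
 */
static void foo_handle_fw_lost_tso(struct net_device *dev)
{
	rtnl_lock();
	dev->hw_features &= ~NETIF_F_ALL_TSO;	/* capability is gone */
	netdev_update_features(dev);		/* recompute and notify */
	rtnl_unlock();
}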
  6995. /**
  6996. * netif_stacked_transfer_operstate - transfer operstate
  6997. * @rootdev: the root or lower level device to transfer state from
  6998. * @dev: the device to transfer operstate to
  6999. *
  7000. * Transfer operational state from root to device. This is normally
  7001. * called when a stacking relationship exists between the root
7002. * device and the device (a leaf device).
  7003. */
  7004. void netif_stacked_transfer_operstate(const struct net_device *rootdev,
  7005. struct net_device *dev)
  7006. {
  7007. if (rootdev->operstate == IF_OPER_DORMANT)
  7008. netif_dormant_on(dev);
  7009. else
  7010. netif_dormant_off(dev);
  7011. if (netif_carrier_ok(rootdev))
  7012. netif_carrier_on(dev);
  7013. else
  7014. netif_carrier_off(dev);
  7015. }
  7016. EXPORT_SYMBOL(netif_stacked_transfer_operstate);
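/*
 * Usage sketch (illustrative, not part of dev.c): a stacked ("upper")
 * device mirroring carrier/dormant state from the real device below it,
 * the way VLAN-style drivers do from their netdevice notifier.  The two
 * static pointers stand in for the driver's own bookkeeping and are
 * hypothetical.
 */
static struct net_device *foo_upper;	/* the stacked device we created */
static struct net_device *foo_lower;	/* the real device it sits on */

static int foo_netdev_event(struct notifier_block *nb,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (dev == foo_lower && event == NETDEV_CHANGE)
		netif_stacked_transfer_operstate(foo_lower, foo_upper);
	return NOTIFY_DONE;
}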
  7017. static int netif_alloc_rx_queues(struct net_device *dev)
  7018. {
  7019. unsigned int i, count = dev->num_rx_queues;
  7020. struct netdev_rx_queue *rx;
  7021. size_t sz = count * sizeof(*rx);
  7022. int err = 0;
  7023. BUG_ON(count < 1);
  7024. rx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
  7025. if (!rx)
  7026. return -ENOMEM;
  7027. dev->_rx = rx;
  7028. for (i = 0; i < count; i++) {
  7029. rx[i].dev = dev;
  7030. /* XDP RX-queue setup */
  7031. err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i);
  7032. if (err < 0)
  7033. goto err_rxq_info;
  7034. }
  7035. return 0;
  7036. err_rxq_info:
  7037. /* Rollback successful reg's and free other resources */
  7038. while (i--)
  7039. xdp_rxq_info_unreg(&rx[i].xdp_rxq);
  7040. kvfree(dev->_rx);
  7041. dev->_rx = NULL;
  7042. return err;
  7043. }
  7044. static void netif_free_rx_queues(struct net_device *dev)
  7045. {
  7046. unsigned int i, count = dev->num_rx_queues;
  7047. /* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */
  7048. if (!dev->_rx)
  7049. return;
  7050. for (i = 0; i < count; i++)
  7051. xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq);
  7052. kvfree(dev->_rx);
  7053. }
  7054. static void netdev_init_one_queue(struct net_device *dev,
  7055. struct netdev_queue *queue, void *_unused)
  7056. {
  7057. /* Initialize queue lock */
  7058. spin_lock_init(&queue->_xmit_lock);
  7059. netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
  7060. queue->xmit_lock_owner = -1;
  7061. netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
  7062. queue->dev = dev;
  7063. #ifdef CONFIG_BQL
  7064. dql_init(&queue->dql, HZ);
  7065. #endif
  7066. }
  7067. static void netif_free_tx_queues(struct net_device *dev)
  7068. {
  7069. kvfree(dev->_tx);
  7070. }
  7071. static int netif_alloc_netdev_queues(struct net_device *dev)
  7072. {
  7073. unsigned int count = dev->num_tx_queues;
  7074. struct netdev_queue *tx;
  7075. size_t sz = count * sizeof(*tx);
  7076. if (count < 1 || count > 0xffff)
  7077. return -EINVAL;
  7078. tx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
  7079. if (!tx)
  7080. return -ENOMEM;
  7081. dev->_tx = tx;
  7082. netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
  7083. spin_lock_init(&dev->tx_global_lock);
  7084. return 0;
  7085. }
  7086. void netif_tx_stop_all_queues(struct net_device *dev)
  7087. {
  7088. unsigned int i;
  7089. for (i = 0; i < dev->num_tx_queues; i++) {
  7090. struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
  7091. netif_tx_stop_queue(txq);
  7092. }
  7093. }
  7094. EXPORT_SYMBOL(netif_tx_stop_all_queues);
  7095. /**
  7096. * register_netdevice - register a network device
  7097. * @dev: device to register
  7098. *
  7099. * Take a completed network device structure and add it to the kernel
  7100. * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
  7101. * chain. 0 is returned on success. A negative errno code is returned
  7102. * on a failure to set up the device, or if the name is a duplicate.
  7103. *
  7104. * Callers must hold the rtnl semaphore. You may want
  7105. * register_netdev() instead of this.
  7106. *
  7107. * BUGS:
  7108. * The locking appears insufficient to guarantee two parallel registers
  7109. * will not get the same name.
  7110. */
  7111. int register_netdevice(struct net_device *dev)
  7112. {
  7113. int ret;
  7114. struct net *net = dev_net(dev);
  7115. BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE <
  7116. NETDEV_FEATURE_COUNT);
  7117. BUG_ON(dev_boot_phase);
  7118. ASSERT_RTNL();
  7119. might_sleep();
  7120. /* When net_device's are persistent, this will be fatal. */
  7121. BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
  7122. BUG_ON(!net);
  7123. spin_lock_init(&dev->addr_list_lock);
  7124. netdev_set_addr_lockdep_class(dev);
  7125. ret = dev_get_valid_name(net, dev, dev->name);
  7126. if (ret < 0)
  7127. goto out;
  7128. /* Init, if this function is available */
  7129. if (dev->netdev_ops->ndo_init) {
  7130. ret = dev->netdev_ops->ndo_init(dev);
  7131. if (ret) {
  7132. if (ret > 0)
  7133. ret = -EIO;
  7134. goto out;
  7135. }
  7136. }
  7137. if (((dev->hw_features | dev->features) &
  7138. NETIF_F_HW_VLAN_CTAG_FILTER) &&
  7139. (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
  7140. !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
  7141. netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
  7142. ret = -EINVAL;
  7143. goto err_uninit;
  7144. }
  7145. ret = -EBUSY;
  7146. if (!dev->ifindex)
  7147. dev->ifindex = dev_new_index(net);
  7148. else if (__dev_get_by_index(net, dev->ifindex))
  7149. goto err_uninit;
  7150. /* Transfer changeable features to wanted_features and enable
  7151. * software offloads (GSO and GRO).
  7152. */
  7153. dev->hw_features |= NETIF_F_SOFT_FEATURES;
  7154. dev->features |= NETIF_F_SOFT_FEATURES;
  7155. if (dev->netdev_ops->ndo_udp_tunnel_add) {
  7156. dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
  7157. dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
  7158. }
  7159. dev->wanted_features = dev->features & dev->hw_features;
  7160. if (!(dev->flags & IFF_LOOPBACK))
  7161. dev->hw_features |= NETIF_F_NOCACHE_COPY;
  7162. /* If IPv4 TCP segmentation offload is supported we should also
  7163. * allow the device to enable segmenting the frame with the option
  7164. * of ignoring a static IP ID value. This doesn't enable the
  7165. * feature itself but allows the user to enable it later.
  7166. */
  7167. if (dev->hw_features & NETIF_F_TSO)
  7168. dev->hw_features |= NETIF_F_TSO_MANGLEID;
  7169. if (dev->vlan_features & NETIF_F_TSO)
  7170. dev->vlan_features |= NETIF_F_TSO_MANGLEID;
  7171. if (dev->mpls_features & NETIF_F_TSO)
  7172. dev->mpls_features |= NETIF_F_TSO_MANGLEID;
  7173. if (dev->hw_enc_features & NETIF_F_TSO)
  7174. dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
  7175. /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
  7176. */
  7177. dev->vlan_features |= NETIF_F_HIGHDMA;
  7178. /* Make NETIF_F_SG inheritable to tunnel devices.
  7179. */
  7180. dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;
  7181. /* Make NETIF_F_SG inheritable to MPLS.
  7182. */
  7183. dev->mpls_features |= NETIF_F_SG;
  7184. ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
  7185. ret = notifier_to_errno(ret);
  7186. if (ret)
  7187. goto err_uninit;
  7188. ret = netdev_register_kobject(dev);
  7189. if (ret)
  7190. goto err_uninit;
  7191. dev->reg_state = NETREG_REGISTERED;
  7192. __netdev_update_features(dev);
  7193. /*
  7194. * Default initial state at registry is that the
  7195. * device is present.
  7196. */
  7197. set_bit(__LINK_STATE_PRESENT, &dev->state);
  7198. linkwatch_init_dev(dev);
  7199. dev_init_scheduler(dev);
  7200. dev_hold(dev);
  7201. list_netdevice(dev);
  7202. add_device_randomness(dev->dev_addr, dev->addr_len);
  7203. /* If the device has permanent device address, driver should
  7204. * set dev_addr and also addr_assign_type should be set to
  7205. * NET_ADDR_PERM (default value).
  7206. */
  7207. if (dev->addr_assign_type == NET_ADDR_PERM)
  7208. memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
  7209. /* Notify protocols, that a new device appeared. */
  7210. ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
  7211. ret = notifier_to_errno(ret);
  7212. if (ret) {
  7213. rollback_registered(dev);
  7214. dev->reg_state = NETREG_UNREGISTERED;
  7215. }
  7216. /*
  7217. * Prevent userspace races by waiting until the network
  7218. * device is fully setup before sending notifications.
  7219. */
  7220. if (!dev->rtnl_link_ops ||
  7221. dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
  7222. rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
  7223. out:
  7224. return ret;
  7225. err_uninit:
  7226. if (dev->netdev_ops->ndo_uninit)
  7227. dev->netdev_ops->ndo_uninit(dev);
  7228. if (dev->priv_destructor)
  7229. dev->priv_destructor(dev);
  7230. goto out;
  7231. }
  7232. EXPORT_SYMBOL(register_netdevice);
  7233. /**
  7234. * init_dummy_netdev - init a dummy network device for NAPI
  7235. * @dev: device to init
  7236. *
  7237. * This takes a network device structure and initialize the minimum
  7238. * amount of fields so it can be used to schedule NAPI polls without
  7239. * registering a full blown interface. This is to be used by drivers
  7240. * that need to tie several hardware interfaces to a single NAPI
  7241. * poll scheduler due to HW limitations.
  7242. */
  7243. int init_dummy_netdev(struct net_device *dev)
  7244. {
  7245. /* Clear everything. Note we don't initialize spinlocks
7246. * as they aren't supposed to be taken by any of the
  7247. * NAPI code and this dummy netdev is supposed to be
  7248. * only ever used for NAPI polls
  7249. */
  7250. memset(dev, 0, sizeof(struct net_device));
  7251. /* make sure we BUG if trying to hit standard
  7252. * register/unregister code path
  7253. */
  7254. dev->reg_state = NETREG_DUMMY;
  7255. /* NAPI wants this */
  7256. INIT_LIST_HEAD(&dev->napi_list);
  7257. /* a dummy interface is started by default */
  7258. set_bit(__LINK_STATE_PRESENT, &dev->state);
  7259. set_bit(__LINK_STATE_START, &dev->state);
  7260. /* napi_busy_loop stats accounting wants this */
  7261. dev_net_set(dev, &init_net);
7262. /* Note: We don't allocate pcpu_refcnt for dummy devices,
7263. * because users of this 'device' don't need to change
  7264. * its refcount.
  7265. */
  7266. return 0;
  7267. }
  7268. EXPORT_SYMBOL_GPL(init_dummy_netdev);
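/*
 * Usage sketch (illustrative, not part of dev.c): a driver that funnels
 * several hardware channels into one NAPI context backs that NAPI
 * instance with a dummy netdev, since NAPI needs a struct net_device to
 * hang off.  Names are hypothetical and the poll function is supplied by
 * the caller.
 */
static struct net_device foo_napi_dev;
static struct napi_struct foo_napi;

static void foo_setup_shared_napi(int (*poll)(struct napi_struct *, int))
{
	init_dummy_netdev(&foo_napi_dev);
	netif_napi_add(&foo_napi_dev, &foo_napi, poll, NAPI_POLL_WEIGHT);
	napi_enable(&foo_napi);
}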
  7269. /**
  7270. * register_netdev - register a network device
  7271. * @dev: device to register
  7272. *
  7273. * Take a completed network device structure and add it to the kernel
  7274. * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
  7275. * chain. 0 is returned on success. A negative errno code is returned
  7276. * on a failure to set up the device, or if the name is a duplicate.
  7277. *
  7278. * This is a wrapper around register_netdevice that takes the rtnl semaphore
  7279. * and expands the device name if you passed a format string to
  7280. * alloc_netdev.
  7281. */
  7282. int register_netdev(struct net_device *dev)
  7283. {
  7284. int err;
  7285. if (rtnl_lock_killable())
  7286. return -EINTR;
  7287. err = register_netdevice(dev);
  7288. rtnl_unlock();
  7289. return err;
  7290. }
  7291. EXPORT_SYMBOL(register_netdev);
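/*
 * Usage sketch (illustrative, not part of dev.c): the usual probe-time
 * flow around register_netdev(), which takes the rtnl semaphore itself.
 * struct foo_priv and foo_netdev_ops are hypothetical placeholders for a
 * real driver's private state and ops.
 */
struct foo_priv {
	int link_up;			/* per-device driver state */
};
static const struct net_device_ops foo_netdev_ops;

static int foo_probe(struct device *parent)
{
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(sizeof(struct foo_priv));
	if (!dev)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, parent);
	dev->netdev_ops = &foo_netdev_ops;

	err = register_netdev(dev);	/* locks rtnl internally */
	if (err) {
		free_netdev(dev);
		return err;
	}
	return 0;
}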
  7292. int netdev_refcnt_read(const struct net_device *dev)
  7293. {
  7294. int i, refcnt = 0;
  7295. for_each_possible_cpu(i)
  7296. refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
  7297. return refcnt;
  7298. }
  7299. EXPORT_SYMBOL(netdev_refcnt_read);
  7300. /**
  7301. * netdev_wait_allrefs - wait until all references are gone.
  7302. * @dev: target net_device
  7303. *
  7304. * This is called when unregistering network devices.
  7305. *
  7306. * Any protocol or device that holds a reference should register
  7307. * for netdevice notification, and cleanup and put back the
  7308. * reference if they receive an UNREGISTER event.
  7309. * We can get stuck here if buggy protocols don't correctly
  7310. * call dev_put.
  7311. */
  7312. static void netdev_wait_allrefs(struct net_device *dev)
  7313. {
  7314. unsigned long rebroadcast_time, warning_time;
  7315. int refcnt;
  7316. linkwatch_forget_dev(dev);
  7317. rebroadcast_time = warning_time = jiffies;
  7318. refcnt = netdev_refcnt_read(dev);
  7319. while (refcnt != 0) {
  7320. if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
  7321. rtnl_lock();
  7322. /* Rebroadcast unregister notification */
  7323. call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
  7324. __rtnl_unlock();
  7325. rcu_barrier();
  7326. rtnl_lock();
  7327. if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
  7328. &dev->state)) {
  7329. /* We must not have linkwatch events
  7330. * pending on unregister. If this
  7331. * happens, we simply run the queue
  7332. * unscheduled, resulting in a noop
  7333. * for this device.
  7334. */
  7335. linkwatch_run_queue();
  7336. }
  7337. __rtnl_unlock();
  7338. rebroadcast_time = jiffies;
  7339. }
  7340. msleep(250);
  7341. refcnt = netdev_refcnt_read(dev);
  7342. if (time_after(jiffies, warning_time + 10 * HZ)) {
  7343. pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
  7344. dev->name, refcnt);
  7345. warning_time = jiffies;
  7346. }
  7347. }
  7348. }
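/*
 * Usage sketch (illustrative, not part of dev.c): the contract described
 * above seen from a reference holder's side.  Code that dev_hold()s a
 * device registers a netdevice notifier and drops its reference on
 * NETDEV_UNREGISTER, so netdev_wait_allrefs() can finish.  The cached
 * pointer is hypothetical.
 */
static struct net_device *foo_cached_dev;	/* holds a dev_hold() reference */

static int foo_refholder_event(struct notifier_block *nb,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_UNREGISTER && dev == foo_cached_dev) {
		dev_put(foo_cached_dev);	/* give the reference back */
		foo_cached_dev = NULL;
	}
	return NOTIFY_DONE;
}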
  7349. /* The sequence is:
  7350. *
  7351. * rtnl_lock();
  7352. * ...
  7353. * register_netdevice(x1);
  7354. * register_netdevice(x2);
  7355. * ...
  7356. * unregister_netdevice(y1);
  7357. * unregister_netdevice(y2);
  7358. * ...
  7359. * rtnl_unlock();
  7360. * free_netdev(y1);
  7361. * free_netdev(y2);
  7362. *
  7363. * We are invoked by rtnl_unlock().
  7364. * This allows us to deal with problems:
  7365. * 1) We can delete sysfs objects which invoke hotplug
  7366. * without deadlocking with linkwatch via keventd.
  7367. * 2) Since we run with the RTNL semaphore not held, we can sleep
  7368. * safely in order to wait for the netdev refcnt to drop to zero.
  7369. *
  7370. * We must not return until all unregister events added during
  7371. * the interval the lock was held have been completed.
  7372. */
  7373. void netdev_run_todo(void)
  7374. {
  7375. struct list_head list;
  7376. /* Snapshot list, allow later requests */
  7377. list_replace_init(&net_todo_list, &list);
  7378. __rtnl_unlock();
  7379. /* Wait for rcu callbacks to finish before next phase */
  7380. if (!list_empty(&list))
  7381. rcu_barrier();
  7382. while (!list_empty(&list)) {
  7383. struct net_device *dev
  7384. = list_first_entry(&list, struct net_device, todo_list);
  7385. list_del(&dev->todo_list);
  7386. if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
  7387. pr_err("network todo '%s' but state %d\n",
  7388. dev->name, dev->reg_state);
  7389. dump_stack();
  7390. continue;
  7391. }
  7392. dev->reg_state = NETREG_UNREGISTERED;
  7393. netdev_wait_allrefs(dev);
  7394. /* paranoia */
  7395. BUG_ON(netdev_refcnt_read(dev));
  7396. BUG_ON(!list_empty(&dev->ptype_all));
  7397. BUG_ON(!list_empty(&dev->ptype_specific));
  7398. WARN_ON(rcu_access_pointer(dev->ip_ptr));
  7399. WARN_ON(rcu_access_pointer(dev->ip6_ptr));
  7400. #if IS_ENABLED(CONFIG_DECNET)
  7401. WARN_ON(dev->dn_ptr);
  7402. #endif
  7403. if (dev->priv_destructor)
  7404. dev->priv_destructor(dev);
  7405. if (dev->needs_free_netdev)
  7406. free_netdev(dev);
  7407. /* Report a network device has been unregistered */
  7408. rtnl_lock();
  7409. dev_net(dev)->dev_unreg_count--;
  7410. __rtnl_unlock();
  7411. wake_up(&netdev_unregistering_wq);
  7412. /* Free network device */
  7413. kobject_put(&dev->dev.kobj);
  7414. }
  7415. }
  7416. /* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
  7417. * all the same fields in the same order as net_device_stats, with only
  7418. * the type differing, but rtnl_link_stats64 may have additional fields
  7419. * at the end for newer counters.
  7420. */
  7421. void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
  7422. const struct net_device_stats *netdev_stats)
  7423. {
  7424. #if BITS_PER_LONG == 64
  7425. BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
  7426. memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
  7427. /* zero out counters that only exist in rtnl_link_stats64 */
  7428. memset((char *)stats64 + sizeof(*netdev_stats), 0,
  7429. sizeof(*stats64) - sizeof(*netdev_stats));
  7430. #else
  7431. size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
  7432. const unsigned long *src = (const unsigned long *)netdev_stats;
  7433. u64 *dst = (u64 *)stats64;
  7434. BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
  7435. for (i = 0; i < n; i++)
  7436. dst[i] = src[i];
  7437. /* zero out counters that only exist in rtnl_link_stats64 */
  7438. memset((char *)stats64 + n * sizeof(u64), 0,
  7439. sizeof(*stats64) - n * sizeof(u64));
  7440. #endif
  7441. }
  7442. EXPORT_SYMBOL(netdev_stats_to_stats64);
  7443. /**
  7444. * dev_get_stats - get network device statistics
  7445. * @dev: device to get statistics from
  7446. * @storage: place to store stats
  7447. *
  7448. * Get network statistics from device. Return @storage.
  7449. * The device driver may provide its own method by setting
  7450. * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
  7451. * otherwise the internal statistics structure is used.
  7452. */
  7453. struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
  7454. struct rtnl_link_stats64 *storage)
  7455. {
  7456. const struct net_device_ops *ops = dev->netdev_ops;
  7457. if (ops->ndo_get_stats64) {
  7458. memset(storage, 0, sizeof(*storage));
  7459. ops->ndo_get_stats64(dev, storage);
  7460. } else if (ops->ndo_get_stats) {
  7461. netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
  7462. } else {
  7463. netdev_stats_to_stats64(storage, &dev->stats);
  7464. }
  7465. storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
  7466. storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
  7467. storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
  7468. return storage;
  7469. }
  7470. EXPORT_SYMBOL(dev_get_stats);
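/*
 * Usage sketch (illustrative, not part of dev.c): a driver-provided
 * .ndo_get_stats64 of the kind dev_get_stats() prefers when present.  It
 * starts from the legacy dev->stats block via netdev_stats_to_stats64()
 * and folds in a hypothetical 64-bit hardware drop counter; locking for
 * that counter is elided to keep the sketch short.
 */
static u64 foo_hw_rx_dropped;	/* hypothetical counter updated from the RX path */

static void foo_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats)
{
	netdev_stats_to_stats64(stats, &dev->stats);
	stats->rx_dropped += foo_hw_rx_dropped;
}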
  7471. struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
  7472. {
  7473. struct netdev_queue *queue = dev_ingress_queue(dev);
  7474. #ifdef CONFIG_NET_CLS_ACT
  7475. if (queue)
  7476. return queue;
  7477. queue = kzalloc(sizeof(*queue), GFP_KERNEL);
  7478. if (!queue)
  7479. return NULL;
  7480. netdev_init_one_queue(dev, queue, NULL);
  7481. RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
  7482. queue->qdisc_sleeping = &noop_qdisc;
  7483. rcu_assign_pointer(dev->ingress_queue, queue);
  7484. #endif
  7485. return queue;
  7486. }
  7487. static const struct ethtool_ops default_ethtool_ops;
  7488. void netdev_set_default_ethtool_ops(struct net_device *dev,
  7489. const struct ethtool_ops *ops)
  7490. {
  7491. if (dev->ethtool_ops == &default_ethtool_ops)
  7492. dev->ethtool_ops = ops;
  7493. }
  7494. EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
  7495. void netdev_freemem(struct net_device *dev)
  7496. {
  7497. char *addr = (char *)dev - dev->padded;
  7498. kvfree(addr);
  7499. }
  7500. /**
  7501. * alloc_netdev_mqs - allocate network device
  7502. * @sizeof_priv: size of private data to allocate space for
  7503. * @name: device name format string
  7504. * @name_assign_type: origin of device name
  7505. * @setup: callback to initialize device
  7506. * @txqs: the number of TX subqueues to allocate
  7507. * @rxqs: the number of RX subqueues to allocate
  7508. *
  7509. * Allocates a struct net_device with private data area for driver use
  7510. * and performs basic initialization. Also allocates subqueue structs
  7511. * for each queue on the device.
  7512. */
  7513. struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
  7514. unsigned char name_assign_type,
  7515. void (*setup)(struct net_device *),
  7516. unsigned int txqs, unsigned int rxqs)
  7517. {
  7518. struct net_device *dev;
  7519. unsigned int alloc_size;
  7520. struct net_device *p;
  7521. BUG_ON(strlen(name) >= sizeof(dev->name));
  7522. if (txqs < 1) {
  7523. pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
  7524. return NULL;
  7525. }
  7526. if (rxqs < 1) {
  7527. pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
  7528. return NULL;
  7529. }
  7530. alloc_size = sizeof(struct net_device);
  7531. if (sizeof_priv) {
  7532. /* ensure 32-byte alignment of private area */
  7533. alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
  7534. alloc_size += sizeof_priv;
  7535. }
  7536. /* ensure 32-byte alignment of whole construct */
  7537. alloc_size += NETDEV_ALIGN - 1;
  7538. p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
  7539. if (!p)
  7540. return NULL;
  7541. dev = PTR_ALIGN(p, NETDEV_ALIGN);
  7542. dev->padded = (char *)dev - (char *)p;
  7543. dev->pcpu_refcnt = alloc_percpu(int);
  7544. if (!dev->pcpu_refcnt)
  7545. goto free_dev;
  7546. if (dev_addr_init(dev))
  7547. goto free_pcpu;
  7548. dev_mc_init(dev);
  7549. dev_uc_init(dev);
  7550. dev_net_set(dev, &init_net);
  7551. dev->gso_max_size = GSO_MAX_SIZE;
  7552. dev->gso_max_segs = GSO_MAX_SEGS;
  7553. INIT_LIST_HEAD(&dev->napi_list);
  7554. INIT_LIST_HEAD(&dev->unreg_list);
  7555. INIT_LIST_HEAD(&dev->close_list);
  7556. INIT_LIST_HEAD(&dev->link_watch_list);
  7557. INIT_LIST_HEAD(&dev->adj_list.upper);
  7558. INIT_LIST_HEAD(&dev->adj_list.lower);
  7559. INIT_LIST_HEAD(&dev->ptype_all);
  7560. INIT_LIST_HEAD(&dev->ptype_specific);
  7561. #ifdef CONFIG_NET_SCHED
  7562. hash_init(dev->qdisc_hash);
  7563. #endif
  7564. dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
  7565. setup(dev);
  7566. if (!dev->tx_queue_len) {
  7567. dev->priv_flags |= IFF_NO_QUEUE;
  7568. dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
  7569. }
  7570. dev->num_tx_queues = txqs;
  7571. dev->real_num_tx_queues = txqs;
  7572. if (netif_alloc_netdev_queues(dev))
  7573. goto free_all;
  7574. dev->num_rx_queues = rxqs;
  7575. dev->real_num_rx_queues = rxqs;
  7576. if (netif_alloc_rx_queues(dev))
  7577. goto free_all;
  7578. strcpy(dev->name, name);
  7579. dev->name_assign_type = name_assign_type;
  7580. dev->group = INIT_NETDEV_GROUP;
  7581. if (!dev->ethtool_ops)
  7582. dev->ethtool_ops = &default_ethtool_ops;
  7583. nf_hook_ingress_init(dev);
  7584. return dev;
  7585. free_all:
  7586. free_netdev(dev);
  7587. return NULL;
  7588. free_pcpu:
  7589. free_percpu(dev->pcpu_refcnt);
  7590. free_dev:
  7591. netdev_freemem(dev);
  7592. return NULL;
  7593. }
  7594. EXPORT_SYMBOL(alloc_netdev_mqs);
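/*
 * Usage sketch (illustrative, not part of dev.c): allocating a multi-queue
 * device directly with alloc_netdev_mqs() when the alloc_etherdev()-style
 * wrappers do not fit.  The setup callback, the "foo%d" name template and
 * the 8/8 queue counts are hypothetical.
 */
static void foo_mq_setup(struct net_device *dev)
{
	ether_setup(dev);			/* Ethernet defaults */
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
}

static struct net_device *foo_mq_alloc(void)
{
	/* no extra private area, 8 TX queues, 8 RX queues */
	return alloc_netdev_mqs(0, "foo%d", NET_NAME_ENUM, foo_mq_setup, 8, 8);
}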
  7595. /**
  7596. * free_netdev - free network device
  7597. * @dev: device
  7598. *
  7599. * This function does the last stage of destroying an allocated device
  7600. * interface. The reference to the device object is released. If this
7601. * is the last reference then it will be freed. Must be called in process
  7602. * context.
  7603. */
  7604. void free_netdev(struct net_device *dev)
  7605. {
  7606. struct napi_struct *p, *n;
  7607. might_sleep();
  7608. netif_free_tx_queues(dev);
  7609. netif_free_rx_queues(dev);
  7610. kfree(rcu_dereference_protected(dev->ingress_queue, 1));
  7611. /* Flush device addresses */
  7612. dev_addr_flush(dev);
  7613. list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
  7614. netif_napi_del(p);
  7615. free_percpu(dev->pcpu_refcnt);
  7616. dev->pcpu_refcnt = NULL;
  7617. /* Compatibility with error handling in drivers */
  7618. if (dev->reg_state == NETREG_UNINITIALIZED) {
  7619. netdev_freemem(dev);
  7620. return;
  7621. }
  7622. BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
  7623. dev->reg_state = NETREG_RELEASED;
  7624. /* will free via device release */
  7625. put_device(&dev->dev);
  7626. }
  7627. EXPORT_SYMBOL(free_netdev);
  7628. /**
  7629. * synchronize_net - Synchronize with packet receive processing
  7630. *
  7631. * Wait for packets currently being received to be done.
  7632. * Does not block later packets from starting.
  7633. */
  7634. void synchronize_net(void)
  7635. {
  7636. might_sleep();
  7637. if (rtnl_is_locked())
  7638. synchronize_rcu_expedited();
  7639. else
  7640. synchronize_rcu();
  7641. }
  7642. EXPORT_SYMBOL(synchronize_net);
  7643. /**
  7644. * unregister_netdevice_queue - remove device from the kernel
  7645. * @dev: device
  7646. * @head: list
  7647. *
  7648. * This function shuts down a device interface and removes it
  7649. * from the kernel tables.
  7650. * If head not NULL, device is queued to be unregistered later.
  7651. *
  7652. * Callers must hold the rtnl semaphore. You may want
  7653. * unregister_netdev() instead of this.
  7654. */
  7655. void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
  7656. {
  7657. ASSERT_RTNL();
  7658. if (head) {
  7659. list_move_tail(&dev->unreg_list, head);
  7660. } else {
  7661. rollback_registered(dev);
  7662. /* Finish processing unregister after unlock */
  7663. net_set_todo(dev);
  7664. }
  7665. }
  7666. EXPORT_SYMBOL(unregister_netdevice_queue);
  7667. /**
  7668. * unregister_netdevice_many - unregister many devices
  7669. * @head: list of devices
  7670. *
  7671. * Note: As most callers use a stack allocated list_head,
7672. * we force a list_del() to make sure the stack won't be corrupted later.
  7673. */
  7674. void unregister_netdevice_many(struct list_head *head)
  7675. {
  7676. struct net_device *dev;
  7677. if (!list_empty(head)) {
  7678. rollback_registered_many(head);
  7679. list_for_each_entry(dev, head, unreg_list)
  7680. net_set_todo(dev);
  7681. list_del(head);
  7682. }
  7683. }
  7684. EXPORT_SYMBOL(unregister_netdevice_many);
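/*
 * Usage sketch (illustrative, not part of dev.c): tearing several devices
 * down in one batch under a single rtnl hold, which is exactly what
 * unregister_netdevice_queue() and unregister_netdevice_many() are for.
 * The foo_devs array is hypothetical.
 */
static void foo_remove_all(struct net_device **foo_devs, int n)
{
	LIST_HEAD(kill_list);
	int i;

	rtnl_lock();
	for (i = 0; i < n; i++)
		unregister_netdevice_queue(foo_devs[i], &kill_list);
	unregister_netdevice_many(&kill_list);
	rtnl_unlock();
}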
  7685. /**
  7686. * unregister_netdev - remove device from the kernel
  7687. * @dev: device
  7688. *
  7689. * This function shuts down a device interface and removes it
  7690. * from the kernel tables.
  7691. *
  7692. * This is just a wrapper for unregister_netdevice that takes
  7693. * the rtnl semaphore. In general you want to use this and not
  7694. * unregister_netdevice.
  7695. */
  7696. void unregister_netdev(struct net_device *dev)
  7697. {
  7698. rtnl_lock();
  7699. unregister_netdevice(dev);
  7700. rtnl_unlock();
  7701. }
  7702. EXPORT_SYMBOL(unregister_netdev);
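/*
 * Usage sketch (illustrative, not part of dev.c): the usual driver remove
 * path that mirrors the probe sketch earlier.  unregister_netdev() takes
 * rtnl itself; free_netdev() must only run once the unregister has
 * completed.
 */
static void foo_remove(struct net_device *dev)
{
	unregister_netdev(dev);
	free_netdev(dev);
}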
  7703. /**
7704. * dev_change_net_namespace - move device to different network namespace
  7705. * @dev: device
  7706. * @net: network namespace
  7707. * @pat: If not NULL name pattern to try if the current device name
  7708. * is already taken in the destination network namespace.
  7709. *
  7710. * This function shuts down a device interface and moves it
  7711. * to a new network namespace. On success 0 is returned, on
7712. * a failure a negative errno code is returned.
  7713. *
  7714. * Callers must hold the rtnl semaphore.
  7715. */
  7716. int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
  7717. {
  7718. int err, new_nsid, new_ifindex;
  7719. ASSERT_RTNL();
  7720. /* Don't allow namespace local devices to be moved. */
  7721. err = -EINVAL;
  7722. if (dev->features & NETIF_F_NETNS_LOCAL)
  7723. goto out;
7724. /* Ensure the device has been registered */
  7725. if (dev->reg_state != NETREG_REGISTERED)
  7726. goto out;
7727. /* Get out if there is nothing to do */
  7728. err = 0;
  7729. if (net_eq(dev_net(dev), net))
  7730. goto out;
  7731. /* Pick the destination device name, and ensure
  7732. * we can use it in the destination network namespace.
  7733. */
  7734. err = -EEXIST;
  7735. if (__dev_get_by_name(net, dev->name)) {
  7736. /* We get here if we can't use the current device name */
  7737. if (!pat)
  7738. goto out;
  7739. err = dev_get_valid_name(net, dev, pat);
  7740. if (err < 0)
  7741. goto out;
  7742. }
  7743. /*
7744. * And now a mini version of register_netdevice and unregister_netdevice.
  7745. */
  7746. /* If device is running close it first. */
  7747. dev_close(dev);
  7748. /* And unlink it from device chain */
  7749. unlist_netdevice(dev);
  7750. synchronize_net();
  7751. /* Shutdown queueing discipline. */
  7752. dev_shutdown(dev);
  7753. /* Notify protocols, that we are about to destroy
  7754. * this device. They should clean all the things.
  7755. *
  7756. * Note that dev->reg_state stays at NETREG_REGISTERED.
  7757. * This is wanted because this way 8021q and macvlan know
  7758. * the device is just moving and can keep their slaves up.
  7759. */
  7760. call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
  7761. rcu_barrier();
  7762. new_nsid = peernet2id_alloc(dev_net(dev), net);
  7763. /* If there is an ifindex conflict assign a new one */
  7764. if (__dev_get_by_index(net, dev->ifindex))
  7765. new_ifindex = dev_new_index(net);
  7766. else
  7767. new_ifindex = dev->ifindex;
  7768. rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid,
  7769. new_ifindex);
  7770. /*
  7771. * Flush the unicast and multicast chains
  7772. */
  7773. dev_uc_flush(dev);
  7774. dev_mc_flush(dev);
  7775. /* Send a netdev-removed uevent to the old namespace */
  7776. kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
  7777. netdev_adjacent_del_links(dev);
  7778. /* Actually switch the network namespace */
  7779. dev_net_set(dev, net);
  7780. dev->ifindex = new_ifindex;
  7781. /* Send a netdev-add uevent to the new namespace */
  7782. kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
  7783. netdev_adjacent_add_links(dev);
  7784. /* Fixup kobjects */
  7785. err = device_rename(&dev->dev, dev->name);
  7786. WARN_ON(err);
  7787. /* Add the device back in the hashes */
  7788. list_netdevice(dev);
  7789. /* Notify protocols, that a new device appeared. */
  7790. call_netdevice_notifiers(NETDEV_REGISTER, dev);
  7791. /*
  7792. * Prevent userspace races by waiting until the network
  7793. * device is fully setup before sending notifications.
  7794. */
  7795. rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
  7796. synchronize_net();
  7797. err = 0;
  7798. out:
  7799. return err;
  7800. }
  7801. EXPORT_SYMBOL_GPL(dev_change_net_namespace);
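/*
 * Usage sketch (illustrative, not part of dev.c): moving a device into
 * another namespace the way the RTM_SETLINK path does, here with the
 * target namespace taken from a file descriptor.  The helper name and the
 * "eth%d" fallback pattern are hypothetical, and error handling is
 * reduced to the essentials.
 */
static int foo_move_to_netns(struct net_device *dev, int netns_fd)
{
	struct net *net = get_net_ns_by_fd(netns_fd);
	int err;

	if (IS_ERR(net))
		return PTR_ERR(net);

	rtnl_lock();
	err = dev_change_net_namespace(dev, net, "eth%d");
	rtnl_unlock();

	put_net(net);
	return err;
}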
  7802. static int dev_cpu_dead(unsigned int oldcpu)
  7803. {
  7804. struct sk_buff **list_skb;
  7805. struct sk_buff *skb;
  7806. unsigned int cpu;
  7807. struct softnet_data *sd, *oldsd, *remsd = NULL;
  7808. local_irq_disable();
  7809. cpu = smp_processor_id();
  7810. sd = &per_cpu(softnet_data, cpu);
  7811. oldsd = &per_cpu(softnet_data, oldcpu);
  7812. /* Find end of our completion_queue. */
  7813. list_skb = &sd->completion_queue;
  7814. while (*list_skb)
  7815. list_skb = &(*list_skb)->next;
  7816. /* Append completion queue from offline CPU. */
  7817. *list_skb = oldsd->completion_queue;
  7818. oldsd->completion_queue = NULL;
  7819. /* Append output queue from offline CPU. */
  7820. if (oldsd->output_queue) {
  7821. *sd->output_queue_tailp = oldsd->output_queue;
  7822. sd->output_queue_tailp = oldsd->output_queue_tailp;
  7823. oldsd->output_queue = NULL;
  7824. oldsd->output_queue_tailp = &oldsd->output_queue;
  7825. }
7826. /* Append NAPI poll list from offline CPU, with one exception:
  7827. * process_backlog() must be called by cpu owning percpu backlog.
  7828. * We properly handle process_queue & input_pkt_queue later.
  7829. */
  7830. while (!list_empty(&oldsd->poll_list)) {
  7831. struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
  7832. struct napi_struct,
  7833. poll_list);
  7834. list_del_init(&napi->poll_list);
  7835. if (napi->poll == process_backlog)
  7836. napi->state = 0;
  7837. else
  7838. ____napi_schedule(sd, napi);
  7839. }
  7840. raise_softirq_irqoff(NET_TX_SOFTIRQ);
  7841. local_irq_enable();
  7842. #ifdef CONFIG_RPS
  7843. remsd = oldsd->rps_ipi_list;
  7844. oldsd->rps_ipi_list = NULL;
  7845. #endif
  7846. /* send out pending IPI's on offline CPU */
  7847. net_rps_send_ipi(remsd);
  7848. /* Process offline CPU's input_pkt_queue */
  7849. while ((skb = __skb_dequeue(&oldsd->process_queue))) {
  7850. netif_rx_ni(skb);
  7851. input_queue_head_incr(oldsd);
  7852. }
  7853. while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
  7854. netif_rx_ni(skb);
  7855. input_queue_head_incr(oldsd);
  7856. }
  7857. return 0;
  7858. }
  7859. /**
  7860. * netdev_increment_features - increment feature set by one
  7861. * @all: current feature set
  7862. * @one: new feature set
  7863. * @mask: mask feature set
  7864. *
  7865. * Computes a new feature set after adding a device with feature set
  7866. * @one to the master device with current feature set @all. Will not
  7867. * enable anything that is off in @mask. Returns the new feature set.
  7868. */
  7869. netdev_features_t netdev_increment_features(netdev_features_t all,
  7870. netdev_features_t one, netdev_features_t mask)
  7871. {
  7872. if (mask & NETIF_F_HW_CSUM)
  7873. mask |= NETIF_F_CSUM_MASK;
  7874. mask |= NETIF_F_VLAN_CHALLENGED;
  7875. all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
  7876. all &= one | ~NETIF_F_ALL_FOR_ALL;
  7877. /* If one device supports hw checksumming, set for all. */
  7878. if (all & NETIF_F_HW_CSUM)
  7879. all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);
  7880. return all;
  7881. }
  7882. EXPORT_SYMBOL(netdev_increment_features);
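/*
 * Usage sketch (illustrative, not part of dev.c): how a bonding/bridge
 * style master might fold its slaves' feature sets together with
 * netdev_increment_features(), roughly mirroring what such drivers do in
 * their .ndo_fix_features path.  The "foo_master" naming is hypothetical
 * and the caller is assumed to hold rtnl.
 */
static netdev_features_t foo_master_fix_features(struct net_device *master,
						 netdev_features_t features)
{
	netdev_features_t mask = features;
	struct net_device *slave;
	struct list_head *iter;

	features &= ~NETIF_F_ONE_FOR_ALL;
	features |= NETIF_F_ALL_FOR_ALL;

	netdev_for_each_lower_dev(master, slave, iter)
		features = netdev_increment_features(features,
						     slave->features, mask);
	return features;
}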
  7883. static struct hlist_head * __net_init netdev_create_hash(void)
  7884. {
  7885. int i;
  7886. struct hlist_head *hash;
  7887. hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL);
  7888. if (hash != NULL)
  7889. for (i = 0; i < NETDEV_HASHENTRIES; i++)
  7890. INIT_HLIST_HEAD(&hash[i]);
  7891. return hash;
  7892. }
  7893. /* Initialize per network namespace state */
  7894. static int __net_init netdev_init(struct net *net)
  7895. {
  7896. BUILD_BUG_ON(GRO_HASH_BUCKETS >
  7897. 8 * FIELD_SIZEOF(struct napi_struct, gro_bitmask));
  7898. if (net != &init_net)
  7899. INIT_LIST_HEAD(&net->dev_base_head);
  7900. net->dev_name_head = netdev_create_hash();
  7901. if (net->dev_name_head == NULL)
  7902. goto err_name;
  7903. net->dev_index_head = netdev_create_hash();
  7904. if (net->dev_index_head == NULL)
  7905. goto err_idx;
  7906. return 0;
  7907. err_idx:
  7908. kfree(net->dev_name_head);
  7909. err_name:
  7910. return -ENOMEM;
  7911. }
  7912. /**
  7913. * netdev_drivername - network driver for the device
  7914. * @dev: network device
  7915. *
  7916. * Determine network driver for device.
  7917. */
  7918. const char *netdev_drivername(const struct net_device *dev)
  7919. {
  7920. const struct device_driver *driver;
  7921. const struct device *parent;
  7922. const char *empty = "";
  7923. parent = dev->dev.parent;
  7924. if (!parent)
  7925. return empty;
  7926. driver = parent->driver;
  7927. if (driver && driver->name)
  7928. return driver->name;
  7929. return empty;
  7930. }
  7931. static void __netdev_printk(const char *level, const struct net_device *dev,
  7932. struct va_format *vaf)
  7933. {
  7934. if (dev && dev->dev.parent) {
  7935. dev_printk_emit(level[1] - '0',
  7936. dev->dev.parent,
  7937. "%s %s %s%s: %pV",
  7938. dev_driver_string(dev->dev.parent),
  7939. dev_name(dev->dev.parent),
  7940. netdev_name(dev), netdev_reg_state(dev),
  7941. vaf);
  7942. } else if (dev) {
  7943. printk("%s%s%s: %pV",
  7944. level, netdev_name(dev), netdev_reg_state(dev), vaf);
  7945. } else {
  7946. printk("%s(NULL net_device): %pV", level, vaf);
  7947. }
  7948. }
  7949. void netdev_printk(const char *level, const struct net_device *dev,
  7950. const char *format, ...)
  7951. {
  7952. struct va_format vaf;
  7953. va_list args;
  7954. va_start(args, format);
  7955. vaf.fmt = format;
  7956. vaf.va = &args;
  7957. __netdev_printk(level, dev, &vaf);
  7958. va_end(args);
  7959. }
  7960. EXPORT_SYMBOL(netdev_printk);
  7961. #define define_netdev_printk_level(func, level) \
  7962. void func(const struct net_device *dev, const char *fmt, ...) \
  7963. { \
  7964. struct va_format vaf; \
  7965. va_list args; \
  7966. \
  7967. va_start(args, fmt); \
  7968. \
  7969. vaf.fmt = fmt; \
  7970. vaf.va = &args; \
  7971. \
  7972. __netdev_printk(level, dev, &vaf); \
  7973. \
  7974. va_end(args); \
  7975. } \
  7976. EXPORT_SYMBOL(func);
  7977. define_netdev_printk_level(netdev_emerg, KERN_EMERG);
  7978. define_netdev_printk_level(netdev_alert, KERN_ALERT);
  7979. define_netdev_printk_level(netdev_crit, KERN_CRIT);
  7980. define_netdev_printk_level(netdev_err, KERN_ERR);
  7981. define_netdev_printk_level(netdev_warn, KERN_WARNING);
  7982. define_netdev_printk_level(netdev_notice, KERN_NOTICE);
  7983. define_netdev_printk_level(netdev_info, KERN_INFO);
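/*
 * Usage sketch (illustrative, not part of dev.c): the level-specific
 * wrappers generated above are what drivers normally call; the driver
 * name, bus id, interface name and registration state prefix is added by
 * __netdev_printk() automatically.  The link-report helper is
 * hypothetical.
 */
static void foo_report_link(struct net_device *dev, bool up, int speed)
{
	if (up)
		netdev_info(dev, "link up, %d Mbps\n", speed);
	else
		netdev_warn(dev, "link down\n");
}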
  7984. static void __net_exit netdev_exit(struct net *net)
  7985. {
  7986. kfree(net->dev_name_head);
  7987. kfree(net->dev_index_head);
  7988. if (net != &init_net)
  7989. WARN_ON_ONCE(!list_empty(&net->dev_base_head));
  7990. }
  7991. static struct pernet_operations __net_initdata netdev_net_ops = {
  7992. .init = netdev_init,
  7993. .exit = netdev_exit,
  7994. };
  7995. static void __net_exit default_device_exit(struct net *net)
  7996. {
  7997. struct net_device *dev, *aux;
  7998. /*
  7999. * Push all migratable network devices back to the
  8000. * initial network namespace
  8001. */
  8002. rtnl_lock();
  8003. for_each_netdev_safe(net, dev, aux) {
  8004. int err;
  8005. char fb_name[IFNAMSIZ];
  8006. /* Ignore unmoveable devices (i.e. loopback) */
  8007. if (dev->features & NETIF_F_NETNS_LOCAL)
  8008. continue;
  8009. /* Leave virtual devices for the generic cleanup */
  8010. if (dev->rtnl_link_ops)
  8011. continue;
  8012. /* Push remaining network devices to init_net */
  8013. snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
  8014. err = dev_change_net_namespace(dev, &init_net, fb_name);
  8015. if (err) {
  8016. pr_emerg("%s: failed to move %s to init_net: %d\n",
  8017. __func__, dev->name, err);
  8018. BUG();
  8019. }
  8020. }
  8021. rtnl_unlock();
  8022. }
  8023. static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
  8024. {
  8025. /* Return with the rtnl_lock held when there are no network
  8026. * devices unregistering in any network namespace in net_list.
  8027. */
  8028. struct net *net;
  8029. bool unregistering;
  8030. DEFINE_WAIT_FUNC(wait, woken_wake_function);
  8031. add_wait_queue(&netdev_unregistering_wq, &wait);
  8032. for (;;) {
  8033. unregistering = false;
  8034. rtnl_lock();
  8035. list_for_each_entry(net, net_list, exit_list) {
  8036. if (net->dev_unreg_count > 0) {
  8037. unregistering = true;
  8038. break;
  8039. }
  8040. }
  8041. if (!unregistering)
  8042. break;
  8043. __rtnl_unlock();
  8044. wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
  8045. }
  8046. remove_wait_queue(&netdev_unregistering_wq, &wait);
  8047. }
  8048. static void __net_exit default_device_exit_batch(struct list_head *net_list)
  8049. {
8050. /* At exit all network devices must be removed from a network
  8051. * namespace. Do this in the reverse order of registration.
  8052. * Do this across as many network namespaces as possible to
  8053. * improve batching efficiency.
  8054. */
  8055. struct net_device *dev;
  8056. struct net *net;
  8057. LIST_HEAD(dev_kill_list);
8058. /* To prevent network device cleanup code from dereferencing
8059. * loopback devices or network devices that have been freed,
8060. * wait here for all pending unregistrations to complete
8061. * before unregistering the loopback device and allowing the
8062. * network namespace to be freed.
  8063. *
  8064. * The netdev todo list containing all network devices
  8065. * unregistrations that happen in default_device_exit_batch
  8066. * will run in the rtnl_unlock() at the end of
  8067. * default_device_exit_batch.
  8068. */
  8069. rtnl_lock_unregistering(net_list);
  8070. list_for_each_entry(net, net_list, exit_list) {
  8071. for_each_netdev_reverse(net, dev) {
  8072. if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
  8073. dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
  8074. else
  8075. unregister_netdevice_queue(dev, &dev_kill_list);
  8076. }
  8077. }
  8078. unregister_netdevice_many(&dev_kill_list);
  8079. rtnl_unlock();
  8080. }
  8081. static struct pernet_operations __net_initdata default_device_ops = {
  8082. .exit = default_device_exit,
  8083. .exit_batch = default_device_exit_batch,
  8084. };
  8085. /*
  8086. * Initialize the DEV module. At boot time this walks the device list and
  8087. * unhooks any devices that fail to initialise (normally hardware not
  8088. * present) and leaves us with a valid list of present and active devices.
  8089. *
  8090. */
  8091. /*
  8092. * This is called single threaded during boot, so no need
  8093. * to take the rtnl semaphore.
  8094. */
  8095. static int __init net_dev_init(void)
  8096. {
  8097. int i, rc = -ENOMEM;
  8098. BUG_ON(!dev_boot_phase);
  8099. if (dev_proc_init())
  8100. goto out;
  8101. if (netdev_kobject_init())
  8102. goto out;
  8103. INIT_LIST_HEAD(&ptype_all);
  8104. for (i = 0; i < PTYPE_HASH_SIZE; i++)
  8105. INIT_LIST_HEAD(&ptype_base[i]);
  8106. INIT_LIST_HEAD(&offload_base);
  8107. if (register_pernet_subsys(&netdev_net_ops))
  8108. goto out;
  8109. /*
  8110. * Initialise the packet receive queues.
  8111. */
  8112. for_each_possible_cpu(i) {
  8113. struct work_struct *flush = per_cpu_ptr(&flush_works, i);
  8114. struct softnet_data *sd = &per_cpu(softnet_data, i);
  8115. INIT_WORK(flush, flush_backlog);
  8116. skb_queue_head_init(&sd->input_pkt_queue);
  8117. skb_queue_head_init(&sd->process_queue);
  8118. #ifdef CONFIG_XFRM_OFFLOAD
  8119. skb_queue_head_init(&sd->xfrm_backlog);
  8120. #endif
  8121. INIT_LIST_HEAD(&sd->poll_list);
  8122. sd->output_queue_tailp = &sd->output_queue;
  8123. #ifdef CONFIG_RPS
  8124. sd->csd.func = rps_trigger_softirq;
  8125. sd->csd.info = sd;
  8126. sd->cpu = i;
  8127. #endif
  8128. init_gro_hash(&sd->backlog);
  8129. sd->backlog.poll = process_backlog;
  8130. sd->backlog.weight = weight_p;
  8131. }
  8132. dev_boot_phase = 0;
8133. /* The loopback device is special: if any other network device is
8134. * present in a network namespace, the loopback device must be
8135. * present too. Since we now dynamically allocate and free the
8136. * loopback device, ensure this invariant is maintained by
8137. * keeping the loopback device as the first device on the
8138. * list of network devices, so that it is the first device
8139. * that appears and the last network device that
8140. * disappears.
  8141. */
  8142. if (register_pernet_device(&loopback_net_ops))
  8143. goto out;
  8144. if (register_pernet_device(&default_device_ops))
  8145. goto out;
  8146. open_softirq(NET_TX_SOFTIRQ, net_tx_action);
  8147. open_softirq(NET_RX_SOFTIRQ, net_rx_action);
  8148. rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
  8149. NULL, dev_cpu_dead);
  8150. WARN_ON(rc < 0);
  8151. rc = 0;
  8152. out:
  8153. return rc;
  8154. }
  8155. subsys_initcall(net_dev_init);