dev.c 220 KB

1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374757677787980818283848586878889909192939495969798991001011021031041051061071081091101111121131141151161171181191201211221231241251261271281291301311321331341351361371381391401411421431441451461471481491501511521531541551561571581591601611621631641651661671681691701711721731741751761771781791801811821831841851861871881891901911921931941951961971981992002012022032042052062072082092102112122132142152162172182192202212222232242252262272282292302312322332342352362372382392402412422432442452462472482492502512522532542552562572582592602612622632642652662672682692702712722732742752762772782792802812822832842852862872882892902912922932942952962972982993003013023033043053063073083093103113123133143153163173183193203213223233243253263273283293303313323333343353363373383393403413423433443453463473483493503513523533543553563573583593603613623633643653663673683693703713723733743753763773783793803813823833843853863873883893903913923933943953963973983994004014024034044054064074084094104114124134144154164174184194204214224234244254264274284294304314324334344354364374384394404414424434444454464474484494504514524534544554564574584594604614624634644654664674684694704714724734744754764774784794804814824834844854864874884894904914924934944954964974984995005015025035045055065075085095105115125135145155165175185195205215225235245255265275285295305315325335345355365375385395405415425435445455465475485495505515525535545555565575585595605615625635645655665675685695705715725735745755765775785795805815825835845855865875885895905915925935945955965975985996006016026036046056066076086096106116126136146156166176186196206216226236246256266276286296306316326336346356366376386396406416426436446456466476486496506516526536546556566576586596606616626636646656666676686696706716726736746756766776786796806816826836846856866876886896906916926936946956966976986997007017027037047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711171217131714171517161717171817191720172117221723172417251726172717281729173017311732173317341735173617371738173917401741174217431744174517461747174817491750175117521753175417551756175717581759176017611762176317641765176617671768176917701771177217731774177517761777177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993199419951996199719981999200020012002200320042005200620072008200920102011201220132014201520162017201820192020202120222023202420252026202720282029203020312032203320342035203620372038203920402041204220432044204520462047204820492050205120522053205420552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119212021212122212321242125212621272128212921302131213221332134213521362137213821392140214121422143214421452146214721482149215021512152215321542155215621572158215921602161216221632164216521662167216821692170217121722173217421752176217721782179218021812182218321842185218621872188218921902191219221932194219521962197219821992200220122022203220422052206220722082209221022112212221322142215221622172218221922202221222222232224222522262227222822292230223122322233223422352236223722382239224022412242224322442245224622472248224922502251225222532254225522562257225822592260226122622263226422652266226722682269227022712272227322742275227622772278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277727782779278027812782278327842785278627872788278927902791279227932794279527962797279827992800280128022803280428052806280728082809281028112812281328142815281628172818281928202821282228232824282528262827282828292830283128322833283428352836283728382839284028412842284328442845284628472848284928502851285228532854285528562857285828592860286128622863286428652866286728682869287028712872287328742875287628772878287928802881288228832884288528862887288828892890289128922893289428952896289728982899290029012902290329042905290629072908290929102911291229132914291529162917291829192920292129222923292429252926292729282929293029312932293329342935293629372938293929402941294229432944294529462947294829492950295129522953295429552956295729582959296029612962296329642965296629672968296929702971297229732974297529762977297829792980298129822983298429852986298729882989299029912992299329942995299629972998299930003001300230033004300530063007300830093010301130123013301430153016301730183019302030213022302330243025302630273028302930303031303230333034303530363037303830393040304130423043304430453046304730483049305030513052305330543055305630573058305930603061306230633064306530663067306830693070307130723073307430753076307730783079308030813082308330843085308630873088308930903091309230933094309530963097309830993100310131023103310431053106310731083109311031113112311331143115311631173118311931203121312231233124312531263127312831293130313131323133313431353136313731383139314031413142314331443145314631473148314931503151315231533154315531563157315831593160316131623163316431653166316731683169317031713172317331743175317631773178317931803181318231833184318531863187318831893190319131923193319431953196319731983199320032013202320332043205320632073208320932103211321232133214321532163217321832193220322132223223322432253226322732283229323032313232323332343235323632373238323932403241324232433244324532463247324832493250325132523253325432553256325732583259326032613262326332643265326632673268326932703271327232733274327532763277327832793280328132823283328432853286328732883289329032913292329332943295329632973298329933003301330233033304330533063307330833093310331133123313331433153316331733183319332033213322332333243325332633273328332933303331333233333334333533363337333833393340334133423343334433453346334733483349335033513352335333543355335633573358335933603361336233633364336533663367336833693370337133723373337433753376337733783379338033813382338333843385338633873388338933903391339233933394339533963397339833993400340134023403340434053406340734083409341034113412341334143415341634173418341934203421342234233424342534263427342834293430343134323433343434353436343734383439344034413442344334443445344634473448344934503451345234533454345534563457345834593460346134623463346434653466346734683469347034713472347334743475347634773478347934803481348234833484348534863487348834893490349134923493349434953496349734983499350035013502350335043505350635073508350935103511351235133514351535163517351835193520352135223523352435253526352735283529353035313532353335343535353635373538353935403541354235433544354535463547354835493550355135523553355435553556355735583559356035613562356335643565356635673568356935703571357235733574357535763577357835793580358135823583358435853586358735883589359035913592359335943595359635973598359936003601360236033604360536063607360836093610361136123613361436153616361736183619362036213622362336243625362636273628362936303631363236333634363536363637363836393640364136423643364436453646364736483649365036513652365336543655365636573658365936603661366236633664366536663667366836693670367136723673367436753676367736783679368036813682368336843685368636873688368936903691369236933694369536963697369836993700370137023703370437053706370737083709371037113712371337143715371637173718371937203721372237233724372537263727372837293730373137323733373437353736373737383739374037413742374337443745374637473748374937503751375237533754375537563757375837593760376137623763376437653766376737683769377037713772377337743775377637773778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427742784279428042814282428342844285428642874288428942904291429242934294429542964297429842994300430143024303430443054306430743084309431043114312431343144315431643174318431943204321432243234324432543264327432843294330433143324333433443354336433743384339434043414342434343444345434643474348434943504351435243534354435543564357435843594360436143624363436443654366436743684369437043714372437343744375437643774378437943804381438243834384438543864387438843894390439143924393439443954396439743984399440044014402440344044405440644074408440944104411441244134414441544164417441844194420442144224423442444254426442744284429443044314432443344344435443644374438443944404441444244434444444544464447444844494450445144524453445444554456445744584459446044614462446344644465446644674468446944704471447244734474447544764477447844794480448144824483448444854486448744884489449044914492449344944495449644974498449945004501450245034504450545064507450845094510451145124513451445154516451745184519452045214522452345244525452645274528452945304531453245334534453545364537453845394540454145424543454445454546454745484549455045514552455345544555455645574558455945604561456245634564456545664567456845694570457145724573457445754576457745784579458045814582458345844585458645874588458945904591459245934594459545964597459845994600460146024603460446054606460746084609461046114612461346144615461646174618461946204621462246234624462546264627462846294630463146324633463446354636463746384639464046414642464346444645464646474648464946504651465246534654465546564657465846594660466146624663466446654666466746684669467046714672467346744675467646774678467946804681468246834684468546864687468846894690469146924693469446954696469746984699470047014702470347044705470647074708470947104711471247134714471547164717471847194720472147224723472447254726472747284729473047314732473347344735473647374738473947404741474247434744474547464747474847494750475147524753475447554756475747584759476047614762476347644765476647674768476947704771477247734774477547764777477847794780478147824783478447854786478747884789479047914792479347944795479647974798479948004801480248034804480548064807480848094810481148124813481448154816481748184819482048214822482348244825482648274828482948304831483248334834483548364837483848394840484148424843484448454846484748484849485048514852485348544855485648574858485948604861486248634864486548664867486848694870487148724873487448754876487748784879488048814882488348844885488648874888488948904891489248934894489548964897489848994900490149024903490449054906490749084909491049114912491349144915491649174918491949204921492249234924492549264927492849294930493149324933493449354936493749384939494049414942494349444945494649474948494949504951495249534954495549564957495849594960496149624963496449654966496749684969497049714972497349744975497649774978497949804981498249834984498549864987498849894990499149924993499449954996499749984999500050015002500350045005500650075008500950105011501250135014501550165017501850195020502150225023502450255026502750285029503050315032503350345035503650375038503950405041504250435044504550465047504850495050505150525053505450555056505750585059506050615062506350645065506650675068506950705071507250735074507550765077507850795080508150825083508450855086508750885089509050915092509350945095509650975098509951005101510251035104510551065107510851095110511151125113511451155116511751185119512051215122512351245125512651275128512951305131513251335134513551365137513851395140514151425143514451455146514751485149515051515152515351545155515651575158515951605161516251635164516551665167516851695170517151725173517451755176517751785179518051815182518351845185518651875188518951905191519251935194519551965197519851995200520152025203520452055206520752085209521052115212521352145215521652175218521952205221522252235224522552265227522852295230523152325233523452355236523752385239524052415242524352445245524652475248524952505251525252535254525552565257525852595260526152625263526452655266526752685269527052715272527352745275527652775278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577757785779578057815782578357845785578657875788578957905791579257935794579557965797579857995800580158025803580458055806580758085809581058115812581358145815581658175818581958205821582258235824582558265827582858295830583158325833583458355836583758385839584058415842584358445845584658475848584958505851585258535854585558565857585858595860586158625863586458655866586758685869587058715872587358745875587658775878587958805881588258835884588558865887588858895890589158925893589458955896589758985899590059015902590359045905590659075908590959105911591259135914591559165917591859195920592159225923592459255926592759285929593059315932593359345935593659375938593959405941594259435944594559465947594859495950595159525953595459555956595759585959596059615962596359645965596659675968596959705971597259735974597559765977597859795980598159825983598459855986598759885989599059915992599359945995599659975998599960006001600260036004600560066007600860096010601160126013601460156016601760186019602060216022602360246025602660276028602960306031603260336034603560366037603860396040604160426043604460456046604760486049605060516052605360546055605660576058605960606061606260636064606560666067606860696070607160726073607460756076607760786079608060816082608360846085608660876088608960906091609260936094609560966097609860996100610161026103610461056106610761086109611061116112611361146115611661176118611961206121612261236124612561266127612861296130613161326133613461356136613761386139614061416142614361446145614661476148614961506151615261536154615561566157615861596160616161626163616461656166616761686169617061716172617361746175617661776178617961806181618261836184618561866187618861896190619161926193619461956196619761986199620062016202620362046205620662076208620962106211621262136214621562166217621862196220622162226223622462256226622762286229623062316232623362346235623662376238623962406241624262436244624562466247624862496250625162526253625462556256625762586259626062616262626362646265626662676268626962706271627262736274627562766277627862796280628162826283628462856286628762886289629062916292629362946295629662976298629963006301630263036304630563066307630863096310631163126313631463156316631763186319632063216322632363246325632663276328632963306331633263336334633563366337633863396340634163426343634463456346634763486349635063516352635363546355635663576358635963606361636263636364636563666367636863696370637163726373637463756376637763786379638063816382638363846385638663876388638963906391639263936394639563966397639863996400640164026403640464056406640764086409641064116412641364146415641664176418641964206421642264236424642564266427642864296430643164326433643464356436643764386439644064416442644364446445644664476448644964506451645264536454645564566457645864596460646164626463646464656466646764686469647064716472647364746475647664776478647964806481648264836484648564866487648864896490649164926493649464956496649764986499650065016502650365046505650665076508650965106511651265136514651565166517651865196520652165226523652465256526652765286529653065316532653365346535653665376538653965406541654265436544654565466547654865496550655165526553655465556556655765586559656065616562656365646565656665676568656965706571657265736574657565766577657865796580658165826583658465856586658765886589659065916592659365946595659665976598659966006601660266036604660566066607660866096610661166126613661466156616661766186619662066216622662366246625662666276628662966306631663266336634663566366637663866396640664166426643664466456646664766486649665066516652665366546655665666576658665966606661666266636664666566666667666866696670667166726673667466756676667766786679668066816682668366846685668666876688668966906691669266936694669566966697669866996700670167026703670467056706670767086709671067116712671367146715671667176718671967206721672267236724672567266727672867296730673167326733673467356736673767386739674067416742674367446745674667476748674967506751675267536754675567566757675867596760676167626763676467656766676767686769677067716772677367746775677667776778677967806781678267836784678567866787678867896790679167926793679467956796679767986799680068016802680368046805680668076808680968106811681268136814681568166817681868196820682168226823682468256826682768286829683068316832683368346835683668376838683968406841684268436844684568466847684868496850685168526853685468556856685768586859686068616862686368646865686668676868686968706871687268736874687568766877687868796880688168826883688468856886688768886889689068916892689368946895689668976898689969006901690269036904690569066907690869096910691169126913691469156916691769186919692069216922692369246925692669276928692969306931693269336934693569366937693869396940694169426943694469456946694769486949695069516952695369546955695669576958695969606961696269636964696569666967696869696970697169726973697469756976697769786979698069816982698369846985698669876988698969906991699269936994699569966997699869997000700170027003700470057006700770087009701070117012701370147015701670177018701970207021702270237024702570267027702870297030703170327033703470357036703770387039704070417042704370447045704670477048704970507051705270537054705570567057705870597060706170627063706470657066706770687069707070717072707370747075707670777078707970807081708270837084708570867087708870897090709170927093709470957096709770987099710071017102710371047105710671077108710971107111711271137114711571167117711871197120712171227123712471257126712771287129713071317132713371347135713671377138713971407141714271437144714571467147714871497150715171527153715471557156715771587159716071617162716371647165716671677168716971707171717271737174717571767177717871797180718171827183718471857186718771887189719071917192719371947195719671977198719972007201720272037204720572067207720872097210721172127213721472157216721772187219722072217222722372247225722672277228722972307231723272337234723572367237723872397240724172427243724472457246724772487249725072517252725372547255725672577258725972607261726272637264726572667267726872697270727172727273727472757276727772787279728072817282728372847285728672877288728972907291729272937294729572967297729872997300730173027303730473057306730773087309731073117312731373147315731673177318731973207321732273237324732573267327732873297330733173327333733473357336733773387339734073417342734373447345734673477348734973507351735273537354735573567357735873597360736173627363736473657366736773687369737073717372737373747375737673777378737973807381738273837384738573867387738873897390739173927393739473957396739773987399740074017402740374047405740674077408740974107411741274137414741574167417741874197420742174227423742474257426742774287429743074317432743374347435743674377438743974407441744274437444744574467447744874497450745174527453745474557456745774587459746074617462746374647465746674677468746974707471747274737474747574767477747874797480748174827483748474857486748774887489749074917492749374947495749674977498749975007501750275037504750575067507750875097510751175127513751475157516751775187519752075217522752375247525752675277528752975307531753275337534753575367537753875397540754175427543754475457546754775487549755075517552755375547555755675577558755975607561756275637564756575667567756875697570757175727573757475757576757775787579758075817582758375847585758675877588758975907591759275937594759575967597759875997600760176027603760476057606760776087609761076117612761376147615761676177618761976207621762276237624762576267627762876297630763176327633763476357636763776387639764076417642764376447645764676477648764976507651765276537654765576567657765876597660766176627663766476657666766776687669767076717672767376747675767676777678767976807681768276837684768576867687768876897690769176927693769476957696769776987699770077017702770377047705770677077708770977107711771277137714771577167717771877197720772177227723772477257726772777287729773077317732773377347735773677377738773977407741774277437744774577467747774877497750775177527753775477557756775777587759776077617762776377647765776677677768776977707771777277737774777577767777777877797780778177827783778477857786778777887789779077917792779377947795779677977798779978007801780278037804780578067807780878097810781178127813781478157816781778187819782078217822782378247825782678277828782978307831783278337834783578367837783878397840784178427843784478457846784778487849785078517852785378547855785678577858785978607861786278637864786578667867786878697870787178727873787478757876787778787879788078817882788378847885788678877888788978907891789278937894789578967897789878997900790179027903790479057906790779087909791079117912791379147915791679177918791979207921792279237924792579267927792879297930793179327933793479357936793779387939794079417942794379447945794679477948794979507951795279537954795579567957795879597960796179627963796479657966796779687969797079717972797379747975797679777978797979807981798279837984798579867987798879897990799179927993799479957996799779987999800080018002800380048005800680078008800980108011801280138014801580168017801880198020802180228023802480258026802780288029803080318032803380348035803680378038803980408041804280438044804580468047804880498050805180528053805480558056805780588059806080618062806380648065806680678068806980708071807280738074807580768077807880798080808180828083808480858086808780888089809080918092809380948095809680978098809981008101810281038104810581068107810881098110811181128113811481158116811781188119812081218122812381248125812681278128812981308131813281338134813581368137813881398140814181428143814481458146814781488149815081518152815381548155815681578158815981608161816281638164816581668167816881698170817181728173817481758176817781788179818081818182818381848185818681878188818981908191819281938194819581968197819881998200820182028203820482058206820782088209821082118212821382148215821682178218821982208221822282238224822582268227822882298230823182328233823482358236823782388239824082418242824382448245824682478248824982508251825282538254825582568257825882598260826182628263826482658266826782688269827082718272827382748275827682778278827982808281828282838284828582868287828882898290829182928293829482958296829782988299830083018302830383048305830683078308830983108311831283138314831583168317831883198320832183228323832483258326832783288329833083318332833383348335833683378338833983408341834283438344834583468347834883498350835183528353835483558356835783588359836083618362836383648365836683678368836983708371837283738374837583768377837883798380838183828383838483858386838783888389839083918392839383948395839683978398839984008401840284038404840584068407840884098410841184128413841484158416841784188419842084218422842384248425842684278428842984308431843284338434843584368437843884398440844184428443844484458446844784488449845084518452845384548455845684578458845984608461846284638464846584668467846884698470847184728473847484758476847784788479848084818482848384848485848684878488848984908491849284938494849584968497849884998500850185028503850485058506850785088509851085118512851385148515851685178518851985208521852285238524852585268527852885298530853185328533853485358536853785388539854085418542854385448545854685478548854985508551855285538554855585568557855885598560856185628563856485658566856785688569857085718572857385748575857685778578857985808581858285838584858585868587858885898590859185928593859485958596859785988599860086018602860386048605860686078608860986108611861286138614861586168617861886198620862186228623862486258626862786288629863086318632863386348635863686378638863986408641864286438644864586468647864886498650865186528653865486558656865786588659866086618662866386648665866686678668866986708671867286738674867586768677867886798680868186828683868486858686868786888689869086918692869386948695869686978698869987008701870287038704870587068707870887098710871187128713871487158716871787188719872087218722872387248725872687278728872987308731873287338734873587368737873887398740874187428743874487458746874787488749875087518752875387548755875687578758875987608761876287638764876587668767876887698770877187728773877487758776877787788779878087818782878387848785878687878788878987908791879287938794879587968797879887998800880188028803880488058806880788088809881088118812881388148815881688178818881988208821882288238824882588268827882888298830883188328833883488358836883788388839884088418842884388448845884688478848884988508851885288538854
  1. /*
  2. * NET3 Protocol independent device support routines.
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License
  6. * as published by the Free Software Foundation; either version
  7. * 2 of the License, or (at your option) any later version.
  8. *
  9. * Derived from the non IP parts of dev.c 1.0.19
  10. * Authors: Ross Biro
  11. * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  12. * Mark Evans, <evansmp@uhura.aston.ac.uk>
  13. *
  14. * Additional Authors:
  15. * Florian la Roche <rzsfl@rz.uni-sb.de>
  16. * Alan Cox <gw4pts@gw4pts.ampr.org>
  17. * David Hinds <dahinds@users.sourceforge.net>
  18. * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
  19. * Adam Sulmicki <adam@cfar.umd.edu>
  20. * Pekka Riikonen <priikone@poesidon.pspt.fi>
  21. *
  22. * Changes:
  23. * D.J. Barrow : Fixed bug where dev->refcnt gets set
  24. * to 2 if register_netdev gets called
  25. * before net_dev_init & also removed a
  26. * few lines of code in the process.
  27. * Alan Cox : device private ioctl copies fields back.
  28. * Alan Cox : Transmit queue code does relevant
  29. * stunts to keep the queue safe.
  30. * Alan Cox : Fixed double lock.
  31. * Alan Cox : Fixed promisc NULL pointer trap
  32. * ???????? : Support the full private ioctl range
  33. * Alan Cox : Moved ioctl permission check into
  34. * drivers
  35. * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
  36. * Alan Cox : 100 backlog just doesn't cut it when
  37. * you start doing multicast video 8)
  38. * Alan Cox : Rewrote net_bh and list manager.
  39. * Alan Cox : Fix ETH_P_ALL echoback lengths.
  40. * Alan Cox : Took out transmit every packet pass
  41. * Saved a few bytes in the ioctl handler
  42. * Alan Cox : Network driver sets packet type before
  43. * calling netif_rx. Saves a function
  44. * call a packet.
  45. * Alan Cox : Hashed net_bh()
  46. * Richard Kooijman: Timestamp fixes.
  47. * Alan Cox : Wrong field in SIOCGIFDSTADDR
  48. * Alan Cox : Device lock protection.
  49. * Alan Cox : Fixed nasty side effect of device close
  50. * changes.
  51. * Rudi Cilibrasi : Pass the right thing to
  52. * set_mac_address()
  53. * Dave Miller : 32bit quantity for the device lock to
  54. * make it work out on a Sparc.
  55. * Bjorn Ekwall : Added KERNELD hack.
  56. * Alan Cox : Cleaned up the backlog initialise.
  57. * Craig Metz : SIOCGIFCONF fix if space for under
  58. * 1 device.
  59. * Thomas Bogendoerfer : Return ENODEV for dev_open, if there
  60. * is no device open function.
  61. * Andi Kleen : Fix error reporting for SIOCGIFCONF
  62. * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF
  63. * Cyrus Durgin : Cleaned for KMOD
  64. * Adam Sulmicki : Bug Fix : Network Device Unload
  65. * A network device unload needs to purge
  66. * the backlog queue.
  67. * Paul Rusty Russell : SIOCSIFNAME
  68. * Pekka Riikonen : Netdev boot-time settings code
  69. * Andrew Morton : Make unregister_netdevice wait
  70. * indefinitely on dev->refcnt
  71. * J Hadi Salim : - Backlog queue sampling
  72. * - netif_rx() feedback
  73. */
  74. #include <linux/uaccess.h>
  75. #include <linux/bitops.h>
  76. #include <linux/capability.h>
  77. #include <linux/cpu.h>
  78. #include <linux/types.h>
  79. #include <linux/kernel.h>
  80. #include <linux/hash.h>
  81. #include <linux/slab.h>
  82. #include <linux/sched.h>
  83. #include <linux/sched/mm.h>
  84. #include <linux/mutex.h>
  85. #include <linux/string.h>
  86. #include <linux/mm.h>
  87. #include <linux/socket.h>
  88. #include <linux/sockios.h>
  89. #include <linux/errno.h>
  90. #include <linux/interrupt.h>
  91. #include <linux/if_ether.h>
  92. #include <linux/netdevice.h>
  93. #include <linux/etherdevice.h>
  94. #include <linux/ethtool.h>
  95. #include <linux/notifier.h>
  96. #include <linux/skbuff.h>
  97. #include <linux/bpf.h>
  98. #include <linux/bpf_trace.h>
  99. #include <net/net_namespace.h>
  100. #include <net/sock.h>
  101. #include <net/busy_poll.h>
  102. #include <linux/rtnetlink.h>
  103. #include <linux/stat.h>
  104. #include <net/dst.h>
  105. #include <net/dst_metadata.h>
  106. #include <net/pkt_sched.h>
  107. #include <net/pkt_cls.h>
  108. #include <net/checksum.h>
  109. #include <net/xfrm.h>
  110. #include <linux/highmem.h>
  111. #include <linux/init.h>
  112. #include <linux/module.h>
  113. #include <linux/netpoll.h>
  114. #include <linux/rcupdate.h>
  115. #include <linux/delay.h>
  116. #include <net/iw_handler.h>
  117. #include <asm/current.h>
  118. #include <linux/audit.h>
  119. #include <linux/dmaengine.h>
  120. #include <linux/err.h>
  121. #include <linux/ctype.h>
  122. #include <linux/if_arp.h>
  123. #include <linux/if_vlan.h>
  124. #include <linux/ip.h>
  125. #include <net/ip.h>
  126. #include <net/mpls.h>
  127. #include <linux/ipv6.h>
  128. #include <linux/in.h>
  129. #include <linux/jhash.h>
  130. #include <linux/random.h>
  131. #include <trace/events/napi.h>
  132. #include <trace/events/net.h>
  133. #include <trace/events/skb.h>
  134. #include <linux/pci.h>
  135. #include <linux/inetdevice.h>
  136. #include <linux/cpu_rmap.h>
  137. #include <linux/static_key.h>
  138. #include <linux/hashtable.h>
  139. #include <linux/vmalloc.h>
  140. #include <linux/if_macvlan.h>
  141. #include <linux/errqueue.h>
  142. #include <linux/hrtimer.h>
  143. #include <linux/netfilter_ingress.h>
  144. #include <linux/crash_dump.h>
  145. #include <linux/sctp.h>
  146. #include <net/udp_tunnel.h>
  147. #include <linux/net_namespace.h>
  148. #include "net-sysfs.h"
  149. /* Instead of increasing this, you should create a hash table. */
  150. #define MAX_GRO_SKBS 8
  151. /* This should be increased if a protocol with a bigger head is added. */
  152. #define GRO_MAX_HEAD (MAX_HEADER + 128)
  153. static DEFINE_SPINLOCK(ptype_lock);
  154. static DEFINE_SPINLOCK(offload_lock);
  155. struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
  156. struct list_head ptype_all __read_mostly; /* Taps */
  157. static struct list_head offload_base __read_mostly;
  158. static int netif_rx_internal(struct sk_buff *skb);
  159. static int call_netdevice_notifiers_info(unsigned long val,
  160. struct netdev_notifier_info *info);
  161. static struct napi_struct *napi_by_id(unsigned int napi_id);
  162. /*
  163. * The @dev_base_head list is protected by @dev_base_lock and the rtnl
  164. * semaphore.
  165. *
  166. * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
  167. *
  168. * Writers must hold the rtnl semaphore while they loop through the
  169. * dev_base_head list, and hold dev_base_lock for writing when they do the
  170. * actual updates. This allows pure readers to access the list even
  171. * while a writer is preparing to update it.
  172. *
  173. * To put it another way, dev_base_lock is held for writing only to
  174. * protect against pure readers; the rtnl semaphore provides the
  175. * protection against other writers.
  176. *
  177. * See, for example usages, register_netdevice() and
  178. * unregister_netdevice(), which must be called with the rtnl
  179. * semaphore held.
  180. */
  181. DEFINE_RWLOCK(dev_base_lock);
  182. EXPORT_SYMBOL(dev_base_lock);
  183. static DEFINE_MUTEX(ifalias_mutex);
  184. /* protects napi_hash addition/deletion and napi_gen_id */
  185. static DEFINE_SPINLOCK(napi_hash_lock);
  186. static unsigned int napi_gen_id = NR_CPUS;
  187. static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);
  188. static seqcount_t devnet_rename_seq;
  189. static inline void dev_base_seq_inc(struct net *net)
  190. {
  191. while (++net->dev_base_seq == 0)
  192. ;
  193. }
  194. static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
  195. {
  196. unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));
  197. return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
  198. }
  199. static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
  200. {
  201. return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
  202. }
  203. static inline void rps_lock(struct softnet_data *sd)
  204. {
  205. #ifdef CONFIG_RPS
  206. spin_lock(&sd->input_pkt_queue.lock);
  207. #endif
  208. }
  209. static inline void rps_unlock(struct softnet_data *sd)
  210. {
  211. #ifdef CONFIG_RPS
  212. spin_unlock(&sd->input_pkt_queue.lock);
  213. #endif
  214. }
  215. /* Device list insertion */
  216. static void list_netdevice(struct net_device *dev)
  217. {
  218. struct net *net = dev_net(dev);
  219. ASSERT_RTNL();
  220. write_lock_bh(&dev_base_lock);
  221. list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
  222. hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
  223. hlist_add_head_rcu(&dev->index_hlist,
  224. dev_index_hash(net, dev->ifindex));
  225. write_unlock_bh(&dev_base_lock);
  226. dev_base_seq_inc(net);
  227. }
  228. /* Device list removal
  229. * caller must respect a RCU grace period before freeing/reusing dev
  230. */
  231. static void unlist_netdevice(struct net_device *dev)
  232. {
  233. ASSERT_RTNL();
  234. /* Unlink dev from the device chain */
  235. write_lock_bh(&dev_base_lock);
  236. list_del_rcu(&dev->dev_list);
  237. hlist_del_rcu(&dev->name_hlist);
  238. hlist_del_rcu(&dev->index_hlist);
  239. write_unlock_bh(&dev_base_lock);
  240. dev_base_seq_inc(dev_net(dev));
  241. }
  242. /*
  243. * Our notifier list
  244. */
  245. static RAW_NOTIFIER_HEAD(netdev_chain);
  246. /*
  247. * Device drivers call our routines to queue packets here. We empty the
  248. * queue in the local softnet handler.
  249. */
  250. DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
  251. EXPORT_PER_CPU_SYMBOL(softnet_data);
  252. #ifdef CONFIG_LOCKDEP
  253. /*
  254. * register_netdevice() inits txq->_xmit_lock and sets lockdep class
  255. * according to dev->type
  256. */
  257. static const unsigned short netdev_lock_type[] = {
  258. ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
  259. ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
  260. ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
  261. ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
  262. ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
  263. ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
  264. ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
  265. ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
  266. ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
  267. ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
  268. ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
  269. ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
  270. ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
  271. ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
  272. ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
  273. static const char *const netdev_lock_name[] = {
  274. "_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
  275. "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
  276. "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
  277. "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
  278. "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
  279. "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
  280. "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
  281. "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
  282. "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
  283. "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
  284. "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
  285. "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
  286. "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
  287. "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
  288. "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
  289. static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
  290. static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
  291. static inline unsigned short netdev_lock_pos(unsigned short dev_type)
  292. {
  293. int i;
  294. for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
  295. if (netdev_lock_type[i] == dev_type)
  296. return i;
  297. /* the last key is used by default */
  298. return ARRAY_SIZE(netdev_lock_type) - 1;
  299. }
  300. static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
  301. unsigned short dev_type)
  302. {
  303. int i;
  304. i = netdev_lock_pos(dev_type);
  305. lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
  306. netdev_lock_name[i]);
  307. }
  308. static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
  309. {
  310. int i;
  311. i = netdev_lock_pos(dev->type);
  312. lockdep_set_class_and_name(&dev->addr_list_lock,
  313. &netdev_addr_lock_key[i],
  314. netdev_lock_name[i]);
  315. }
  316. #else
  317. static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
  318. unsigned short dev_type)
  319. {
  320. }
  321. static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
  322. {
  323. }
  324. #endif
  325. /*******************************************************************************
  326. *
  327. * Protocol management and registration routines
  328. *
  329. *******************************************************************************/
  330. /*
  331. * Add a protocol ID to the list. Now that the input handler is
  332. * smarter we can dispense with all the messy stuff that used to be
  333. * here.
  334. *
  335. * BEWARE!!! Protocol handlers, mangling input packets,
  336. * MUST BE last in hash buckets and checking protocol handlers
  337. * MUST start from promiscuous ptype_all chain in net_bh.
  338. * It is true now, do not change it.
  339. * Explanation follows: if protocol handler, mangling packet, will
  340. * be the first on list, it is not able to sense, that packet
  341. * is cloned and should be copied-on-write, so that it will
  342. * change it and subsequent readers will get broken packet.
  343. * --ANK (980803)
  344. */
  345. static inline struct list_head *ptype_head(const struct packet_type *pt)
  346. {
  347. if (pt->type == htons(ETH_P_ALL))
  348. return pt->dev ? &pt->dev->ptype_all : &ptype_all;
  349. else
  350. return pt->dev ? &pt->dev->ptype_specific :
  351. &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
  352. }
  353. /**
  354. * dev_add_pack - add packet handler
  355. * @pt: packet type declaration
  356. *
  357. * Add a protocol handler to the networking stack. The passed &packet_type
  358. * is linked into kernel lists and may not be freed until it has been
  359. * removed from the kernel lists.
  360. *
  361. * This call does not sleep therefore it can not
  362. * guarantee all CPU's that are in middle of receiving packets
  363. * will see the new packet type (until the next received packet).
  364. */
  365. void dev_add_pack(struct packet_type *pt)
  366. {
  367. struct list_head *head = ptype_head(pt);
  368. spin_lock(&ptype_lock);
  369. list_add_rcu(&pt->list, head);
  370. spin_unlock(&ptype_lock);
  371. }
  372. EXPORT_SYMBOL(dev_add_pack);
  373. /**
  374. * __dev_remove_pack - remove packet handler
  375. * @pt: packet type declaration
  376. *
  377. * Remove a protocol handler that was previously added to the kernel
  378. * protocol handlers by dev_add_pack(). The passed &packet_type is removed
  379. * from the kernel lists and can be freed or reused once this function
  380. * returns.
  381. *
  382. * The packet type might still be in use by receivers
  383. * and must not be freed until after all the CPU's have gone
  384. * through a quiescent state.
  385. */
  386. void __dev_remove_pack(struct packet_type *pt)
  387. {
  388. struct list_head *head = ptype_head(pt);
  389. struct packet_type *pt1;
  390. spin_lock(&ptype_lock);
  391. list_for_each_entry(pt1, head, list) {
  392. if (pt == pt1) {
  393. list_del_rcu(&pt->list);
  394. goto out;
  395. }
  396. }
  397. pr_warn("dev_remove_pack: %p not found\n", pt);
  398. out:
  399. spin_unlock(&ptype_lock);
  400. }
  401. EXPORT_SYMBOL(__dev_remove_pack);
  402. /**
  403. * dev_remove_pack - remove packet handler
  404. * @pt: packet type declaration
  405. *
  406. * Remove a protocol handler that was previously added to the kernel
  407. * protocol handlers by dev_add_pack(). The passed &packet_type is removed
  408. * from the kernel lists and can be freed or reused once this function
  409. * returns.
  410. *
  411. * This call sleeps to guarantee that no CPU is looking at the packet
  412. * type after return.
  413. */
  414. void dev_remove_pack(struct packet_type *pt)
  415. {
  416. __dev_remove_pack(pt);
  417. synchronize_net();
  418. }
  419. EXPORT_SYMBOL(dev_remove_pack);
  420. /**
  421. * dev_add_offload - register offload handlers
  422. * @po: protocol offload declaration
  423. *
  424. * Add protocol offload handlers to the networking stack. The passed
  425. * &proto_offload is linked into kernel lists and may not be freed until
  426. * it has been removed from the kernel lists.
  427. *
  428. * This call does not sleep therefore it can not
  429. * guarantee all CPU's that are in middle of receiving packets
  430. * will see the new offload handlers (until the next received packet).
  431. */
  432. void dev_add_offload(struct packet_offload *po)
  433. {
  434. struct packet_offload *elem;
  435. spin_lock(&offload_lock);
  436. list_for_each_entry(elem, &offload_base, list) {
  437. if (po->priority < elem->priority)
  438. break;
  439. }
  440. list_add_rcu(&po->list, elem->list.prev);
  441. spin_unlock(&offload_lock);
  442. }
  443. EXPORT_SYMBOL(dev_add_offload);
  444. /**
  445. * __dev_remove_offload - remove offload handler
  446. * @po: packet offload declaration
  447. *
  448. * Remove a protocol offload handler that was previously added to the
  449. * kernel offload handlers by dev_add_offload(). The passed &offload_type
  450. * is removed from the kernel lists and can be freed or reused once this
  451. * function returns.
  452. *
  453. * The packet type might still be in use by receivers
  454. * and must not be freed until after all the CPU's have gone
  455. * through a quiescent state.
  456. */
  457. static void __dev_remove_offload(struct packet_offload *po)
  458. {
  459. struct list_head *head = &offload_base;
  460. struct packet_offload *po1;
  461. spin_lock(&offload_lock);
  462. list_for_each_entry(po1, head, list) {
  463. if (po == po1) {
  464. list_del_rcu(&po->list);
  465. goto out;
  466. }
  467. }
  468. pr_warn("dev_remove_offload: %p not found\n", po);
  469. out:
  470. spin_unlock(&offload_lock);
  471. }
  472. /**
  473. * dev_remove_offload - remove packet offload handler
  474. * @po: packet offload declaration
  475. *
  476. * Remove a packet offload handler that was previously added to the kernel
  477. * offload handlers by dev_add_offload(). The passed &offload_type is
  478. * removed from the kernel lists and can be freed or reused once this
  479. * function returns.
  480. *
  481. * This call sleeps to guarantee that no CPU is looking at the packet
  482. * type after return.
  483. */
  484. void dev_remove_offload(struct packet_offload *po)
  485. {
  486. __dev_remove_offload(po);
  487. synchronize_net();
  488. }
  489. EXPORT_SYMBOL(dev_remove_offload);
  490. /******************************************************************************
  491. *
  492. * Device Boot-time Settings Routines
  493. *
  494. ******************************************************************************/
  495. /* Boot time configuration table */
  496. static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
  497. /**
  498. * netdev_boot_setup_add - add new setup entry
  499. * @name: name of the device
  500. * @map: configured settings for the device
  501. *
  502. * Adds new setup entry to the dev_boot_setup list. The function
  503. * returns 0 on error and 1 on success. This is a generic routine to
  504. * all netdevices.
  505. */
  506. static int netdev_boot_setup_add(char *name, struct ifmap *map)
  507. {
  508. struct netdev_boot_setup *s;
  509. int i;
  510. s = dev_boot_setup;
  511. for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
  512. if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
  513. memset(s[i].name, 0, sizeof(s[i].name));
  514. strlcpy(s[i].name, name, IFNAMSIZ);
  515. memcpy(&s[i].map, map, sizeof(s[i].map));
  516. break;
  517. }
  518. }
  519. return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
  520. }
  521. /**
  522. * netdev_boot_setup_check - check boot time settings
  523. * @dev: the netdevice
  524. *
  525. * Check boot time settings for the device.
  526. * The found settings are set for the device to be used
  527. * later in the device probing.
  528. * Returns 0 if no settings found, 1 if they are.
  529. */
  530. int netdev_boot_setup_check(struct net_device *dev)
  531. {
  532. struct netdev_boot_setup *s = dev_boot_setup;
  533. int i;
  534. for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
  535. if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
  536. !strcmp(dev->name, s[i].name)) {
  537. dev->irq = s[i].map.irq;
  538. dev->base_addr = s[i].map.base_addr;
  539. dev->mem_start = s[i].map.mem_start;
  540. dev->mem_end = s[i].map.mem_end;
  541. return 1;
  542. }
  543. }
  544. return 0;
  545. }
  546. EXPORT_SYMBOL(netdev_boot_setup_check);
  547. /**
  548. * netdev_boot_base - get address from boot time settings
  549. * @prefix: prefix for network device
  550. * @unit: id for network device
  551. *
  552. * Check boot time settings for the base address of device.
  553. * The found settings are set for the device to be used
  554. * later in the device probing.
  555. * Returns 0 if no settings found.
  556. */
  557. unsigned long netdev_boot_base(const char *prefix, int unit)
  558. {
  559. const struct netdev_boot_setup *s = dev_boot_setup;
  560. char name[IFNAMSIZ];
  561. int i;
  562. sprintf(name, "%s%d", prefix, unit);
  563. /*
  564. * If device already registered then return base of 1
  565. * to indicate not to probe for this interface
  566. */
  567. if (__dev_get_by_name(&init_net, name))
  568. return 1;
  569. for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
  570. if (!strcmp(name, s[i].name))
  571. return s[i].map.base_addr;
  572. return 0;
  573. }
  574. /*
  575. * Saves at boot time configured settings for any netdevice.
  576. */
  577. int __init netdev_boot_setup(char *str)
  578. {
  579. int ints[5];
  580. struct ifmap map;
  581. str = get_options(str, ARRAY_SIZE(ints), ints);
  582. if (!str || !*str)
  583. return 0;
  584. /* Save settings */
  585. memset(&map, 0, sizeof(map));
  586. if (ints[0] > 0)
  587. map.irq = ints[1];
  588. if (ints[0] > 1)
  589. map.base_addr = ints[2];
  590. if (ints[0] > 2)
  591. map.mem_start = ints[3];
  592. if (ints[0] > 3)
  593. map.mem_end = ints[4];
  594. /* Add new entry to the list */
  595. return netdev_boot_setup_add(str, &map);
  596. }
  597. __setup("netdev=", netdev_boot_setup);
  598. /*******************************************************************************
  599. *
  600. * Device Interface Subroutines
  601. *
  602. *******************************************************************************/
  603. /**
  604. * dev_get_iflink - get 'iflink' value of a interface
  605. * @dev: targeted interface
  606. *
  607. * Indicates the ifindex the interface is linked to.
  608. * Physical interfaces have the same 'ifindex' and 'iflink' values.
  609. */
  610. int dev_get_iflink(const struct net_device *dev)
  611. {
  612. if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
  613. return dev->netdev_ops->ndo_get_iflink(dev);
  614. return dev->ifindex;
  615. }
  616. EXPORT_SYMBOL(dev_get_iflink);
  617. /**
  618. * dev_fill_metadata_dst - Retrieve tunnel egress information.
  619. * @dev: targeted interface
  620. * @skb: The packet.
  621. *
  622. * For better visibility of tunnel traffic OVS needs to retrieve
  623. * egress tunnel information for a packet. Following API allows
  624. * user to get this info.
  625. */
  626. int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
  627. {
  628. struct ip_tunnel_info *info;
  629. if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
  630. return -EINVAL;
  631. info = skb_tunnel_info_unclone(skb);
  632. if (!info)
  633. return -ENOMEM;
  634. if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
  635. return -EINVAL;
  636. return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
  637. }
  638. EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
  639. /**
  640. * __dev_get_by_name - find a device by its name
  641. * @net: the applicable net namespace
  642. * @name: name to find
  643. *
  644. * Find an interface by name. Must be called under RTNL semaphore
  645. * or @dev_base_lock. If the name is found a pointer to the device
  646. * is returned. If the name is not found then %NULL is returned. The
  647. * reference counters are not incremented so the caller must be
  648. * careful with locks.
  649. */
  650. struct net_device *__dev_get_by_name(struct net *net, const char *name)
  651. {
  652. struct net_device *dev;
  653. struct hlist_head *head = dev_name_hash(net, name);
  654. hlist_for_each_entry(dev, head, name_hlist)
  655. if (!strncmp(dev->name, name, IFNAMSIZ))
  656. return dev;
  657. return NULL;
  658. }
  659. EXPORT_SYMBOL(__dev_get_by_name);
  660. /**
  661. * dev_get_by_name_rcu - find a device by its name
  662. * @net: the applicable net namespace
  663. * @name: name to find
  664. *
  665. * Find an interface by name.
  666. * If the name is found a pointer to the device is returned.
  667. * If the name is not found then %NULL is returned.
  668. * The reference counters are not incremented so the caller must be
  669. * careful with locks. The caller must hold RCU lock.
  670. */
  671. struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
  672. {
  673. struct net_device *dev;
  674. struct hlist_head *head = dev_name_hash(net, name);
  675. hlist_for_each_entry_rcu(dev, head, name_hlist)
  676. if (!strncmp(dev->name, name, IFNAMSIZ))
  677. return dev;
  678. return NULL;
  679. }
  680. EXPORT_SYMBOL(dev_get_by_name_rcu);
  681. /**
  682. * dev_get_by_name - find a device by its name
  683. * @net: the applicable net namespace
  684. * @name: name to find
  685. *
  686. * Find an interface by name. This can be called from any
  687. * context and does its own locking. The returned handle has
  688. * the usage count incremented and the caller must use dev_put() to
  689. * release it when it is no longer needed. %NULL is returned if no
  690. * matching device is found.
  691. */
  692. struct net_device *dev_get_by_name(struct net *net, const char *name)
  693. {
  694. struct net_device *dev;
  695. rcu_read_lock();
  696. dev = dev_get_by_name_rcu(net, name);
  697. if (dev)
  698. dev_hold(dev);
  699. rcu_read_unlock();
  700. return dev;
  701. }
  702. EXPORT_SYMBOL(dev_get_by_name);
  703. /**
  704. * __dev_get_by_index - find a device by its ifindex
  705. * @net: the applicable net namespace
  706. * @ifindex: index of device
  707. *
  708. * Search for an interface by index. Returns %NULL if the device
  709. * is not found or a pointer to the device. The device has not
  710. * had its reference counter increased so the caller must be careful
  711. * about locking. The caller must hold either the RTNL semaphore
  712. * or @dev_base_lock.
  713. */
  714. struct net_device *__dev_get_by_index(struct net *net, int ifindex)
  715. {
  716. struct net_device *dev;
  717. struct hlist_head *head = dev_index_hash(net, ifindex);
  718. hlist_for_each_entry(dev, head, index_hlist)
  719. if (dev->ifindex == ifindex)
  720. return dev;
  721. return NULL;
  722. }
  723. EXPORT_SYMBOL(__dev_get_by_index);
  724. /**
  725. * dev_get_by_index_rcu - find a device by its ifindex
  726. * @net: the applicable net namespace
  727. * @ifindex: index of device
  728. *
  729. * Search for an interface by index. Returns %NULL if the device
  730. * is not found or a pointer to the device. The device has not
  731. * had its reference counter increased so the caller must be careful
  732. * about locking. The caller must hold RCU lock.
  733. */
  734. struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
  735. {
  736. struct net_device *dev;
  737. struct hlist_head *head = dev_index_hash(net, ifindex);
  738. hlist_for_each_entry_rcu(dev, head, index_hlist)
  739. if (dev->ifindex == ifindex)
  740. return dev;
  741. return NULL;
  742. }
  743. EXPORT_SYMBOL(dev_get_by_index_rcu);
  744. /**
  745. * dev_get_by_index - find a device by its ifindex
  746. * @net: the applicable net namespace
  747. * @ifindex: index of device
  748. *
  749. * Search for an interface by index. Returns NULL if the device
  750. * is not found or a pointer to the device. The device returned has
  751. * had a reference added and the pointer is safe until the user calls
  752. * dev_put to indicate they have finished with it.
  753. */
  754. struct net_device *dev_get_by_index(struct net *net, int ifindex)
  755. {
  756. struct net_device *dev;
  757. rcu_read_lock();
  758. dev = dev_get_by_index_rcu(net, ifindex);
  759. if (dev)
  760. dev_hold(dev);
  761. rcu_read_unlock();
  762. return dev;
  763. }
  764. EXPORT_SYMBOL(dev_get_by_index);
  765. /**
  766. * dev_get_by_napi_id - find a device by napi_id
  767. * @napi_id: ID of the NAPI struct
  768. *
  769. * Search for an interface by NAPI ID. Returns %NULL if the device
  770. * is not found or a pointer to the device. The device has not had
  771. * its reference counter increased so the caller must be careful
  772. * about locking. The caller must hold RCU lock.
  773. */
  774. struct net_device *dev_get_by_napi_id(unsigned int napi_id)
  775. {
  776. struct napi_struct *napi;
  777. WARN_ON_ONCE(!rcu_read_lock_held());
  778. if (napi_id < MIN_NAPI_ID)
  779. return NULL;
  780. napi = napi_by_id(napi_id);
  781. return napi ? napi->dev : NULL;
  782. }
  783. EXPORT_SYMBOL(dev_get_by_napi_id);
  784. /**
  785. * netdev_get_name - get a netdevice name, knowing its ifindex.
  786. * @net: network namespace
  787. * @name: a pointer to the buffer where the name will be stored.
  788. * @ifindex: the ifindex of the interface to get the name from.
  789. *
  790. * The use of raw_seqcount_begin() and cond_resched() before
  791. * retrying is required as we want to give the writers a chance
  792. * to complete when CONFIG_PREEMPT is not set.
  793. */
  794. int netdev_get_name(struct net *net, char *name, int ifindex)
  795. {
  796. struct net_device *dev;
  797. unsigned int seq;
  798. retry:
  799. seq = raw_seqcount_begin(&devnet_rename_seq);
  800. rcu_read_lock();
  801. dev = dev_get_by_index_rcu(net, ifindex);
  802. if (!dev) {
  803. rcu_read_unlock();
  804. return -ENODEV;
  805. }
  806. strcpy(name, dev->name);
  807. rcu_read_unlock();
  808. if (read_seqcount_retry(&devnet_rename_seq, seq)) {
  809. cond_resched();
  810. goto retry;
  811. }
  812. return 0;
  813. }
  814. /**
  815. * dev_getbyhwaddr_rcu - find a device by its hardware address
  816. * @net: the applicable net namespace
  817. * @type: media type of device
  818. * @ha: hardware address
  819. *
  820. * Search for an interface by MAC address. Returns NULL if the device
  821. * is not found or a pointer to the device.
  822. * The caller must hold RCU or RTNL.
  823. * The returned device has not had its ref count increased
  824. * and the caller must therefore be careful about locking
  825. *
  826. */
  827. struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
  828. const char *ha)
  829. {
  830. struct net_device *dev;
  831. for_each_netdev_rcu(net, dev)
  832. if (dev->type == type &&
  833. !memcmp(dev->dev_addr, ha, dev->addr_len))
  834. return dev;
  835. return NULL;
  836. }
  837. EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
  838. struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
  839. {
  840. struct net_device *dev;
  841. ASSERT_RTNL();
  842. for_each_netdev(net, dev)
  843. if (dev->type == type)
  844. return dev;
  845. return NULL;
  846. }
  847. EXPORT_SYMBOL(__dev_getfirstbyhwtype);
  848. struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
  849. {
  850. struct net_device *dev, *ret = NULL;
  851. rcu_read_lock();
  852. for_each_netdev_rcu(net, dev)
  853. if (dev->type == type) {
  854. dev_hold(dev);
  855. ret = dev;
  856. break;
  857. }
  858. rcu_read_unlock();
  859. return ret;
  860. }
  861. EXPORT_SYMBOL(dev_getfirstbyhwtype);
  862. /**
  863. * __dev_get_by_flags - find any device with given flags
  864. * @net: the applicable net namespace
  865. * @if_flags: IFF_* values
  866. * @mask: bitmask of bits in if_flags to check
  867. *
  868. * Search for any interface with the given flags. Returns NULL if a device
  869. * is not found or a pointer to the device. Must be called inside
  870. * rtnl_lock(), and result refcount is unchanged.
  871. */
  872. struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
  873. unsigned short mask)
  874. {
  875. struct net_device *dev, *ret;
  876. ASSERT_RTNL();
  877. ret = NULL;
  878. for_each_netdev(net, dev) {
  879. if (((dev->flags ^ if_flags) & mask) == 0) {
  880. ret = dev;
  881. break;
  882. }
  883. }
  884. return ret;
  885. }
  886. EXPORT_SYMBOL(__dev_get_by_flags);
  887. /**
  888. * dev_valid_name - check if name is okay for network device
  889. * @name: name string
  890. *
  891. * Network device names need to be valid file names to
  892. * to allow sysfs to work. We also disallow any kind of
  893. * whitespace.
  894. */
  895. bool dev_valid_name(const char *name)
  896. {
  897. if (*name == '\0')
  898. return false;
  899. if (strlen(name) >= IFNAMSIZ)
  900. return false;
  901. if (!strcmp(name, ".") || !strcmp(name, ".."))
  902. return false;
  903. while (*name) {
  904. if (*name == '/' || *name == ':' || isspace(*name))
  905. return false;
  906. name++;
  907. }
  908. return true;
  909. }
  910. EXPORT_SYMBOL(dev_valid_name);
  911. /**
  912. * __dev_alloc_name - allocate a name for a device
  913. * @net: network namespace to allocate the device name in
  914. * @name: name format string
  915. * @buf: scratch buffer and result name string
  916. *
  917. * Passed a format string - eg "lt%d" it will try and find a suitable
  918. * id. It scans list of devices to build up a free map, then chooses
  919. * the first empty slot. The caller must hold the dev_base or rtnl lock
  920. * while allocating the name and adding the device in order to avoid
  921. * duplicates.
  922. * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
  923. * Returns the number of the unit assigned or a negative errno code.
  924. */
  925. static int __dev_alloc_name(struct net *net, const char *name, char *buf)
  926. {
  927. int i = 0;
  928. const char *p;
  929. const int max_netdevices = 8*PAGE_SIZE;
  930. unsigned long *inuse;
  931. struct net_device *d;
  932. if (!dev_valid_name(name))
  933. return -EINVAL;
  934. p = strchr(name, '%');
  935. if (p) {
  936. /*
  937. * Verify the string as this thing may have come from
  938. * the user. There must be either one "%d" and no other "%"
  939. * characters.
  940. */
  941. if (p[1] != 'd' || strchr(p + 2, '%'))
  942. return -EINVAL;
  943. /* Use one page as a bit array of possible slots */
  944. inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
  945. if (!inuse)
  946. return -ENOMEM;
  947. for_each_netdev(net, d) {
  948. if (!sscanf(d->name, name, &i))
  949. continue;
  950. if (i < 0 || i >= max_netdevices)
  951. continue;
  952. /* avoid cases where sscanf is not exact inverse of printf */
  953. snprintf(buf, IFNAMSIZ, name, i);
  954. if (!strncmp(buf, d->name, IFNAMSIZ))
  955. set_bit(i, inuse);
  956. }
  957. i = find_first_zero_bit(inuse, max_netdevices);
  958. free_page((unsigned long) inuse);
  959. }
  960. snprintf(buf, IFNAMSIZ, name, i);
  961. if (!__dev_get_by_name(net, buf))
  962. return i;
  963. /* It is possible to run out of possible slots
  964. * when the name is long and there isn't enough space left
  965. * for the digits, or if all bits are used.
  966. */
  967. return p ? -ENFILE : -EEXIST;
  968. }
  969. static int dev_alloc_name_ns(struct net *net,
  970. struct net_device *dev,
  971. const char *name)
  972. {
  973. char buf[IFNAMSIZ];
  974. int ret;
  975. BUG_ON(!net);
  976. ret = __dev_alloc_name(net, name, buf);
  977. if (ret >= 0)
  978. strlcpy(dev->name, buf, IFNAMSIZ);
  979. return ret;
  980. }
  981. /**
  982. * dev_alloc_name - allocate a name for a device
  983. * @dev: device
  984. * @name: name format string
  985. *
  986. * Passed a format string - eg "lt%d" it will try and find a suitable
  987. * id. It scans list of devices to build up a free map, then chooses
  988. * the first empty slot. The caller must hold the dev_base or rtnl lock
  989. * while allocating the name and adding the device in order to avoid
  990. * duplicates.
  991. * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
  992. * Returns the number of the unit assigned or a negative errno code.
  993. */
  994. int dev_alloc_name(struct net_device *dev, const char *name)
  995. {
  996. return dev_alloc_name_ns(dev_net(dev), dev, name);
  997. }
  998. EXPORT_SYMBOL(dev_alloc_name);
  999. int dev_get_valid_name(struct net *net, struct net_device *dev,
  1000. const char *name)
  1001. {
  1002. return dev_alloc_name_ns(net, dev, name);
  1003. }
  1004. EXPORT_SYMBOL(dev_get_valid_name);
  1005. /**
  1006. * dev_change_name - change name of a device
  1007. * @dev: device
  1008. * @newname: name (or format string) must be at least IFNAMSIZ
  1009. *
  1010. * Change name of a device, can pass format strings "eth%d".
  1011. * for wildcarding.
  1012. */
  1013. int dev_change_name(struct net_device *dev, const char *newname)
  1014. {
  1015. unsigned char old_assign_type;
  1016. char oldname[IFNAMSIZ];
  1017. int err = 0;
  1018. int ret;
  1019. struct net *net;
  1020. ASSERT_RTNL();
  1021. BUG_ON(!dev_net(dev));
  1022. net = dev_net(dev);
  1023. if (dev->flags & IFF_UP)
  1024. return -EBUSY;
  1025. write_seqcount_begin(&devnet_rename_seq);
  1026. if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
  1027. write_seqcount_end(&devnet_rename_seq);
  1028. return 0;
  1029. }
  1030. memcpy(oldname, dev->name, IFNAMSIZ);
  1031. err = dev_get_valid_name(net, dev, newname);
  1032. if (err < 0) {
  1033. write_seqcount_end(&devnet_rename_seq);
  1034. return err;
  1035. }
  1036. if (oldname[0] && !strchr(oldname, '%'))
  1037. netdev_info(dev, "renamed from %s\n", oldname);
  1038. old_assign_type = dev->name_assign_type;
  1039. dev->name_assign_type = NET_NAME_RENAMED;
  1040. rollback:
  1041. ret = device_rename(&dev->dev, dev->name);
  1042. if (ret) {
  1043. memcpy(dev->name, oldname, IFNAMSIZ);
  1044. dev->name_assign_type = old_assign_type;
  1045. write_seqcount_end(&devnet_rename_seq);
  1046. return ret;
  1047. }
  1048. write_seqcount_end(&devnet_rename_seq);
  1049. netdev_adjacent_rename_links(dev, oldname);
  1050. write_lock_bh(&dev_base_lock);
  1051. hlist_del_rcu(&dev->name_hlist);
  1052. write_unlock_bh(&dev_base_lock);
  1053. synchronize_rcu();
  1054. write_lock_bh(&dev_base_lock);
  1055. hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
  1056. write_unlock_bh(&dev_base_lock);
  1057. ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
  1058. ret = notifier_to_errno(ret);
  1059. if (ret) {
  1060. /* err >= 0 after dev_alloc_name() or stores the first errno */
  1061. if (err >= 0) {
  1062. err = ret;
  1063. write_seqcount_begin(&devnet_rename_seq);
  1064. memcpy(dev->name, oldname, IFNAMSIZ);
  1065. memcpy(oldname, newname, IFNAMSIZ);
  1066. dev->name_assign_type = old_assign_type;
  1067. old_assign_type = NET_NAME_RENAMED;
  1068. goto rollback;
  1069. } else {
  1070. pr_err("%s: name change rollback failed: %d\n",
  1071. dev->name, ret);
  1072. }
  1073. }
  1074. return err;
  1075. }
  1076. /**
  1077. * dev_set_alias - change ifalias of a device
  1078. * @dev: device
  1079. * @alias: name up to IFALIASZ
  1080. * @len: limit of bytes to copy from info
  1081. *
  1082. * Set ifalias for a device,
  1083. */
  1084. int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
  1085. {
  1086. struct dev_ifalias *new_alias = NULL;
  1087. if (len >= IFALIASZ)
  1088. return -EINVAL;
  1089. if (len) {
  1090. new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
  1091. if (!new_alias)
  1092. return -ENOMEM;
  1093. memcpy(new_alias->ifalias, alias, len);
  1094. new_alias->ifalias[len] = 0;
  1095. }
  1096. mutex_lock(&ifalias_mutex);
  1097. rcu_swap_protected(dev->ifalias, new_alias,
  1098. mutex_is_locked(&ifalias_mutex));
  1099. mutex_unlock(&ifalias_mutex);
  1100. if (new_alias)
  1101. kfree_rcu(new_alias, rcuhead);
  1102. return len;
  1103. }
  1104. /**
  1105. * dev_get_alias - get ifalias of a device
  1106. * @dev: device
  1107. * @name: buffer to store name of ifalias
  1108. * @len: size of buffer
  1109. *
  1110. * get ifalias for a device. Caller must make sure dev cannot go
  1111. * away, e.g. rcu read lock or own a reference count to device.
  1112. */
  1113. int dev_get_alias(const struct net_device *dev, char *name, size_t len)
  1114. {
  1115. const struct dev_ifalias *alias;
  1116. int ret = 0;
  1117. rcu_read_lock();
  1118. alias = rcu_dereference(dev->ifalias);
  1119. if (alias)
  1120. ret = snprintf(name, len, "%s", alias->ifalias);
  1121. rcu_read_unlock();
  1122. return ret;
  1123. }
  1124. /**
  1125. * netdev_features_change - device changes features
  1126. * @dev: device to cause notification
  1127. *
  1128. * Called to indicate a device has changed features.
  1129. */
  1130. void netdev_features_change(struct net_device *dev)
  1131. {
  1132. call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
  1133. }
  1134. EXPORT_SYMBOL(netdev_features_change);
  1135. /**
  1136. * netdev_state_change - device changes state
  1137. * @dev: device to cause notification
  1138. *
  1139. * Called to indicate a device has changed state. This function calls
  1140. * the notifier chains for netdev_chain and sends a NEWLINK message
  1141. * to the routing socket.
  1142. */
  1143. void netdev_state_change(struct net_device *dev)
  1144. {
  1145. if (dev->flags & IFF_UP) {
  1146. struct netdev_notifier_change_info change_info = {
  1147. .info.dev = dev,
  1148. };
  1149. call_netdevice_notifiers_info(NETDEV_CHANGE,
  1150. &change_info.info);
  1151. rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
  1152. }
  1153. }
  1154. EXPORT_SYMBOL(netdev_state_change);
  1155. /**
  1156. * netdev_notify_peers - notify network peers about existence of @dev
  1157. * @dev: network device
  1158. *
  1159. * Generate traffic such that interested network peers are aware of
  1160. * @dev, such as by generating a gratuitous ARP. This may be used when
  1161. * a device wants to inform the rest of the network about some sort of
  1162. * reconfiguration such as a failover event or virtual machine
  1163. * migration.
  1164. */
  1165. void netdev_notify_peers(struct net_device *dev)
  1166. {
  1167. rtnl_lock();
  1168. call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
  1169. call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
  1170. rtnl_unlock();
  1171. }
  1172. EXPORT_SYMBOL(netdev_notify_peers);
  1173. static int __dev_open(struct net_device *dev)
  1174. {
  1175. const struct net_device_ops *ops = dev->netdev_ops;
  1176. int ret;
  1177. ASSERT_RTNL();
  1178. if (!netif_device_present(dev))
  1179. return -ENODEV;
  1180. /* Block netpoll from trying to do any rx path servicing.
  1181. * If we don't do this there is a chance ndo_poll_controller
  1182. * or ndo_poll may be running while we open the device
  1183. */
  1184. netpoll_poll_disable(dev);
  1185. ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
  1186. ret = notifier_to_errno(ret);
  1187. if (ret)
  1188. return ret;
  1189. set_bit(__LINK_STATE_START, &dev->state);
  1190. if (ops->ndo_validate_addr)
  1191. ret = ops->ndo_validate_addr(dev);
  1192. if (!ret && ops->ndo_open)
  1193. ret = ops->ndo_open(dev);
  1194. netpoll_poll_enable(dev);
  1195. if (ret)
  1196. clear_bit(__LINK_STATE_START, &dev->state);
  1197. else {
  1198. dev->flags |= IFF_UP;
  1199. dev_set_rx_mode(dev);
  1200. dev_activate(dev);
  1201. add_device_randomness(dev->dev_addr, dev->addr_len);
  1202. }
  1203. return ret;
  1204. }
  1205. /**
  1206. * dev_open - prepare an interface for use.
  1207. * @dev: device to open
  1208. *
  1209. * Takes a device from down to up state. The device's private open
  1210. * function is invoked and then the multicast lists are loaded. Finally
  1211. * the device is moved into the up state and a %NETDEV_UP message is
  1212. * sent to the netdev notifier chain.
  1213. *
  1214. * Calling this function on an active interface is a nop. On a failure
  1215. * a negative errno code is returned.
  1216. */
  1217. int dev_open(struct net_device *dev)
  1218. {
  1219. int ret;
  1220. if (dev->flags & IFF_UP)
  1221. return 0;
  1222. ret = __dev_open(dev);
  1223. if (ret < 0)
  1224. return ret;
  1225. rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
  1226. call_netdevice_notifiers(NETDEV_UP, dev);
  1227. return ret;
  1228. }
  1229. EXPORT_SYMBOL(dev_open);
  1230. static void __dev_close_many(struct list_head *head)
  1231. {
  1232. struct net_device *dev;
  1233. ASSERT_RTNL();
  1234. might_sleep();
  1235. list_for_each_entry(dev, head, close_list) {
  1236. /* Temporarily disable netpoll until the interface is down */
  1237. netpoll_poll_disable(dev);
  1238. call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
  1239. clear_bit(__LINK_STATE_START, &dev->state);
  1240. /* Synchronize to scheduled poll. We cannot touch poll list, it
  1241. * can be even on different cpu. So just clear netif_running().
  1242. *
  1243. * dev->stop() will invoke napi_disable() on all of it's
  1244. * napi_struct instances on this device.
  1245. */
  1246. smp_mb__after_atomic(); /* Commit netif_running(). */
  1247. }
  1248. dev_deactivate_many(head);
  1249. list_for_each_entry(dev, head, close_list) {
  1250. const struct net_device_ops *ops = dev->netdev_ops;
  1251. /*
  1252. * Call the device specific close. This cannot fail.
  1253. * Only if device is UP
  1254. *
  1255. * We allow it to be called even after a DETACH hot-plug
  1256. * event.
  1257. */
  1258. if (ops->ndo_stop)
  1259. ops->ndo_stop(dev);
  1260. dev->flags &= ~IFF_UP;
  1261. netpoll_poll_enable(dev);
  1262. }
  1263. }
  1264. static void __dev_close(struct net_device *dev)
  1265. {
  1266. LIST_HEAD(single);
  1267. list_add(&dev->close_list, &single);
  1268. __dev_close_many(&single);
  1269. list_del(&single);
  1270. }
  1271. void dev_close_many(struct list_head *head, bool unlink)
  1272. {
  1273. struct net_device *dev, *tmp;
  1274. /* Remove the devices that don't need to be closed */
  1275. list_for_each_entry_safe(dev, tmp, head, close_list)
  1276. if (!(dev->flags & IFF_UP))
  1277. list_del_init(&dev->close_list);
  1278. __dev_close_many(head);
  1279. list_for_each_entry_safe(dev, tmp, head, close_list) {
  1280. rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
  1281. call_netdevice_notifiers(NETDEV_DOWN, dev);
  1282. if (unlink)
  1283. list_del_init(&dev->close_list);
  1284. }
  1285. }
  1286. EXPORT_SYMBOL(dev_close_many);
  1287. /**
  1288. * dev_close - shutdown an interface.
  1289. * @dev: device to shutdown
  1290. *
  1291. * This function moves an active device into down state. A
  1292. * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
  1293. * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
  1294. * chain.
  1295. */
  1296. void dev_close(struct net_device *dev)
  1297. {
  1298. if (dev->flags & IFF_UP) {
  1299. LIST_HEAD(single);
  1300. list_add(&dev->close_list, &single);
  1301. dev_close_many(&single, true);
  1302. list_del(&single);
  1303. }
  1304. }
  1305. EXPORT_SYMBOL(dev_close);
  1306. /**
  1307. * dev_disable_lro - disable Large Receive Offload on a device
  1308. * @dev: device
  1309. *
  1310. * Disable Large Receive Offload (LRO) on a net device. Must be
  1311. * called under RTNL. This is needed if received packets may be
  1312. * forwarded to another interface.
  1313. */
  1314. void dev_disable_lro(struct net_device *dev)
  1315. {
  1316. struct net_device *lower_dev;
  1317. struct list_head *iter;
  1318. dev->wanted_features &= ~NETIF_F_LRO;
  1319. netdev_update_features(dev);
  1320. if (unlikely(dev->features & NETIF_F_LRO))
  1321. netdev_WARN(dev, "failed to disable LRO!\n");
  1322. netdev_for_each_lower_dev(dev, lower_dev, iter)
  1323. dev_disable_lro(lower_dev);
  1324. }
  1325. EXPORT_SYMBOL(dev_disable_lro);
  1326. static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
  1327. struct net_device *dev)
  1328. {
  1329. struct netdev_notifier_info info = {
  1330. .dev = dev,
  1331. };
  1332. return nb->notifier_call(nb, val, &info);
  1333. }
  1334. static int dev_boot_phase = 1;
  1335. /**
  1336. * register_netdevice_notifier - register a network notifier block
  1337. * @nb: notifier
  1338. *
  1339. * Register a notifier to be called when network device events occur.
  1340. * The notifier passed is linked into the kernel structures and must
  1341. * not be reused until it has been unregistered. A negative errno code
  1342. * is returned on a failure.
  1343. *
  1344. * When registered all registration and up events are replayed
  1345. * to the new notifier to allow device to have a race free
  1346. * view of the network device list.
  1347. */
  1348. int register_netdevice_notifier(struct notifier_block *nb)
  1349. {
  1350. struct net_device *dev;
  1351. struct net_device *last;
  1352. struct net *net;
  1353. int err;
  1354. rtnl_lock();
  1355. err = raw_notifier_chain_register(&netdev_chain, nb);
  1356. if (err)
  1357. goto unlock;
  1358. if (dev_boot_phase)
  1359. goto unlock;
  1360. for_each_net(net) {
  1361. for_each_netdev(net, dev) {
  1362. err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
  1363. err = notifier_to_errno(err);
  1364. if (err)
  1365. goto rollback;
  1366. if (!(dev->flags & IFF_UP))
  1367. continue;
  1368. call_netdevice_notifier(nb, NETDEV_UP, dev);
  1369. }
  1370. }
  1371. unlock:
  1372. rtnl_unlock();
  1373. return err;
  1374. rollback:
  1375. last = dev;
  1376. for_each_net(net) {
  1377. for_each_netdev(net, dev) {
  1378. if (dev == last)
  1379. goto outroll;
  1380. if (dev->flags & IFF_UP) {
  1381. call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
  1382. dev);
  1383. call_netdevice_notifier(nb, NETDEV_DOWN, dev);
  1384. }
  1385. call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
  1386. }
  1387. }
  1388. outroll:
  1389. raw_notifier_chain_unregister(&netdev_chain, nb);
  1390. goto unlock;
  1391. }
  1392. EXPORT_SYMBOL(register_netdevice_notifier);
  1393. /**
  1394. * unregister_netdevice_notifier - unregister a network notifier block
  1395. * @nb: notifier
  1396. *
  1397. * Unregister a notifier previously registered by
  1398. * register_netdevice_notifier(). The notifier is unlinked into the
  1399. * kernel structures and may then be reused. A negative errno code
  1400. * is returned on a failure.
  1401. *
  1402. * After unregistering unregister and down device events are synthesized
  1403. * for all devices on the device list to the removed notifier to remove
  1404. * the need for special case cleanup code.
  1405. */
  1406. int unregister_netdevice_notifier(struct notifier_block *nb)
  1407. {
  1408. struct net_device *dev;
  1409. struct net *net;
  1410. int err;
  1411. rtnl_lock();
  1412. err = raw_notifier_chain_unregister(&netdev_chain, nb);
  1413. if (err)
  1414. goto unlock;
  1415. for_each_net(net) {
  1416. for_each_netdev(net, dev) {
  1417. if (dev->flags & IFF_UP) {
  1418. call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
  1419. dev);
  1420. call_netdevice_notifier(nb, NETDEV_DOWN, dev);
  1421. }
  1422. call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
  1423. }
  1424. }
  1425. unlock:
  1426. rtnl_unlock();
  1427. return err;
  1428. }
  1429. EXPORT_SYMBOL(unregister_netdevice_notifier);
  1430. /**
  1431. * call_netdevice_notifiers_info - call all network notifier blocks
  1432. * @val: value passed unmodified to notifier function
  1433. * @dev: net_device pointer passed unmodified to notifier function
  1434. * @info: notifier information data
  1435. *
  1436. * Call all network notifier blocks. Parameters and return value
  1437. * are as for raw_notifier_call_chain().
  1438. */
  1439. static int call_netdevice_notifiers_info(unsigned long val,
  1440. struct netdev_notifier_info *info)
  1441. {
  1442. ASSERT_RTNL();
  1443. return raw_notifier_call_chain(&netdev_chain, val, info);
  1444. }
  1445. /**
  1446. * call_netdevice_notifiers - call all network notifier blocks
  1447. * @val: value passed unmodified to notifier function
  1448. * @dev: net_device pointer passed unmodified to notifier function
  1449. *
  1450. * Call all network notifier blocks. Parameters and return value
  1451. * are as for raw_notifier_call_chain().
  1452. */
  1453. int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
  1454. {
  1455. struct netdev_notifier_info info = {
  1456. .dev = dev,
  1457. };
  1458. return call_netdevice_notifiers_info(val, &info);
  1459. }
  1460. EXPORT_SYMBOL(call_netdevice_notifiers);
  1461. #ifdef CONFIG_NET_INGRESS
  1462. static struct static_key ingress_needed __read_mostly;
  1463. void net_inc_ingress_queue(void)
  1464. {
  1465. static_key_slow_inc(&ingress_needed);
  1466. }
  1467. EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
  1468. void net_dec_ingress_queue(void)
  1469. {
  1470. static_key_slow_dec(&ingress_needed);
  1471. }
  1472. EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
  1473. #endif
  1474. #ifdef CONFIG_NET_EGRESS
  1475. static struct static_key egress_needed __read_mostly;
  1476. void net_inc_egress_queue(void)
  1477. {
  1478. static_key_slow_inc(&egress_needed);
  1479. }
  1480. EXPORT_SYMBOL_GPL(net_inc_egress_queue);
  1481. void net_dec_egress_queue(void)
  1482. {
  1483. static_key_slow_dec(&egress_needed);
  1484. }
  1485. EXPORT_SYMBOL_GPL(net_dec_egress_queue);
  1486. #endif
  1487. static struct static_key netstamp_needed __read_mostly;
  1488. #ifdef HAVE_JUMP_LABEL
  1489. static atomic_t netstamp_needed_deferred;
  1490. static atomic_t netstamp_wanted;
  1491. static void netstamp_clear(struct work_struct *work)
  1492. {
  1493. int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
  1494. int wanted;
  1495. wanted = atomic_add_return(deferred, &netstamp_wanted);
  1496. if (wanted > 0)
  1497. static_key_enable(&netstamp_needed);
  1498. else
  1499. static_key_disable(&netstamp_needed);
  1500. }
  1501. static DECLARE_WORK(netstamp_work, netstamp_clear);
  1502. #endif
  1503. void net_enable_timestamp(void)
  1504. {
  1505. #ifdef HAVE_JUMP_LABEL
  1506. int wanted;
  1507. while (1) {
  1508. wanted = atomic_read(&netstamp_wanted);
  1509. if (wanted <= 0)
  1510. break;
  1511. if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
  1512. return;
  1513. }
  1514. atomic_inc(&netstamp_needed_deferred);
  1515. schedule_work(&netstamp_work);
  1516. #else
  1517. static_key_slow_inc(&netstamp_needed);
  1518. #endif
  1519. }
  1520. EXPORT_SYMBOL(net_enable_timestamp);
  1521. void net_disable_timestamp(void)
  1522. {
  1523. #ifdef HAVE_JUMP_LABEL
  1524. int wanted;
  1525. while (1) {
  1526. wanted = atomic_read(&netstamp_wanted);
  1527. if (wanted <= 1)
  1528. break;
  1529. if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
  1530. return;
  1531. }
  1532. atomic_dec(&netstamp_needed_deferred);
  1533. schedule_work(&netstamp_work);
  1534. #else
  1535. static_key_slow_dec(&netstamp_needed);
  1536. #endif
  1537. }
  1538. EXPORT_SYMBOL(net_disable_timestamp);
  1539. static inline void net_timestamp_set(struct sk_buff *skb)
  1540. {
  1541. skb->tstamp = 0;
  1542. if (static_key_false(&netstamp_needed))
  1543. __net_timestamp(skb);
  1544. }
  1545. #define net_timestamp_check(COND, SKB) \
  1546. if (static_key_false(&netstamp_needed)) { \
  1547. if ((COND) && !(SKB)->tstamp) \
  1548. __net_timestamp(SKB); \
  1549. } \
  1550. bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
  1551. {
  1552. unsigned int len;
  1553. if (!(dev->flags & IFF_UP))
  1554. return false;
  1555. len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
  1556. if (skb->len <= len)
  1557. return true;
  1558. /* if TSO is enabled, we don't care about the length as the packet
  1559. * could be forwarded without being segmented before
  1560. */
  1561. if (skb_is_gso(skb))
  1562. return true;
  1563. return false;
  1564. }
  1565. EXPORT_SYMBOL_GPL(is_skb_forwardable);
  1566. int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
  1567. {
  1568. int ret = ____dev_forward_skb(dev, skb);
  1569. if (likely(!ret)) {
  1570. skb->protocol = eth_type_trans(skb, dev);
  1571. skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
  1572. }
  1573. return ret;
  1574. }
  1575. EXPORT_SYMBOL_GPL(__dev_forward_skb);
  1576. /**
  1577. * dev_forward_skb - loopback an skb to another netif
  1578. *
  1579. * @dev: destination network device
  1580. * @skb: buffer to forward
  1581. *
  1582. * return values:
  1583. * NET_RX_SUCCESS (no congestion)
  1584. * NET_RX_DROP (packet was dropped, but freed)
  1585. *
  1586. * dev_forward_skb can be used for injecting an skb from the
  1587. * start_xmit function of one device into the receive queue
  1588. * of another device.
  1589. *
  1590. * The receiving device may be in another namespace, so
  1591. * we have to clear all information in the skb that could
  1592. * impact namespace isolation.
  1593. */
  1594. int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
  1595. {
  1596. return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
  1597. }
  1598. EXPORT_SYMBOL_GPL(dev_forward_skb);
  1599. static inline int deliver_skb(struct sk_buff *skb,
  1600. struct packet_type *pt_prev,
  1601. struct net_device *orig_dev)
  1602. {
  1603. if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
  1604. return -ENOMEM;
  1605. refcount_inc(&skb->users);
  1606. return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
  1607. }
  1608. static inline void deliver_ptype_list_skb(struct sk_buff *skb,
  1609. struct packet_type **pt,
  1610. struct net_device *orig_dev,
  1611. __be16 type,
  1612. struct list_head *ptype_list)
  1613. {
  1614. struct packet_type *ptype, *pt_prev = *pt;
  1615. list_for_each_entry_rcu(ptype, ptype_list, list) {
  1616. if (ptype->type != type)
  1617. continue;
  1618. if (pt_prev)
  1619. deliver_skb(skb, pt_prev, orig_dev);
  1620. pt_prev = ptype;
  1621. }
  1622. *pt = pt_prev;
  1623. }
  1624. static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
  1625. {
  1626. if (!ptype->af_packet_priv || !skb->sk)
  1627. return false;
  1628. if (ptype->id_match)
  1629. return ptype->id_match(ptype, skb->sk);
  1630. else if ((struct sock *)ptype->af_packet_priv == skb->sk)
  1631. return true;
  1632. return false;
  1633. }
  1634. /*
  1635. * Support routine. Sends outgoing frames to any network
  1636. * taps currently in use.
  1637. */
  1638. void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
  1639. {
  1640. struct packet_type *ptype;
  1641. struct sk_buff *skb2 = NULL;
  1642. struct packet_type *pt_prev = NULL;
  1643. struct list_head *ptype_list = &ptype_all;
  1644. rcu_read_lock();
  1645. again:
  1646. list_for_each_entry_rcu(ptype, ptype_list, list) {
  1647. /* Never send packets back to the socket
  1648. * they originated from - MvS (miquels@drinkel.ow.org)
  1649. */
  1650. if (skb_loop_sk(ptype, skb))
  1651. continue;
  1652. if (pt_prev) {
  1653. deliver_skb(skb2, pt_prev, skb->dev);
  1654. pt_prev = ptype;
  1655. continue;
  1656. }
  1657. /* need to clone skb, done only once */
  1658. skb2 = skb_clone(skb, GFP_ATOMIC);
  1659. if (!skb2)
  1660. goto out_unlock;
  1661. net_timestamp_set(skb2);
  1662. /* skb->nh should be correctly
  1663. * set by sender, so that the second statement is
  1664. * just protection against buggy protocols.
  1665. */
  1666. skb_reset_mac_header(skb2);
  1667. if (skb_network_header(skb2) < skb2->data ||
  1668. skb_network_header(skb2) > skb_tail_pointer(skb2)) {
  1669. net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
  1670. ntohs(skb2->protocol),
  1671. dev->name);
  1672. skb_reset_network_header(skb2);
  1673. }
  1674. skb2->transport_header = skb2->network_header;
  1675. skb2->pkt_type = PACKET_OUTGOING;
  1676. pt_prev = ptype;
  1677. }
  1678. if (ptype_list == &ptype_all) {
  1679. ptype_list = &dev->ptype_all;
  1680. goto again;
  1681. }
  1682. out_unlock:
  1683. if (pt_prev) {
  1684. if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
  1685. pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
  1686. else
  1687. kfree_skb(skb2);
  1688. }
  1689. rcu_read_unlock();
  1690. }
  1691. EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
  1692. /**
  1693. * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
  1694. * @dev: Network device
  1695. * @txq: number of queues available
  1696. *
  1697. * If real_num_tx_queues is changed the tc mappings may no longer be
  1698. * valid. To resolve this verify the tc mapping remains valid and if
  1699. * not NULL the mapping. With no priorities mapping to this
  1700. * offset/count pair it will no longer be used. In the worst case TC0
  1701. * is invalid nothing can be done so disable priority mappings. If is
  1702. * expected that drivers will fix this mapping if they can before
  1703. * calling netif_set_real_num_tx_queues.
  1704. */
  1705. static void netif_setup_tc(struct net_device *dev, unsigned int txq)
  1706. {
  1707. int i;
  1708. struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
  1709. /* If TC0 is invalidated disable TC mapping */
  1710. if (tc->offset + tc->count > txq) {
  1711. pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
  1712. dev->num_tc = 0;
  1713. return;
  1714. }
  1715. /* Invalidated prio to tc mappings set to TC0 */
  1716. for (i = 1; i < TC_BITMASK + 1; i++) {
  1717. int q = netdev_get_prio_tc_map(dev, i);
  1718. tc = &dev->tc_to_txq[q];
  1719. if (tc->offset + tc->count > txq) {
  1720. pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
  1721. i, q);
  1722. netdev_set_prio_tc_map(dev, i, 0);
  1723. }
  1724. }
  1725. }
  1726. int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
  1727. {
  1728. if (dev->num_tc) {
  1729. struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
  1730. int i;
  1731. for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
  1732. if ((txq - tc->offset) < tc->count)
  1733. return i;
  1734. }
  1735. return -1;
  1736. }
  1737. return 0;
  1738. }
  1739. EXPORT_SYMBOL(netdev_txq_to_tc);
  1740. #ifdef CONFIG_XPS
  1741. static DEFINE_MUTEX(xps_map_mutex);
  1742. #define xmap_dereference(P) \
  1743. rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
  1744. static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
  1745. int tci, u16 index)
  1746. {
  1747. struct xps_map *map = NULL;
  1748. int pos;
  1749. if (dev_maps)
  1750. map = xmap_dereference(dev_maps->cpu_map[tci]);
  1751. if (!map)
  1752. return false;
  1753. for (pos = map->len; pos--;) {
  1754. if (map->queues[pos] != index)
  1755. continue;
  1756. if (map->len > 1) {
  1757. map->queues[pos] = map->queues[--map->len];
  1758. break;
  1759. }
  1760. RCU_INIT_POINTER(dev_maps->cpu_map[tci], NULL);
  1761. kfree_rcu(map, rcu);
  1762. return false;
  1763. }
  1764. return true;
  1765. }
  1766. static bool remove_xps_queue_cpu(struct net_device *dev,
  1767. struct xps_dev_maps *dev_maps,
  1768. int cpu, u16 offset, u16 count)
  1769. {
  1770. int num_tc = dev->num_tc ? : 1;
  1771. bool active = false;
  1772. int tci;
  1773. for (tci = cpu * num_tc; num_tc--; tci++) {
  1774. int i, j;
  1775. for (i = count, j = offset; i--; j++) {
  1776. if (!remove_xps_queue(dev_maps, cpu, j))
  1777. break;
  1778. }
  1779. active |= i < 0;
  1780. }
  1781. return active;
  1782. }
  1783. static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
  1784. u16 count)
  1785. {
  1786. struct xps_dev_maps *dev_maps;
  1787. int cpu, i;
  1788. bool active = false;
  1789. mutex_lock(&xps_map_mutex);
  1790. dev_maps = xmap_dereference(dev->xps_maps);
  1791. if (!dev_maps)
  1792. goto out_no_maps;
  1793. for_each_possible_cpu(cpu)
  1794. active |= remove_xps_queue_cpu(dev, dev_maps, cpu,
  1795. offset, count);
  1796. if (!active) {
  1797. RCU_INIT_POINTER(dev->xps_maps, NULL);
  1798. kfree_rcu(dev_maps, rcu);
  1799. }
  1800. for (i = offset + (count - 1); count--; i--)
  1801. netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
  1802. NUMA_NO_NODE);
  1803. out_no_maps:
  1804. mutex_unlock(&xps_map_mutex);
  1805. }
  1806. static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
  1807. {
  1808. netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
  1809. }
  1810. static struct xps_map *expand_xps_map(struct xps_map *map,
  1811. int cpu, u16 index)
  1812. {
  1813. struct xps_map *new_map;
  1814. int alloc_len = XPS_MIN_MAP_ALLOC;
  1815. int i, pos;
  1816. for (pos = 0; map && pos < map->len; pos++) {
  1817. if (map->queues[pos] != index)
  1818. continue;
  1819. return map;
  1820. }
  1821. /* Need to add queue to this CPU's existing map */
  1822. if (map) {
  1823. if (pos < map->alloc_len)
  1824. return map;
  1825. alloc_len = map->alloc_len * 2;
  1826. }
  1827. /* Need to allocate new map to store queue on this CPU's map */
  1828. new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
  1829. cpu_to_node(cpu));
  1830. if (!new_map)
  1831. return NULL;
  1832. for (i = 0; i < pos; i++)
  1833. new_map->queues[i] = map->queues[i];
  1834. new_map->alloc_len = alloc_len;
  1835. new_map->len = pos;
  1836. return new_map;
  1837. }
  1838. int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
  1839. u16 index)
  1840. {
  1841. struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
  1842. int i, cpu, tci, numa_node_id = -2;
  1843. int maps_sz, num_tc = 1, tc = 0;
  1844. struct xps_map *map, *new_map;
  1845. bool active = false;
  1846. if (dev->num_tc) {
  1847. num_tc = dev->num_tc;
  1848. tc = netdev_txq_to_tc(dev, index);
  1849. if (tc < 0)
  1850. return -EINVAL;
  1851. }
  1852. maps_sz = XPS_DEV_MAPS_SIZE(num_tc);
  1853. if (maps_sz < L1_CACHE_BYTES)
  1854. maps_sz = L1_CACHE_BYTES;
  1855. mutex_lock(&xps_map_mutex);
  1856. dev_maps = xmap_dereference(dev->xps_maps);
  1857. /* allocate memory for queue storage */
  1858. for_each_cpu_and(cpu, cpu_online_mask, mask) {
  1859. if (!new_dev_maps)
  1860. new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
  1861. if (!new_dev_maps) {
  1862. mutex_unlock(&xps_map_mutex);
  1863. return -ENOMEM;
  1864. }
  1865. tci = cpu * num_tc + tc;
  1866. map = dev_maps ? xmap_dereference(dev_maps->cpu_map[tci]) :
  1867. NULL;
  1868. map = expand_xps_map(map, cpu, index);
  1869. if (!map)
  1870. goto error;
  1871. RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
  1872. }
  1873. if (!new_dev_maps)
  1874. goto out_no_new_maps;
  1875. for_each_possible_cpu(cpu) {
  1876. /* copy maps belonging to foreign traffic classes */
  1877. for (i = tc, tci = cpu * num_tc; dev_maps && i--; tci++) {
  1878. /* fill in the new device map from the old device map */
  1879. map = xmap_dereference(dev_maps->cpu_map[tci]);
  1880. RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
  1881. }
  1882. /* We need to explicitly update tci as prevous loop
  1883. * could break out early if dev_maps is NULL.
  1884. */
  1885. tci = cpu * num_tc + tc;
  1886. if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
  1887. /* add queue to CPU maps */
  1888. int pos = 0;
  1889. map = xmap_dereference(new_dev_maps->cpu_map[tci]);
  1890. while ((pos < map->len) && (map->queues[pos] != index))
  1891. pos++;
  1892. if (pos == map->len)
  1893. map->queues[map->len++] = index;
  1894. #ifdef CONFIG_NUMA
  1895. if (numa_node_id == -2)
  1896. numa_node_id = cpu_to_node(cpu);
  1897. else if (numa_node_id != cpu_to_node(cpu))
  1898. numa_node_id = -1;
  1899. #endif
  1900. } else if (dev_maps) {
  1901. /* fill in the new device map from the old device map */
  1902. map = xmap_dereference(dev_maps->cpu_map[tci]);
  1903. RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
  1904. }
  1905. /* copy maps belonging to foreign traffic classes */
  1906. for (i = num_tc - tc, tci++; dev_maps && --i; tci++) {
  1907. /* fill in the new device map from the old device map */
  1908. map = xmap_dereference(dev_maps->cpu_map[tci]);
  1909. RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
  1910. }
  1911. }
  1912. rcu_assign_pointer(dev->xps_maps, new_dev_maps);
  1913. /* Cleanup old maps */
  1914. if (!dev_maps)
  1915. goto out_no_old_maps;
  1916. for_each_possible_cpu(cpu) {
  1917. for (i = num_tc, tci = cpu * num_tc; i--; tci++) {
  1918. new_map = xmap_dereference(new_dev_maps->cpu_map[tci]);
  1919. map = xmap_dereference(dev_maps->cpu_map[tci]);
  1920. if (map && map != new_map)
  1921. kfree_rcu(map, rcu);
  1922. }
  1923. }
  1924. kfree_rcu(dev_maps, rcu);
  1925. out_no_old_maps:
  1926. dev_maps = new_dev_maps;
  1927. active = true;
  1928. out_no_new_maps:
  1929. /* update Tx queue numa node */
  1930. netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
  1931. (numa_node_id >= 0) ? numa_node_id :
  1932. NUMA_NO_NODE);
  1933. if (!dev_maps)
  1934. goto out_no_maps;
  1935. /* removes queue from unused CPUs */
  1936. for_each_possible_cpu(cpu) {
  1937. for (i = tc, tci = cpu * num_tc; i--; tci++)
  1938. active |= remove_xps_queue(dev_maps, tci, index);
  1939. if (!cpumask_test_cpu(cpu, mask) || !cpu_online(cpu))
  1940. active |= remove_xps_queue(dev_maps, tci, index);
  1941. for (i = num_tc - tc, tci++; --i; tci++)
  1942. active |= remove_xps_queue(dev_maps, tci, index);
  1943. }
  1944. /* free map if not active */
  1945. if (!active) {
  1946. RCU_INIT_POINTER(dev->xps_maps, NULL);
  1947. kfree_rcu(dev_maps, rcu);
  1948. }
  1949. out_no_maps:
  1950. mutex_unlock(&xps_map_mutex);
  1951. return 0;
  1952. error:
  1953. /* remove any maps that we added */
  1954. for_each_possible_cpu(cpu) {
  1955. for (i = num_tc, tci = cpu * num_tc; i--; tci++) {
  1956. new_map = xmap_dereference(new_dev_maps->cpu_map[tci]);
  1957. map = dev_maps ?
  1958. xmap_dereference(dev_maps->cpu_map[tci]) :
  1959. NULL;
  1960. if (new_map && new_map != map)
  1961. kfree(new_map);
  1962. }
  1963. }
  1964. mutex_unlock(&xps_map_mutex);
  1965. kfree(new_dev_maps);
  1966. return -ENOMEM;
  1967. }
  1968. EXPORT_SYMBOL(netif_set_xps_queue);
  1969. #endif
  1970. void netdev_reset_tc(struct net_device *dev)
  1971. {
  1972. #ifdef CONFIG_XPS
  1973. netif_reset_xps_queues_gt(dev, 0);
  1974. #endif
  1975. dev->num_tc = 0;
  1976. memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
  1977. memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
  1978. }
  1979. EXPORT_SYMBOL(netdev_reset_tc);
  1980. int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
  1981. {
  1982. if (tc >= dev->num_tc)
  1983. return -EINVAL;
  1984. #ifdef CONFIG_XPS
  1985. netif_reset_xps_queues(dev, offset, count);
  1986. #endif
  1987. dev->tc_to_txq[tc].count = count;
  1988. dev->tc_to_txq[tc].offset = offset;
  1989. return 0;
  1990. }
  1991. EXPORT_SYMBOL(netdev_set_tc_queue);
  1992. int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
  1993. {
  1994. if (num_tc > TC_MAX_QUEUE)
  1995. return -EINVAL;
  1996. #ifdef CONFIG_XPS
  1997. netif_reset_xps_queues_gt(dev, 0);
  1998. #endif
  1999. dev->num_tc = num_tc;
  2000. return 0;
  2001. }
  2002. EXPORT_SYMBOL(netdev_set_num_tc);
  2003. /*
  2004. * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
  2005. * greater then real_num_tx_queues stale skbs on the qdisc must be flushed.
  2006. */
  2007. int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
  2008. {
  2009. int rc;
  2010. if (txq < 1 || txq > dev->num_tx_queues)
  2011. return -EINVAL;
  2012. if (dev->reg_state == NETREG_REGISTERED ||
  2013. dev->reg_state == NETREG_UNREGISTERING) {
  2014. ASSERT_RTNL();
  2015. rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
  2016. txq);
  2017. if (rc)
  2018. return rc;
  2019. if (dev->num_tc)
  2020. netif_setup_tc(dev, txq);
  2021. if (txq < dev->real_num_tx_queues) {
  2022. qdisc_reset_all_tx_gt(dev, txq);
  2023. #ifdef CONFIG_XPS
  2024. netif_reset_xps_queues_gt(dev, txq);
  2025. #endif
  2026. }
  2027. }
  2028. dev->real_num_tx_queues = txq;
  2029. return 0;
  2030. }
  2031. EXPORT_SYMBOL(netif_set_real_num_tx_queues);
  2032. #ifdef CONFIG_SYSFS
  2033. /**
  2034. * netif_set_real_num_rx_queues - set actual number of RX queues used
  2035. * @dev: Network device
  2036. * @rxq: Actual number of RX queues
  2037. *
  2038. * This must be called either with the rtnl_lock held or before
  2039. * registration of the net device. Returns 0 on success, or a
  2040. * negative error code. If called before registration, it always
  2041. * succeeds.
  2042. */
  2043. int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
  2044. {
  2045. int rc;
  2046. if (rxq < 1 || rxq > dev->num_rx_queues)
  2047. return -EINVAL;
  2048. if (dev->reg_state == NETREG_REGISTERED) {
  2049. ASSERT_RTNL();
  2050. rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
  2051. rxq);
  2052. if (rc)
  2053. return rc;
  2054. }
  2055. dev->real_num_rx_queues = rxq;
  2056. return 0;
  2057. }
  2058. EXPORT_SYMBOL(netif_set_real_num_rx_queues);
  2059. #endif
  2060. /**
  2061. * netif_get_num_default_rss_queues - default number of RSS queues
  2062. *
  2063. * This routine should set an upper limit on the number of RSS queues
  2064. * used by default by multiqueue devices.
  2065. */
  2066. int netif_get_num_default_rss_queues(void)
  2067. {
  2068. return is_kdump_kernel() ?
  2069. 1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
  2070. }
  2071. EXPORT_SYMBOL(netif_get_num_default_rss_queues);
  2072. static void __netif_reschedule(struct Qdisc *q)
  2073. {
  2074. struct softnet_data *sd;
  2075. unsigned long flags;
  2076. local_irq_save(flags);
  2077. sd = this_cpu_ptr(&softnet_data);
  2078. q->next_sched = NULL;
  2079. *sd->output_queue_tailp = q;
  2080. sd->output_queue_tailp = &q->next_sched;
  2081. raise_softirq_irqoff(NET_TX_SOFTIRQ);
  2082. local_irq_restore(flags);
  2083. }
  2084. void __netif_schedule(struct Qdisc *q)
  2085. {
  2086. if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
  2087. __netif_reschedule(q);
  2088. }
  2089. EXPORT_SYMBOL(__netif_schedule);
  2090. struct dev_kfree_skb_cb {
  2091. enum skb_free_reason reason;
  2092. };
  2093. static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
  2094. {
  2095. return (struct dev_kfree_skb_cb *)skb->cb;
  2096. }
  2097. void netif_schedule_queue(struct netdev_queue *txq)
  2098. {
  2099. rcu_read_lock();
  2100. if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
  2101. struct Qdisc *q = rcu_dereference(txq->qdisc);
  2102. __netif_schedule(q);
  2103. }
  2104. rcu_read_unlock();
  2105. }
  2106. EXPORT_SYMBOL(netif_schedule_queue);
  2107. void netif_tx_wake_queue(struct netdev_queue *dev_queue)
  2108. {
  2109. if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
  2110. struct Qdisc *q;
  2111. rcu_read_lock();
  2112. q = rcu_dereference(dev_queue->qdisc);
  2113. __netif_schedule(q);
  2114. rcu_read_unlock();
  2115. }
  2116. }
  2117. EXPORT_SYMBOL(netif_tx_wake_queue);
  2118. void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
  2119. {
  2120. unsigned long flags;
  2121. if (unlikely(!skb))
  2122. return;
  2123. if (likely(refcount_read(&skb->users) == 1)) {
  2124. smp_rmb();
  2125. refcount_set(&skb->users, 0);
  2126. } else if (likely(!refcount_dec_and_test(&skb->users))) {
  2127. return;
  2128. }
  2129. get_kfree_skb_cb(skb)->reason = reason;
  2130. local_irq_save(flags);
  2131. skb->next = __this_cpu_read(softnet_data.completion_queue);
  2132. __this_cpu_write(softnet_data.completion_queue, skb);
  2133. raise_softirq_irqoff(NET_TX_SOFTIRQ);
  2134. local_irq_restore(flags);
  2135. }
  2136. EXPORT_SYMBOL(__dev_kfree_skb_irq);
  2137. void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
  2138. {
  2139. if (in_irq() || irqs_disabled())
  2140. __dev_kfree_skb_irq(skb, reason);
  2141. else
  2142. dev_kfree_skb(skb);
  2143. }
  2144. EXPORT_SYMBOL(__dev_kfree_skb_any);
  2145. /**
  2146. * netif_device_detach - mark device as removed
  2147. * @dev: network device
  2148. *
  2149. * Mark device as removed from system and therefore no longer available.
  2150. */
  2151. void netif_device_detach(struct net_device *dev)
  2152. {
  2153. if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
  2154. netif_running(dev)) {
  2155. netif_tx_stop_all_queues(dev);
  2156. }
  2157. }
  2158. EXPORT_SYMBOL(netif_device_detach);
  2159. /**
  2160. * netif_device_attach - mark device as attached
  2161. * @dev: network device
  2162. *
  2163. * Mark device as attached from system and restart if needed.
  2164. */
  2165. void netif_device_attach(struct net_device *dev)
  2166. {
  2167. if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
  2168. netif_running(dev)) {
  2169. netif_tx_wake_all_queues(dev);
  2170. __netdev_watchdog_up(dev);
  2171. }
  2172. }
  2173. EXPORT_SYMBOL(netif_device_attach);
  2174. /*
  2175. * Returns a Tx hash based on the given packet descriptor a Tx queues' number
  2176. * to be used as a distribution range.
  2177. */
  2178. u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
  2179. unsigned int num_tx_queues)
  2180. {
  2181. u32 hash;
  2182. u16 qoffset = 0;
  2183. u16 qcount = num_tx_queues;
  2184. if (skb_rx_queue_recorded(skb)) {
  2185. hash = skb_get_rx_queue(skb);
  2186. while (unlikely(hash >= num_tx_queues))
  2187. hash -= num_tx_queues;
  2188. return hash;
  2189. }
  2190. if (dev->num_tc) {
  2191. u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
  2192. qoffset = dev->tc_to_txq[tc].offset;
  2193. qcount = dev->tc_to_txq[tc].count;
  2194. }
  2195. return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
  2196. }
  2197. EXPORT_SYMBOL(__skb_tx_hash);
  2198. static void skb_warn_bad_offload(const struct sk_buff *skb)
  2199. {
  2200. static const netdev_features_t null_features;
  2201. struct net_device *dev = skb->dev;
  2202. const char *name = "";
  2203. if (!net_ratelimit())
  2204. return;
  2205. if (dev) {
  2206. if (dev->dev.parent)
  2207. name = dev_driver_string(dev->dev.parent);
  2208. else
  2209. name = netdev_name(dev);
  2210. }
  2211. WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
  2212. "gso_type=%d ip_summed=%d\n",
  2213. name, dev ? &dev->features : &null_features,
  2214. skb->sk ? &skb->sk->sk_route_caps : &null_features,
  2215. skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
  2216. skb_shinfo(skb)->gso_type, skb->ip_summed);
  2217. }
  2218. /*
  2219. * Invalidate hardware checksum when packet is to be mangled, and
  2220. * complete checksum manually on outgoing path.
  2221. */
  2222. int skb_checksum_help(struct sk_buff *skb)
  2223. {
  2224. __wsum csum;
  2225. int ret = 0, offset;
  2226. if (skb->ip_summed == CHECKSUM_COMPLETE)
  2227. goto out_set_summed;
  2228. if (unlikely(skb_shinfo(skb)->gso_size)) {
  2229. skb_warn_bad_offload(skb);
  2230. return -EINVAL;
  2231. }
  2232. /* Before computing a checksum, we should make sure no frag could
  2233. * be modified by an external entity : checksum could be wrong.
  2234. */
  2235. if (skb_has_shared_frag(skb)) {
  2236. ret = __skb_linearize(skb);
  2237. if (ret)
  2238. goto out;
  2239. }
  2240. offset = skb_checksum_start_offset(skb);
  2241. BUG_ON(offset >= skb_headlen(skb));
  2242. csum = skb_checksum(skb, offset, skb->len - offset, 0);
  2243. offset += skb->csum_offset;
  2244. BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
  2245. if (skb_cloned(skb) &&
  2246. !skb_clone_writable(skb, offset + sizeof(__sum16))) {
  2247. ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
  2248. if (ret)
  2249. goto out;
  2250. }
  2251. *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
  2252. out_set_summed:
  2253. skb->ip_summed = CHECKSUM_NONE;
  2254. out:
  2255. return ret;
  2256. }
  2257. EXPORT_SYMBOL(skb_checksum_help);
  2258. int skb_crc32c_csum_help(struct sk_buff *skb)
  2259. {
  2260. __le32 crc32c_csum;
  2261. int ret = 0, offset, start;
  2262. if (skb->ip_summed != CHECKSUM_PARTIAL)
  2263. goto out;
  2264. if (unlikely(skb_is_gso(skb)))
  2265. goto out;
  2266. /* Before computing a checksum, we should make sure no frag could
  2267. * be modified by an external entity : checksum could be wrong.
  2268. */
  2269. if (unlikely(skb_has_shared_frag(skb))) {
  2270. ret = __skb_linearize(skb);
  2271. if (ret)
  2272. goto out;
  2273. }
  2274. start = skb_checksum_start_offset(skb);
  2275. offset = start + offsetof(struct sctphdr, checksum);
  2276. if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
  2277. ret = -EINVAL;
  2278. goto out;
  2279. }
  2280. if (skb_cloned(skb) &&
  2281. !skb_clone_writable(skb, offset + sizeof(__le32))) {
  2282. ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
  2283. if (ret)
  2284. goto out;
  2285. }
  2286. crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
  2287. skb->len - start, ~(__u32)0,
  2288. crc32c_csum_stub));
  2289. *(__le32 *)(skb->data + offset) = crc32c_csum;
  2290. skb->ip_summed = CHECKSUM_NONE;
  2291. skb->csum_not_inet = 0;
  2292. out:
  2293. return ret;
  2294. }
  2295. __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
  2296. {
  2297. __be16 type = skb->protocol;
  2298. /* Tunnel gso handlers can set protocol to ethernet. */
  2299. if (type == htons(ETH_P_TEB)) {
  2300. struct ethhdr *eth;
  2301. if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
  2302. return 0;
  2303. eth = (struct ethhdr *)skb_mac_header(skb);
  2304. type = eth->h_proto;
  2305. }
  2306. return __vlan_get_protocol(skb, type, depth);
  2307. }
  2308. /**
  2309. * skb_mac_gso_segment - mac layer segmentation handler.
  2310. * @skb: buffer to segment
  2311. * @features: features for the output path (see dev->features)
  2312. */
  2313. struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
  2314. netdev_features_t features)
  2315. {
  2316. struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
  2317. struct packet_offload *ptype;
  2318. int vlan_depth = skb->mac_len;
  2319. __be16 type = skb_network_protocol(skb, &vlan_depth);
  2320. if (unlikely(!type))
  2321. return ERR_PTR(-EINVAL);
  2322. __skb_pull(skb, vlan_depth);
  2323. rcu_read_lock();
  2324. list_for_each_entry_rcu(ptype, &offload_base, list) {
  2325. if (ptype->type == type && ptype->callbacks.gso_segment) {
  2326. segs = ptype->callbacks.gso_segment(skb, features);
  2327. break;
  2328. }
  2329. }
  2330. rcu_read_unlock();
  2331. __skb_push(skb, skb->data - skb_mac_header(skb));
  2332. return segs;
  2333. }
  2334. EXPORT_SYMBOL(skb_mac_gso_segment);
  2335. /* openvswitch calls this on rx path, so we need a different check.
  2336. */
  2337. static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
  2338. {
  2339. if (tx_path)
  2340. return skb->ip_summed != CHECKSUM_PARTIAL &&
  2341. skb->ip_summed != CHECKSUM_UNNECESSARY;
  2342. return skb->ip_summed == CHECKSUM_NONE;
  2343. }
  2344. /**
  2345. * __skb_gso_segment - Perform segmentation on skb.
  2346. * @skb: buffer to segment
  2347. * @features: features for the output path (see dev->features)
  2348. * @tx_path: whether it is called in TX path
  2349. *
  2350. * This function segments the given skb and returns a list of segments.
  2351. *
  2352. * It may return NULL if the skb requires no segmentation. This is
  2353. * only possible when GSO is used for verifying header integrity.
  2354. *
  2355. * Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb.
  2356. */
  2357. struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
  2358. netdev_features_t features, bool tx_path)
  2359. {
  2360. struct sk_buff *segs;
  2361. if (unlikely(skb_needs_check(skb, tx_path))) {
  2362. int err;
  2363. /* We're going to init ->check field in TCP or UDP header */
  2364. err = skb_cow_head(skb, 0);
  2365. if (err < 0)
  2366. return ERR_PTR(err);
  2367. }
  2368. /* Only report GSO partial support if it will enable us to
  2369. * support segmentation on this frame without needing additional
  2370. * work.
  2371. */
  2372. if (features & NETIF_F_GSO_PARTIAL) {
  2373. netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
  2374. struct net_device *dev = skb->dev;
  2375. partial_features |= dev->features & dev->gso_partial_features;
  2376. if (!skb_gso_ok(skb, features | partial_features))
  2377. features &= ~NETIF_F_GSO_PARTIAL;
  2378. }
  2379. BUILD_BUG_ON(SKB_SGO_CB_OFFSET +
  2380. sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
  2381. SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
  2382. SKB_GSO_CB(skb)->encap_level = 0;
  2383. skb_reset_mac_header(skb);
  2384. skb_reset_mac_len(skb);
  2385. segs = skb_mac_gso_segment(skb, features);
  2386. if (unlikely(skb_needs_check(skb, tx_path)))
  2387. skb_warn_bad_offload(skb);
  2388. return segs;
  2389. }
  2390. EXPORT_SYMBOL(__skb_gso_segment);
  2391. /* Take action when hardware reception checksum errors are detected. */
  2392. #ifdef CONFIG_BUG
  2393. void netdev_rx_csum_fault(struct net_device *dev)
  2394. {
  2395. if (net_ratelimit()) {
  2396. pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
  2397. dump_stack();
  2398. }
  2399. }
  2400. EXPORT_SYMBOL(netdev_rx_csum_fault);
  2401. #endif
  2402. /* Actually, we should eliminate this check as soon as we know, that:
  2403. * 1. IOMMU is present and allows to map all the memory.
  2404. * 2. No high memory really exists on this machine.
  2405. */
  2406. static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
  2407. {
  2408. #ifdef CONFIG_HIGHMEM
  2409. int i;
  2410. if (!(dev->features & NETIF_F_HIGHDMA)) {
  2411. for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  2412. skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
  2413. if (PageHighMem(skb_frag_page(frag)))
  2414. return 1;
  2415. }
  2416. }
  2417. if (PCI_DMA_BUS_IS_PHYS) {
  2418. struct device *pdev = dev->dev.parent;
  2419. if (!pdev)
  2420. return 0;
  2421. for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  2422. skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
  2423. dma_addr_t addr = page_to_phys(skb_frag_page(frag));
  2424. if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
  2425. return 1;
  2426. }
  2427. }
  2428. #endif
  2429. return 0;
  2430. }
  2431. /* If MPLS offload request, verify we are testing hardware MPLS features
  2432. * instead of standard features for the netdev.
  2433. */
  2434. #if IS_ENABLED(CONFIG_NET_MPLS_GSO)
  2435. static netdev_features_t net_mpls_features(struct sk_buff *skb,
  2436. netdev_features_t features,
  2437. __be16 type)
  2438. {
  2439. if (eth_p_mpls(type))
  2440. features &= skb->dev->mpls_features;
  2441. return features;
  2442. }
  2443. #else
  2444. static netdev_features_t net_mpls_features(struct sk_buff *skb,
  2445. netdev_features_t features,
  2446. __be16 type)
  2447. {
  2448. return features;
  2449. }
  2450. #endif
  2451. static netdev_features_t harmonize_features(struct sk_buff *skb,
  2452. netdev_features_t features)
  2453. {
  2454. int tmp;
  2455. __be16 type;
  2456. type = skb_network_protocol(skb, &tmp);
  2457. features = net_mpls_features(skb, features, type);
  2458. if (skb->ip_summed != CHECKSUM_NONE &&
  2459. !can_checksum_protocol(features, type)) {
  2460. features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
  2461. }
  2462. if (illegal_highdma(skb->dev, skb))
  2463. features &= ~NETIF_F_SG;
  2464. return features;
  2465. }
  2466. netdev_features_t passthru_features_check(struct sk_buff *skb,
  2467. struct net_device *dev,
  2468. netdev_features_t features)
  2469. {
  2470. return features;
  2471. }
  2472. EXPORT_SYMBOL(passthru_features_check);
  2473. static netdev_features_t dflt_features_check(const struct sk_buff *skb,
  2474. struct net_device *dev,
  2475. netdev_features_t features)
  2476. {
  2477. return vlan_features_check(skb, features);
  2478. }
  2479. static netdev_features_t gso_features_check(const struct sk_buff *skb,
  2480. struct net_device *dev,
  2481. netdev_features_t features)
  2482. {
  2483. u16 gso_segs = skb_shinfo(skb)->gso_segs;
  2484. if (gso_segs > dev->gso_max_segs)
  2485. return features & ~NETIF_F_GSO_MASK;
  2486. /* Support for GSO partial features requires software
  2487. * intervention before we can actually process the packets
  2488. * so we need to strip support for any partial features now
  2489. * and we can pull them back in after we have partially
  2490. * segmented the frame.
  2491. */
  2492. if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
  2493. features &= ~dev->gso_partial_features;
  2494. /* Make sure to clear the IPv4 ID mangling feature if the
  2495. * IPv4 header has the potential to be fragmented.
  2496. */
  2497. if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
  2498. struct iphdr *iph = skb->encapsulation ?
  2499. inner_ip_hdr(skb) : ip_hdr(skb);
  2500. if (!(iph->frag_off & htons(IP_DF)))
  2501. features &= ~NETIF_F_TSO_MANGLEID;
  2502. }
  2503. return features;
  2504. }
  2505. netdev_features_t netif_skb_features(struct sk_buff *skb)
  2506. {
  2507. struct net_device *dev = skb->dev;
  2508. netdev_features_t features = dev->features;
  2509. if (skb_is_gso(skb))
  2510. features = gso_features_check(skb, dev, features);
  2511. /* If encapsulation offload request, verify we are testing
  2512. * hardware encapsulation features instead of standard
  2513. * features for the netdev
  2514. */
  2515. if (skb->encapsulation)
  2516. features &= dev->hw_enc_features;
  2517. if (skb_vlan_tagged(skb))
  2518. features = netdev_intersect_features(features,
  2519. dev->vlan_features |
  2520. NETIF_F_HW_VLAN_CTAG_TX |
  2521. NETIF_F_HW_VLAN_STAG_TX);
  2522. if (dev->netdev_ops->ndo_features_check)
  2523. features &= dev->netdev_ops->ndo_features_check(skb, dev,
  2524. features);
  2525. else
  2526. features &= dflt_features_check(skb, dev, features);
  2527. return harmonize_features(skb, features);
  2528. }
  2529. EXPORT_SYMBOL(netif_skb_features);
  2530. static int xmit_one(struct sk_buff *skb, struct net_device *dev,
  2531. struct netdev_queue *txq, bool more)
  2532. {
  2533. unsigned int len;
  2534. int rc;
  2535. if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
  2536. dev_queue_xmit_nit(skb, dev);
  2537. len = skb->len;
  2538. trace_net_dev_start_xmit(skb, dev);
  2539. rc = netdev_start_xmit(skb, dev, txq, more);
  2540. trace_net_dev_xmit(skb, rc, dev, len);
  2541. return rc;
  2542. }
  2543. struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
  2544. struct netdev_queue *txq, int *ret)
  2545. {
  2546. struct sk_buff *skb = first;
  2547. int rc = NETDEV_TX_OK;
  2548. while (skb) {
  2549. struct sk_buff *next = skb->next;
  2550. skb->next = NULL;
  2551. rc = xmit_one(skb, dev, txq, next != NULL);
  2552. if (unlikely(!dev_xmit_complete(rc))) {
  2553. skb->next = next;
  2554. goto out;
  2555. }
  2556. skb = next;
  2557. if (netif_xmit_stopped(txq) && skb) {
  2558. rc = NETDEV_TX_BUSY;
  2559. break;
  2560. }
  2561. }
  2562. out:
  2563. *ret = rc;
  2564. return skb;
  2565. }
  2566. static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
  2567. netdev_features_t features)
  2568. {
  2569. if (skb_vlan_tag_present(skb) &&
  2570. !vlan_hw_offload_capable(features, skb->vlan_proto))
  2571. skb = __vlan_hwaccel_push_inside(skb);
  2572. return skb;
  2573. }
  2574. int skb_csum_hwoffload_help(struct sk_buff *skb,
  2575. const netdev_features_t features)
  2576. {
  2577. if (unlikely(skb->csum_not_inet))
  2578. return !!(features & NETIF_F_SCTP_CRC) ? 0 :
  2579. skb_crc32c_csum_help(skb);
  2580. return !!(features & NETIF_F_CSUM_MASK) ? 0 : skb_checksum_help(skb);
  2581. }
  2582. EXPORT_SYMBOL(skb_csum_hwoffload_help);
  2583. static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
  2584. {
  2585. netdev_features_t features;
  2586. features = netif_skb_features(skb);
  2587. skb = validate_xmit_vlan(skb, features);
  2588. if (unlikely(!skb))
  2589. goto out_null;
  2590. if (netif_needs_gso(skb, features)) {
  2591. struct sk_buff *segs;
  2592. segs = skb_gso_segment(skb, features);
  2593. if (IS_ERR(segs)) {
  2594. goto out_kfree_skb;
  2595. } else if (segs) {
  2596. consume_skb(skb);
  2597. skb = segs;
  2598. }
  2599. } else {
  2600. if (skb_needs_linearize(skb, features) &&
  2601. __skb_linearize(skb))
  2602. goto out_kfree_skb;
  2603. if (validate_xmit_xfrm(skb, features))
  2604. goto out_kfree_skb;
  2605. /* If packet is not checksummed and device does not
  2606. * support checksumming for this protocol, complete
  2607. * checksumming here.
  2608. */
  2609. if (skb->ip_summed == CHECKSUM_PARTIAL) {
  2610. if (skb->encapsulation)
  2611. skb_set_inner_transport_header(skb,
  2612. skb_checksum_start_offset(skb));
  2613. else
  2614. skb_set_transport_header(skb,
  2615. skb_checksum_start_offset(skb));
  2616. if (skb_csum_hwoffload_help(skb, features))
  2617. goto out_kfree_skb;
  2618. }
  2619. }
  2620. return skb;
  2621. out_kfree_skb:
  2622. kfree_skb(skb);
  2623. out_null:
  2624. atomic_long_inc(&dev->tx_dropped);
  2625. return NULL;
  2626. }
  2627. struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
  2628. {
  2629. struct sk_buff *next, *head = NULL, *tail;
  2630. for (; skb != NULL; skb = next) {
  2631. next = skb->next;
  2632. skb->next = NULL;
  2633. /* in case skb wont be segmented, point to itself */
  2634. skb->prev = skb;
  2635. skb = validate_xmit_skb(skb, dev);
  2636. if (!skb)
  2637. continue;
  2638. if (!head)
  2639. head = skb;
  2640. else
  2641. tail->next = skb;
  2642. /* If skb was segmented, skb->prev points to
  2643. * the last segment. If not, it still contains skb.
  2644. */
  2645. tail = skb->prev;
  2646. }
  2647. return head;
  2648. }
  2649. EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
  2650. static void qdisc_pkt_len_init(struct sk_buff *skb)
  2651. {
  2652. const struct skb_shared_info *shinfo = skb_shinfo(skb);
  2653. qdisc_skb_cb(skb)->pkt_len = skb->len;
  2654. /* To get more precise estimation of bytes sent on wire,
  2655. * we add to pkt_len the headers size of all segments
  2656. */
  2657. if (shinfo->gso_size) {
  2658. unsigned int hdr_len;
  2659. u16 gso_segs = shinfo->gso_segs;
  2660. /* mac layer + network layer */
  2661. hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
  2662. /* + transport layer */
  2663. if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
  2664. hdr_len += tcp_hdrlen(skb);
  2665. else
  2666. hdr_len += sizeof(struct udphdr);
  2667. if (shinfo->gso_type & SKB_GSO_DODGY)
  2668. gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
  2669. shinfo->gso_size);
  2670. qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
  2671. }
  2672. }
  2673. static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
  2674. struct net_device *dev,
  2675. struct netdev_queue *txq)
  2676. {
  2677. spinlock_t *root_lock = qdisc_lock(q);
  2678. struct sk_buff *to_free = NULL;
  2679. bool contended;
  2680. int rc;
  2681. qdisc_calculate_pkt_len(skb, q);
  2682. /*
  2683. * Heuristic to force contended enqueues to serialize on a
  2684. * separate lock before trying to get qdisc main lock.
  2685. * This permits qdisc->running owner to get the lock more
  2686. * often and dequeue packets faster.
  2687. */
  2688. contended = qdisc_is_running(q);
  2689. if (unlikely(contended))
  2690. spin_lock(&q->busylock);
  2691. spin_lock(root_lock);
  2692. if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
  2693. __qdisc_drop(skb, &to_free);
  2694. rc = NET_XMIT_DROP;
  2695. } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
  2696. qdisc_run_begin(q)) {
  2697. /*
  2698. * This is a work-conserving queue; there are no old skbs
  2699. * waiting to be sent out; and the qdisc is not running -
  2700. * xmit the skb directly.
  2701. */
  2702. qdisc_bstats_update(q, skb);
  2703. if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
  2704. if (unlikely(contended)) {
  2705. spin_unlock(&q->busylock);
  2706. contended = false;
  2707. }
  2708. __qdisc_run(q);
  2709. } else
  2710. qdisc_run_end(q);
  2711. rc = NET_XMIT_SUCCESS;
  2712. } else {
  2713. rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
  2714. if (qdisc_run_begin(q)) {
  2715. if (unlikely(contended)) {
  2716. spin_unlock(&q->busylock);
  2717. contended = false;
  2718. }
  2719. __qdisc_run(q);
  2720. }
  2721. }
  2722. spin_unlock(root_lock);
  2723. if (unlikely(to_free))
  2724. kfree_skb_list(to_free);
  2725. if (unlikely(contended))
  2726. spin_unlock(&q->busylock);
  2727. return rc;
  2728. }
  2729. #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
  2730. static void skb_update_prio(struct sk_buff *skb)
  2731. {
  2732. struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
  2733. if (!skb->priority && skb->sk && map) {
  2734. unsigned int prioidx =
  2735. sock_cgroup_prioidx(&skb->sk->sk_cgrp_data);
  2736. if (prioidx < map->priomap_len)
  2737. skb->priority = map->priomap[prioidx];
  2738. }
  2739. }
  2740. #else
  2741. #define skb_update_prio(skb)
  2742. #endif
  2743. DEFINE_PER_CPU(int, xmit_recursion);
  2744. EXPORT_SYMBOL(xmit_recursion);
  2745. /**
  2746. * dev_loopback_xmit - loop back @skb
  2747. * @net: network namespace this loopback is happening in
  2748. * @sk: sk needed to be a netfilter okfn
  2749. * @skb: buffer to transmit
  2750. */
  2751. int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
  2752. {
  2753. skb_reset_mac_header(skb);
  2754. __skb_pull(skb, skb_network_offset(skb));
  2755. skb->pkt_type = PACKET_LOOPBACK;
  2756. skb->ip_summed = CHECKSUM_UNNECESSARY;
  2757. WARN_ON(!skb_dst(skb));
  2758. skb_dst_force(skb);
  2759. netif_rx_ni(skb);
  2760. return 0;
  2761. }
  2762. EXPORT_SYMBOL(dev_loopback_xmit);
  2763. #ifdef CONFIG_NET_EGRESS
  2764. static struct sk_buff *
  2765. sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
  2766. {
  2767. struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress);
  2768. struct tcf_result cl_res;
  2769. if (!miniq)
  2770. return skb;
  2771. /* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
  2772. mini_qdisc_bstats_cpu_update(miniq, skb);
  2773. switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
  2774. case TC_ACT_OK:
  2775. case TC_ACT_RECLASSIFY:
  2776. skb->tc_index = TC_H_MIN(cl_res.classid);
  2777. break;
  2778. case TC_ACT_SHOT:
  2779. mini_qdisc_qstats_cpu_drop(miniq);
  2780. *ret = NET_XMIT_DROP;
  2781. kfree_skb(skb);
  2782. return NULL;
  2783. case TC_ACT_STOLEN:
  2784. case TC_ACT_QUEUED:
  2785. case TC_ACT_TRAP:
  2786. *ret = NET_XMIT_SUCCESS;
  2787. consume_skb(skb);
  2788. return NULL;
  2789. case TC_ACT_REDIRECT:
  2790. /* No need to push/pop skb's mac_header here on egress! */
  2791. skb_do_redirect(skb);
  2792. *ret = NET_XMIT_SUCCESS;
  2793. return NULL;
  2794. default:
  2795. break;
  2796. }
  2797. return skb;
  2798. }
  2799. #endif /* CONFIG_NET_EGRESS */
  2800. static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
  2801. {
  2802. #ifdef CONFIG_XPS
  2803. struct xps_dev_maps *dev_maps;
  2804. struct xps_map *map;
  2805. int queue_index = -1;
  2806. rcu_read_lock();
  2807. dev_maps = rcu_dereference(dev->xps_maps);
  2808. if (dev_maps) {
  2809. unsigned int tci = skb->sender_cpu - 1;
  2810. if (dev->num_tc) {
  2811. tci *= dev->num_tc;
  2812. tci += netdev_get_prio_tc_map(dev, skb->priority);
  2813. }
  2814. map = rcu_dereference(dev_maps->cpu_map[tci]);
  2815. if (map) {
  2816. if (map->len == 1)
  2817. queue_index = map->queues[0];
  2818. else
  2819. queue_index = map->queues[reciprocal_scale(skb_get_hash(skb),
  2820. map->len)];
  2821. if (unlikely(queue_index >= dev->real_num_tx_queues))
  2822. queue_index = -1;
  2823. }
  2824. }
  2825. rcu_read_unlock();
  2826. return queue_index;
  2827. #else
  2828. return -1;
  2829. #endif
  2830. }
  2831. static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
  2832. {
  2833. struct sock *sk = skb->sk;
  2834. int queue_index = sk_tx_queue_get(sk);
  2835. if (queue_index < 0 || skb->ooo_okay ||
  2836. queue_index >= dev->real_num_tx_queues) {
  2837. int new_index = get_xps_queue(dev, skb);
  2838. if (new_index < 0)
  2839. new_index = skb_tx_hash(dev, skb);
  2840. if (queue_index != new_index && sk &&
  2841. sk_fullsock(sk) &&
  2842. rcu_access_pointer(sk->sk_dst_cache))
  2843. sk_tx_queue_set(sk, new_index);
  2844. queue_index = new_index;
  2845. }
  2846. return queue_index;
  2847. }
  2848. struct netdev_queue *netdev_pick_tx(struct net_device *dev,
  2849. struct sk_buff *skb,
  2850. void *accel_priv)
  2851. {
  2852. int queue_index = 0;
  2853. #ifdef CONFIG_XPS
  2854. u32 sender_cpu = skb->sender_cpu - 1;
  2855. if (sender_cpu >= (u32)NR_CPUS)
  2856. skb->sender_cpu = raw_smp_processor_id() + 1;
  2857. #endif
  2858. if (dev->real_num_tx_queues != 1) {
  2859. const struct net_device_ops *ops = dev->netdev_ops;
  2860. if (ops->ndo_select_queue)
  2861. queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
  2862. __netdev_pick_tx);
  2863. else
  2864. queue_index = __netdev_pick_tx(dev, skb);
  2865. if (!accel_priv)
  2866. queue_index = netdev_cap_txqueue(dev, queue_index);
  2867. }
  2868. skb_set_queue_mapping(skb, queue_index);
  2869. return netdev_get_tx_queue(dev, queue_index);
  2870. }
  2871. /**
  2872. * __dev_queue_xmit - transmit a buffer
  2873. * @skb: buffer to transmit
  2874. * @accel_priv: private data used for L2 forwarding offload
  2875. *
  2876. * Queue a buffer for transmission to a network device. The caller must
  2877. * have set the device and priority and built the buffer before calling
  2878. * this function. The function can be called from an interrupt.
  2879. *
  2880. * A negative errno code is returned on a failure. A success does not
  2881. * guarantee the frame will be transmitted as it may be dropped due
  2882. * to congestion or traffic shaping.
  2883. *
  2884. * -----------------------------------------------------------------------------------
  2885. * I notice this method can also return errors from the queue disciplines,
  2886. * including NET_XMIT_DROP, which is a positive value. So, errors can also
  2887. * be positive.
  2888. *
  2889. * Regardless of the return value, the skb is consumed, so it is currently
  2890. * difficult to retry a send to this method. (You can bump the ref count
  2891. * before sending to hold a reference for retry if you are careful.)
  2892. *
  2893. * When calling this method, interrupts MUST be enabled. This is because
  2894. * the BH enable code must have IRQs enabled so that it will not deadlock.
  2895. * --BLG
  2896. */
  2897. static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
  2898. {
  2899. struct net_device *dev = skb->dev;
  2900. struct netdev_queue *txq;
  2901. struct Qdisc *q;
  2902. int rc = -ENOMEM;
  2903. skb_reset_mac_header(skb);
  2904. if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
  2905. __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
  2906. /* Disable soft irqs for various locks below. Also
  2907. * stops preemption for RCU.
  2908. */
  2909. rcu_read_lock_bh();
  2910. skb_update_prio(skb);
  2911. qdisc_pkt_len_init(skb);
  2912. #ifdef CONFIG_NET_CLS_ACT
  2913. skb->tc_at_ingress = 0;
  2914. # ifdef CONFIG_NET_EGRESS
  2915. if (static_key_false(&egress_needed)) {
  2916. skb = sch_handle_egress(skb, &rc, dev);
  2917. if (!skb)
  2918. goto out;
  2919. }
  2920. # endif
  2921. #endif
  2922. /* If device/qdisc don't need skb->dst, release it right now while
  2923. * its hot in this cpu cache.
  2924. */
  2925. if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
  2926. skb_dst_drop(skb);
  2927. else
  2928. skb_dst_force(skb);
  2929. txq = netdev_pick_tx(dev, skb, accel_priv);
  2930. q = rcu_dereference_bh(txq->qdisc);
  2931. trace_net_dev_queue(skb);
  2932. if (q->enqueue) {
  2933. rc = __dev_xmit_skb(skb, q, dev, txq);
  2934. goto out;
  2935. }
  2936. /* The device has no queue. Common case for software devices:
  2937. * loopback, all the sorts of tunnels...
  2938. * Really, it is unlikely that netif_tx_lock protection is necessary
  2939. * here. (f.e. loopback and IP tunnels are clean ignoring statistics
  2940. * counters.)
  2941. * However, it is possible, that they rely on protection
  2942. * made by us here.
  2943. * Check this and shot the lock. It is not prone from deadlocks.
  2944. *Either shot noqueue qdisc, it is even simpler 8)
  2945. */
  2946. if (dev->flags & IFF_UP) {
  2947. int cpu = smp_processor_id(); /* ok because BHs are off */
  2948. if (txq->xmit_lock_owner != cpu) {
  2949. if (unlikely(__this_cpu_read(xmit_recursion) >
  2950. XMIT_RECURSION_LIMIT))
  2951. goto recursion_alert;
  2952. skb = validate_xmit_skb(skb, dev);
  2953. if (!skb)
  2954. goto out;
  2955. HARD_TX_LOCK(dev, txq, cpu);
  2956. if (!netif_xmit_stopped(txq)) {
  2957. __this_cpu_inc(xmit_recursion);
  2958. skb = dev_hard_start_xmit(skb, dev, txq, &rc);
  2959. __this_cpu_dec(xmit_recursion);
  2960. if (dev_xmit_complete(rc)) {
  2961. HARD_TX_UNLOCK(dev, txq);
  2962. goto out;
  2963. }
  2964. }
  2965. HARD_TX_UNLOCK(dev, txq);
  2966. net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
  2967. dev->name);
  2968. } else {
  2969. /* Recursion is detected! It is possible,
  2970. * unfortunately
  2971. */
  2972. recursion_alert:
  2973. net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
  2974. dev->name);
  2975. }
  2976. }
  2977. rc = -ENETDOWN;
  2978. rcu_read_unlock_bh();
  2979. atomic_long_inc(&dev->tx_dropped);
  2980. kfree_skb_list(skb);
  2981. return rc;
  2982. out:
  2983. rcu_read_unlock_bh();
  2984. return rc;
  2985. }
  2986. int dev_queue_xmit(struct sk_buff *skb)
  2987. {
  2988. return __dev_queue_xmit(skb, NULL);
  2989. }
  2990. EXPORT_SYMBOL(dev_queue_xmit);
  2991. int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
  2992. {
  2993. return __dev_queue_xmit(skb, accel_priv);
  2994. }
  2995. EXPORT_SYMBOL(dev_queue_xmit_accel);
  2996. /*************************************************************************
  2997. * Receiver routines
  2998. *************************************************************************/
  2999. int netdev_max_backlog __read_mostly = 1000;
  3000. EXPORT_SYMBOL(netdev_max_backlog);
  3001. int netdev_tstamp_prequeue __read_mostly = 1;
  3002. int netdev_budget __read_mostly = 300;
  3003. unsigned int __read_mostly netdev_budget_usecs = 2000;
  3004. int weight_p __read_mostly = 64; /* old backlog weight */
  3005. int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */
  3006. int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */
  3007. int dev_rx_weight __read_mostly = 64;
  3008. int dev_tx_weight __read_mostly = 64;
  3009. /* Called with irq disabled */
  3010. static inline void ____napi_schedule(struct softnet_data *sd,
  3011. struct napi_struct *napi)
  3012. {
  3013. list_add_tail(&napi->poll_list, &sd->poll_list);
  3014. __raise_softirq_irqoff(NET_RX_SOFTIRQ);
  3015. }
  3016. #ifdef CONFIG_RPS
  3017. /* One global table that all flow-based protocols share. */
  3018. struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
  3019. EXPORT_SYMBOL(rps_sock_flow_table);
  3020. u32 rps_cpu_mask __read_mostly;
  3021. EXPORT_SYMBOL(rps_cpu_mask);
  3022. struct static_key rps_needed __read_mostly;
  3023. EXPORT_SYMBOL(rps_needed);
  3024. struct static_key rfs_needed __read_mostly;
  3025. EXPORT_SYMBOL(rfs_needed);
  3026. static struct rps_dev_flow *
  3027. set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
  3028. struct rps_dev_flow *rflow, u16 next_cpu)
  3029. {
  3030. if (next_cpu < nr_cpu_ids) {
  3031. #ifdef CONFIG_RFS_ACCEL
  3032. struct netdev_rx_queue *rxqueue;
  3033. struct rps_dev_flow_table *flow_table;
  3034. struct rps_dev_flow *old_rflow;
  3035. u32 flow_id;
  3036. u16 rxq_index;
  3037. int rc;
  3038. /* Should we steer this flow to a different hardware queue? */
  3039. if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
  3040. !(dev->features & NETIF_F_NTUPLE))
  3041. goto out;
  3042. rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
  3043. if (rxq_index == skb_get_rx_queue(skb))
  3044. goto out;
  3045. rxqueue = dev->_rx + rxq_index;
  3046. flow_table = rcu_dereference(rxqueue->rps_flow_table);
  3047. if (!flow_table)
  3048. goto out;
  3049. flow_id = skb_get_hash(skb) & flow_table->mask;
  3050. rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
  3051. rxq_index, flow_id);
  3052. if (rc < 0)
  3053. goto out;
  3054. old_rflow = rflow;
  3055. rflow = &flow_table->flows[flow_id];
  3056. rflow->filter = rc;
  3057. if (old_rflow->filter == rflow->filter)
  3058. old_rflow->filter = RPS_NO_FILTER;
  3059. out:
  3060. #endif
  3061. rflow->last_qtail =
  3062. per_cpu(softnet_data, next_cpu).input_queue_head;
  3063. }
  3064. rflow->cpu = next_cpu;
  3065. return rflow;
  3066. }
  3067. /*
  3068. * get_rps_cpu is called from netif_receive_skb and returns the target
  3069. * CPU from the RPS map of the receiving queue for a given skb.
  3070. * rcu_read_lock must be held on entry.
  3071. */
  3072. static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
  3073. struct rps_dev_flow **rflowp)
  3074. {
  3075. const struct rps_sock_flow_table *sock_flow_table;
  3076. struct netdev_rx_queue *rxqueue = dev->_rx;
  3077. struct rps_dev_flow_table *flow_table;
  3078. struct rps_map *map;
  3079. int cpu = -1;
  3080. u32 tcpu;
  3081. u32 hash;
  3082. if (skb_rx_queue_recorded(skb)) {
  3083. u16 index = skb_get_rx_queue(skb);
  3084. if (unlikely(index >= dev->real_num_rx_queues)) {
  3085. WARN_ONCE(dev->real_num_rx_queues > 1,
  3086. "%s received packet on queue %u, but number "
  3087. "of RX queues is %u\n",
  3088. dev->name, index, dev->real_num_rx_queues);
  3089. goto done;
  3090. }
  3091. rxqueue += index;
  3092. }
  3093. /* Avoid computing hash if RFS/RPS is not active for this rxqueue */
  3094. flow_table = rcu_dereference(rxqueue->rps_flow_table);
  3095. map = rcu_dereference(rxqueue->rps_map);
  3096. if (!flow_table && !map)
  3097. goto done;
  3098. skb_reset_network_header(skb);
  3099. hash = skb_get_hash(skb);
  3100. if (!hash)
  3101. goto done;
  3102. sock_flow_table = rcu_dereference(rps_sock_flow_table);
  3103. if (flow_table && sock_flow_table) {
  3104. struct rps_dev_flow *rflow;
  3105. u32 next_cpu;
  3106. u32 ident;
  3107. /* First check into global flow table if there is a match */
  3108. ident = sock_flow_table->ents[hash & sock_flow_table->mask];
  3109. if ((ident ^ hash) & ~rps_cpu_mask)
  3110. goto try_rps;
  3111. next_cpu = ident & rps_cpu_mask;
  3112. /* OK, now we know there is a match,
  3113. * we can look at the local (per receive queue) flow table
  3114. */
  3115. rflow = &flow_table->flows[hash & flow_table->mask];
  3116. tcpu = rflow->cpu;
  3117. /*
  3118. * If the desired CPU (where last recvmsg was done) is
  3119. * different from current CPU (one in the rx-queue flow
  3120. * table entry), switch if one of the following holds:
  3121. * - Current CPU is unset (>= nr_cpu_ids).
  3122. * - Current CPU is offline.
  3123. * - The current CPU's queue tail has advanced beyond the
  3124. * last packet that was enqueued using this table entry.
  3125. * This guarantees that all previous packets for the flow
  3126. * have been dequeued, thus preserving in order delivery.
  3127. */
  3128. if (unlikely(tcpu != next_cpu) &&
  3129. (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
  3130. ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
  3131. rflow->last_qtail)) >= 0)) {
  3132. tcpu = next_cpu;
  3133. rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
  3134. }
  3135. if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
  3136. *rflowp = rflow;
  3137. cpu = tcpu;
  3138. goto done;
  3139. }
  3140. }
  3141. try_rps:
  3142. if (map) {
  3143. tcpu = map->cpus[reciprocal_scale(hash, map->len)];
  3144. if (cpu_online(tcpu)) {
  3145. cpu = tcpu;
  3146. goto done;
  3147. }
  3148. }
  3149. done:
  3150. return cpu;
  3151. }
  3152. #ifdef CONFIG_RFS_ACCEL
  3153. /**
  3154. * rps_may_expire_flow - check whether an RFS hardware filter may be removed
  3155. * @dev: Device on which the filter was set
  3156. * @rxq_index: RX queue index
  3157. * @flow_id: Flow ID passed to ndo_rx_flow_steer()
  3158. * @filter_id: Filter ID returned by ndo_rx_flow_steer()
  3159. *
  3160. * Drivers that implement ndo_rx_flow_steer() should periodically call
  3161. * this function for each installed filter and remove the filters for
  3162. * which it returns %true.
  3163. */
  3164. bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
  3165. u32 flow_id, u16 filter_id)
  3166. {
  3167. struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
  3168. struct rps_dev_flow_table *flow_table;
  3169. struct rps_dev_flow *rflow;
  3170. bool expire = true;
  3171. unsigned int cpu;
  3172. rcu_read_lock();
  3173. flow_table = rcu_dereference(rxqueue->rps_flow_table);
  3174. if (flow_table && flow_id <= flow_table->mask) {
  3175. rflow = &flow_table->flows[flow_id];
  3176. cpu = READ_ONCE(rflow->cpu);
  3177. if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
  3178. ((int)(per_cpu(softnet_data, cpu).input_queue_head -
  3179. rflow->last_qtail) <
  3180. (int)(10 * flow_table->mask)))
  3181. expire = false;
  3182. }
  3183. rcu_read_unlock();
  3184. return expire;
  3185. }
  3186. EXPORT_SYMBOL(rps_may_expire_flow);
  3187. #endif /* CONFIG_RFS_ACCEL */
  3188. /* Called from hardirq (IPI) context */
  3189. static void rps_trigger_softirq(void *data)
  3190. {
  3191. struct softnet_data *sd = data;
  3192. ____napi_schedule(sd, &sd->backlog);
  3193. sd->received_rps++;
  3194. }
  3195. #endif /* CONFIG_RPS */
  3196. /*
  3197. * Check if this softnet_data structure is another cpu one
  3198. * If yes, queue it to our IPI list and return 1
  3199. * If no, return 0
  3200. */
  3201. static int rps_ipi_queued(struct softnet_data *sd)
  3202. {
  3203. #ifdef CONFIG_RPS
  3204. struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
  3205. if (sd != mysd) {
  3206. sd->rps_ipi_next = mysd->rps_ipi_list;
  3207. mysd->rps_ipi_list = sd;
  3208. __raise_softirq_irqoff(NET_RX_SOFTIRQ);
  3209. return 1;
  3210. }
  3211. #endif /* CONFIG_RPS */
  3212. return 0;
  3213. }
  3214. #ifdef CONFIG_NET_FLOW_LIMIT
  3215. int netdev_flow_limit_table_len __read_mostly = (1 << 12);
  3216. #endif
  3217. static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
  3218. {
  3219. #ifdef CONFIG_NET_FLOW_LIMIT
  3220. struct sd_flow_limit *fl;
  3221. struct softnet_data *sd;
  3222. unsigned int old_flow, new_flow;
  3223. if (qlen < (netdev_max_backlog >> 1))
  3224. return false;
  3225. sd = this_cpu_ptr(&softnet_data);
  3226. rcu_read_lock();
  3227. fl = rcu_dereference(sd->flow_limit);
  3228. if (fl) {
  3229. new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
  3230. old_flow = fl->history[fl->history_head];
  3231. fl->history[fl->history_head] = new_flow;
  3232. fl->history_head++;
  3233. fl->history_head &= FLOW_LIMIT_HISTORY - 1;
  3234. if (likely(fl->buckets[old_flow]))
  3235. fl->buckets[old_flow]--;
  3236. if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
  3237. fl->count++;
  3238. rcu_read_unlock();
  3239. return true;
  3240. }
  3241. }
  3242. rcu_read_unlock();
  3243. #endif
  3244. return false;
  3245. }
  3246. /*
  3247. * enqueue_to_backlog is called to queue an skb to a per CPU backlog
  3248. * queue (may be a remote CPU queue).
  3249. */
  3250. static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
  3251. unsigned int *qtail)
  3252. {
  3253. struct softnet_data *sd;
  3254. unsigned long flags;
  3255. unsigned int qlen;
  3256. sd = &per_cpu(softnet_data, cpu);
  3257. local_irq_save(flags);
  3258. rps_lock(sd);
  3259. if (!netif_running(skb->dev))
  3260. goto drop;
  3261. qlen = skb_queue_len(&sd->input_pkt_queue);
  3262. if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
  3263. if (qlen) {
  3264. enqueue:
  3265. __skb_queue_tail(&sd->input_pkt_queue, skb);
  3266. input_queue_tail_incr_save(sd, qtail);
  3267. rps_unlock(sd);
  3268. local_irq_restore(flags);
  3269. return NET_RX_SUCCESS;
  3270. }
  3271. /* Schedule NAPI for backlog device
  3272. * We can use non atomic operation since we own the queue lock
  3273. */
  3274. if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
  3275. if (!rps_ipi_queued(sd))
  3276. ____napi_schedule(sd, &sd->backlog);
  3277. }
  3278. goto enqueue;
  3279. }
  3280. drop:
  3281. sd->dropped++;
  3282. rps_unlock(sd);
  3283. local_irq_restore(flags);
  3284. atomic_long_inc(&skb->dev->rx_dropped);
  3285. kfree_skb(skb);
  3286. return NET_RX_DROP;
  3287. }
  3288. static u32 netif_receive_generic_xdp(struct sk_buff *skb,
  3289. struct bpf_prog *xdp_prog)
  3290. {
  3291. u32 metalen, act = XDP_DROP;
  3292. struct xdp_buff xdp;
  3293. void *orig_data;
  3294. int hlen, off;
  3295. u32 mac_len;
  3296. /* Reinjected packets coming from act_mirred or similar should
  3297. * not get XDP generic processing.
  3298. */
  3299. if (skb_cloned(skb))
  3300. return XDP_PASS;
  3301. /* XDP packets must be linear and must have sufficient headroom
  3302. * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
  3303. * native XDP provides, thus we need to do it here as well.
  3304. */
  3305. if (skb_is_nonlinear(skb) ||
  3306. skb_headroom(skb) < XDP_PACKET_HEADROOM) {
  3307. int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
  3308. int troom = skb->tail + skb->data_len - skb->end;
  3309. /* In case we have to go down the path and also linearize,
  3310. * then lets do the pskb_expand_head() work just once here.
  3311. */
  3312. if (pskb_expand_head(skb,
  3313. hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
  3314. troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
  3315. goto do_drop;
  3316. if (troom > 0 && __skb_linearize(skb))
  3317. goto do_drop;
  3318. }
  3319. /* The XDP program wants to see the packet starting at the MAC
  3320. * header.
  3321. */
  3322. mac_len = skb->data - skb_mac_header(skb);
  3323. hlen = skb_headlen(skb) + mac_len;
  3324. xdp.data = skb->data - mac_len;
  3325. xdp.data_meta = xdp.data;
  3326. xdp.data_end = xdp.data + hlen;
  3327. xdp.data_hard_start = skb->data - skb_headroom(skb);
  3328. orig_data = xdp.data;
  3329. act = bpf_prog_run_xdp(xdp_prog, &xdp);
  3330. off = xdp.data - orig_data;
  3331. if (off > 0)
  3332. __skb_pull(skb, off);
  3333. else if (off < 0)
  3334. __skb_push(skb, -off);
  3335. skb->mac_header += off;
  3336. switch (act) {
  3337. case XDP_REDIRECT:
  3338. case XDP_TX:
  3339. __skb_push(skb, mac_len);
  3340. break;
  3341. case XDP_PASS:
  3342. metalen = xdp.data - xdp.data_meta;
  3343. if (metalen)
  3344. skb_metadata_set(skb, metalen);
  3345. break;
  3346. default:
  3347. bpf_warn_invalid_xdp_action(act);
  3348. /* fall through */
  3349. case XDP_ABORTED:
  3350. trace_xdp_exception(skb->dev, xdp_prog, act);
  3351. /* fall through */
  3352. case XDP_DROP:
  3353. do_drop:
  3354. kfree_skb(skb);
  3355. break;
  3356. }
  3357. return act;
  3358. }
  3359. /* When doing generic XDP we have to bypass the qdisc layer and the
  3360. * network taps in order to match in-driver-XDP behavior.
  3361. */
  3362. void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
  3363. {
  3364. struct net_device *dev = skb->dev;
  3365. struct netdev_queue *txq;
  3366. bool free_skb = true;
  3367. int cpu, rc;
  3368. txq = netdev_pick_tx(dev, skb, NULL);
  3369. cpu = smp_processor_id();
  3370. HARD_TX_LOCK(dev, txq, cpu);
  3371. if (!netif_xmit_stopped(txq)) {
  3372. rc = netdev_start_xmit(skb, dev, txq, 0);
  3373. if (dev_xmit_complete(rc))
  3374. free_skb = false;
  3375. }
  3376. HARD_TX_UNLOCK(dev, txq);
  3377. if (free_skb) {
  3378. trace_xdp_exception(dev, xdp_prog, XDP_TX);
  3379. kfree_skb(skb);
  3380. }
  3381. }
  3382. EXPORT_SYMBOL_GPL(generic_xdp_tx);
  3383. static struct static_key generic_xdp_needed __read_mostly;
  3384. int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
  3385. {
  3386. if (xdp_prog) {
  3387. u32 act = netif_receive_generic_xdp(skb, xdp_prog);
  3388. int err;
  3389. if (act != XDP_PASS) {
  3390. switch (act) {
  3391. case XDP_REDIRECT:
  3392. err = xdp_do_generic_redirect(skb->dev, skb,
  3393. xdp_prog);
  3394. if (err)
  3395. goto out_redir;
  3396. /* fallthru to submit skb */
  3397. case XDP_TX:
  3398. generic_xdp_tx(skb, xdp_prog);
  3399. break;
  3400. }
  3401. return XDP_DROP;
  3402. }
  3403. }
  3404. return XDP_PASS;
  3405. out_redir:
  3406. kfree_skb(skb);
  3407. return XDP_DROP;
  3408. }
  3409. EXPORT_SYMBOL_GPL(do_xdp_generic);
  3410. static int netif_rx_internal(struct sk_buff *skb)
  3411. {
  3412. int ret;
  3413. net_timestamp_check(netdev_tstamp_prequeue, skb);
  3414. trace_netif_rx(skb);
  3415. if (static_key_false(&generic_xdp_needed)) {
  3416. int ret;
  3417. preempt_disable();
  3418. rcu_read_lock();
  3419. ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
  3420. rcu_read_unlock();
  3421. preempt_enable();
  3422. /* Consider XDP consuming the packet a success from
  3423. * the netdev point of view we do not want to count
  3424. * this as an error.
  3425. */
  3426. if (ret != XDP_PASS)
  3427. return NET_RX_SUCCESS;
  3428. }
  3429. #ifdef CONFIG_RPS
  3430. if (static_key_false(&rps_needed)) {
  3431. struct rps_dev_flow voidflow, *rflow = &voidflow;
  3432. int cpu;
  3433. preempt_disable();
  3434. rcu_read_lock();
  3435. cpu = get_rps_cpu(skb->dev, skb, &rflow);
  3436. if (cpu < 0)
  3437. cpu = smp_processor_id();
  3438. ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
  3439. rcu_read_unlock();
  3440. preempt_enable();
  3441. } else
  3442. #endif
  3443. {
  3444. unsigned int qtail;
  3445. ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
  3446. put_cpu();
  3447. }
  3448. return ret;
  3449. }
  3450. /**
  3451. * netif_rx - post buffer to the network code
  3452. * @skb: buffer to post
  3453. *
  3454. * This function receives a packet from a device driver and queues it for
  3455. * the upper (protocol) levels to process. It always succeeds. The buffer
  3456. * may be dropped during processing for congestion control or by the
  3457. * protocol layers.
  3458. *
  3459. * return values:
  3460. * NET_RX_SUCCESS (no congestion)
  3461. * NET_RX_DROP (packet was dropped)
  3462. *
  3463. */
  3464. int netif_rx(struct sk_buff *skb)
  3465. {
  3466. trace_netif_rx_entry(skb);
  3467. return netif_rx_internal(skb);
  3468. }
  3469. EXPORT_SYMBOL(netif_rx);
  3470. int netif_rx_ni(struct sk_buff *skb)
  3471. {
  3472. int err;
  3473. trace_netif_rx_ni_entry(skb);
  3474. preempt_disable();
  3475. err = netif_rx_internal(skb);
  3476. if (local_softirq_pending())
  3477. do_softirq();
  3478. preempt_enable();
  3479. return err;
  3480. }
  3481. EXPORT_SYMBOL(netif_rx_ni);
  3482. static __latent_entropy void net_tx_action(struct softirq_action *h)
  3483. {
  3484. struct softnet_data *sd = this_cpu_ptr(&softnet_data);
  3485. if (sd->completion_queue) {
  3486. struct sk_buff *clist;
  3487. local_irq_disable();
  3488. clist = sd->completion_queue;
  3489. sd->completion_queue = NULL;
  3490. local_irq_enable();
  3491. while (clist) {
  3492. struct sk_buff *skb = clist;
  3493. clist = clist->next;
  3494. WARN_ON(refcount_read(&skb->users));
  3495. if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
  3496. trace_consume_skb(skb);
  3497. else
  3498. trace_kfree_skb(skb, net_tx_action);
  3499. if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
  3500. __kfree_skb(skb);
  3501. else
  3502. __kfree_skb_defer(skb);
  3503. }
  3504. __kfree_skb_flush();
  3505. }
  3506. if (sd->output_queue) {
  3507. struct Qdisc *head;
  3508. local_irq_disable();
  3509. head = sd->output_queue;
  3510. sd->output_queue = NULL;
  3511. sd->output_queue_tailp = &sd->output_queue;
  3512. local_irq_enable();
  3513. while (head) {
  3514. struct Qdisc *q = head;
  3515. spinlock_t *root_lock;
  3516. head = head->next_sched;
  3517. root_lock = qdisc_lock(q);
  3518. spin_lock(root_lock);
  3519. /* We need to make sure head->next_sched is read
  3520. * before clearing __QDISC_STATE_SCHED
  3521. */
  3522. smp_mb__before_atomic();
  3523. clear_bit(__QDISC_STATE_SCHED, &q->state);
  3524. qdisc_run(q);
  3525. spin_unlock(root_lock);
  3526. }
  3527. }
  3528. }
  3529. #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
  3530. /* This hook is defined here for ATM LANE */
  3531. int (*br_fdb_test_addr_hook)(struct net_device *dev,
  3532. unsigned char *addr) __read_mostly;
  3533. EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
  3534. #endif
  3535. static inline struct sk_buff *
  3536. sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
  3537. struct net_device *orig_dev)
  3538. {
  3539. #ifdef CONFIG_NET_CLS_ACT
  3540. struct mini_Qdisc *miniq = rcu_dereference_bh(skb->dev->miniq_ingress);
  3541. struct tcf_result cl_res;
  3542. /* If there's at least one ingress present somewhere (so
  3543. * we get here via enabled static key), remaining devices
  3544. * that are not configured with an ingress qdisc will bail
  3545. * out here.
  3546. */
  3547. if (!miniq)
  3548. return skb;
  3549. if (*pt_prev) {
  3550. *ret = deliver_skb(skb, *pt_prev, orig_dev);
  3551. *pt_prev = NULL;
  3552. }
  3553. qdisc_skb_cb(skb)->pkt_len = skb->len;
  3554. skb->tc_at_ingress = 1;
  3555. mini_qdisc_bstats_cpu_update(miniq, skb);
  3556. switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
  3557. case TC_ACT_OK:
  3558. case TC_ACT_RECLASSIFY:
  3559. skb->tc_index = TC_H_MIN(cl_res.classid);
  3560. break;
  3561. case TC_ACT_SHOT:
  3562. mini_qdisc_qstats_cpu_drop(miniq);
  3563. kfree_skb(skb);
  3564. return NULL;
  3565. case TC_ACT_STOLEN:
  3566. case TC_ACT_QUEUED:
  3567. case TC_ACT_TRAP:
  3568. consume_skb(skb);
  3569. return NULL;
  3570. case TC_ACT_REDIRECT:
  3571. /* skb_mac_header check was done by cls/act_bpf, so
  3572. * we can safely push the L2 header back before
  3573. * redirecting to another netdev
  3574. */
  3575. __skb_push(skb, skb->mac_len);
  3576. skb_do_redirect(skb);
  3577. return NULL;
  3578. default:
  3579. break;
  3580. }
  3581. #endif /* CONFIG_NET_CLS_ACT */
  3582. return skb;
  3583. }
  3584. /**
  3585. * netdev_is_rx_handler_busy - check if receive handler is registered
  3586. * @dev: device to check
  3587. *
  3588. * Check if a receive handler is already registered for a given device.
  3589. * Return true if there one.
  3590. *
  3591. * The caller must hold the rtnl_mutex.
  3592. */
  3593. bool netdev_is_rx_handler_busy(struct net_device *dev)
  3594. {
  3595. ASSERT_RTNL();
  3596. return dev && rtnl_dereference(dev->rx_handler);
  3597. }
  3598. EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
  3599. /**
  3600. * netdev_rx_handler_register - register receive handler
  3601. * @dev: device to register a handler for
  3602. * @rx_handler: receive handler to register
  3603. * @rx_handler_data: data pointer that is used by rx handler
  3604. *
  3605. * Register a receive handler for a device. This handler will then be
  3606. * called from __netif_receive_skb. A negative errno code is returned
  3607. * on a failure.
  3608. *
  3609. * The caller must hold the rtnl_mutex.
  3610. *
  3611. * For a general description of rx_handler, see enum rx_handler_result.
  3612. */
  3613. int netdev_rx_handler_register(struct net_device *dev,
  3614. rx_handler_func_t *rx_handler,
  3615. void *rx_handler_data)
  3616. {
  3617. if (netdev_is_rx_handler_busy(dev))
  3618. return -EBUSY;
  3619. /* Note: rx_handler_data must be set before rx_handler */
  3620. rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
  3621. rcu_assign_pointer(dev->rx_handler, rx_handler);
  3622. return 0;
  3623. }
  3624. EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
  3625. /**
  3626. * netdev_rx_handler_unregister - unregister receive handler
  3627. * @dev: device to unregister a handler from
  3628. *
  3629. * Unregister a receive handler from a device.
  3630. *
  3631. * The caller must hold the rtnl_mutex.
  3632. */
  3633. void netdev_rx_handler_unregister(struct net_device *dev)
  3634. {
  3635. ASSERT_RTNL();
  3636. RCU_INIT_POINTER(dev->rx_handler, NULL);
  3637. /* a reader seeing a non NULL rx_handler in a rcu_read_lock()
  3638. * section has a guarantee to see a non NULL rx_handler_data
  3639. * as well.
  3640. */
  3641. synchronize_net();
  3642. RCU_INIT_POINTER(dev->rx_handler_data, NULL);
  3643. }
  3644. EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
  3645. /*
  3646. * Limit the use of PFMEMALLOC reserves to those protocols that implement
  3647. * the special handling of PFMEMALLOC skbs.
  3648. */
  3649. static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
  3650. {
  3651. switch (skb->protocol) {
  3652. case htons(ETH_P_ARP):
  3653. case htons(ETH_P_IP):
  3654. case htons(ETH_P_IPV6):
  3655. case htons(ETH_P_8021Q):
  3656. case htons(ETH_P_8021AD):
  3657. return true;
  3658. default:
  3659. return false;
  3660. }
  3661. }
  3662. static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
  3663. int *ret, struct net_device *orig_dev)
  3664. {
  3665. #ifdef CONFIG_NETFILTER_INGRESS
  3666. if (nf_hook_ingress_active(skb)) {
  3667. int ingress_retval;
  3668. if (*pt_prev) {
  3669. *ret = deliver_skb(skb, *pt_prev, orig_dev);
  3670. *pt_prev = NULL;
  3671. }
  3672. rcu_read_lock();
  3673. ingress_retval = nf_hook_ingress(skb);
  3674. rcu_read_unlock();
  3675. return ingress_retval;
  3676. }
  3677. #endif /* CONFIG_NETFILTER_INGRESS */
  3678. return 0;
  3679. }
  3680. static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
  3681. {
  3682. struct packet_type *ptype, *pt_prev;
  3683. rx_handler_func_t *rx_handler;
  3684. struct net_device *orig_dev;
  3685. bool deliver_exact = false;
  3686. int ret = NET_RX_DROP;
  3687. __be16 type;
  3688. net_timestamp_check(!netdev_tstamp_prequeue, skb);
  3689. trace_netif_receive_skb(skb);
  3690. orig_dev = skb->dev;
  3691. skb_reset_network_header(skb);
  3692. if (!skb_transport_header_was_set(skb))
  3693. skb_reset_transport_header(skb);
  3694. skb_reset_mac_len(skb);
  3695. pt_prev = NULL;
  3696. another_round:
  3697. skb->skb_iif = skb->dev->ifindex;
  3698. __this_cpu_inc(softnet_data.processed);
  3699. if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
  3700. skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
  3701. skb = skb_vlan_untag(skb);
  3702. if (unlikely(!skb))
  3703. goto out;
  3704. }
  3705. if (skb_skip_tc_classify(skb))
  3706. goto skip_classify;
  3707. if (pfmemalloc)
  3708. goto skip_taps;
  3709. list_for_each_entry_rcu(ptype, &ptype_all, list) {
  3710. if (pt_prev)
  3711. ret = deliver_skb(skb, pt_prev, orig_dev);
  3712. pt_prev = ptype;
  3713. }
  3714. list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
  3715. if (pt_prev)
  3716. ret = deliver_skb(skb, pt_prev, orig_dev);
  3717. pt_prev = ptype;
  3718. }
  3719. skip_taps:
  3720. #ifdef CONFIG_NET_INGRESS
  3721. if (static_key_false(&ingress_needed)) {
  3722. skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev);
  3723. if (!skb)
  3724. goto out;
  3725. if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
  3726. goto out;
  3727. }
  3728. #endif
  3729. skb_reset_tc(skb);
  3730. skip_classify:
  3731. if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
  3732. goto drop;
  3733. if (skb_vlan_tag_present(skb)) {
  3734. if (pt_prev) {
  3735. ret = deliver_skb(skb, pt_prev, orig_dev);
  3736. pt_prev = NULL;
  3737. }
  3738. if (vlan_do_receive(&skb))
  3739. goto another_round;
  3740. else if (unlikely(!skb))
  3741. goto out;
  3742. }
  3743. rx_handler = rcu_dereference(skb->dev->rx_handler);
  3744. if (rx_handler) {
  3745. if (pt_prev) {
  3746. ret = deliver_skb(skb, pt_prev, orig_dev);
  3747. pt_prev = NULL;
  3748. }
  3749. switch (rx_handler(&skb)) {
  3750. case RX_HANDLER_CONSUMED:
  3751. ret = NET_RX_SUCCESS;
  3752. goto out;
  3753. case RX_HANDLER_ANOTHER:
  3754. goto another_round;
  3755. case RX_HANDLER_EXACT:
  3756. deliver_exact = true;
  3757. case RX_HANDLER_PASS:
  3758. break;
  3759. default:
  3760. BUG();
  3761. }
  3762. }
  3763. if (unlikely(skb_vlan_tag_present(skb))) {
  3764. if (skb_vlan_tag_get_id(skb))
  3765. skb->pkt_type = PACKET_OTHERHOST;
  3766. /* Note: we might in the future use prio bits
  3767. * and set skb->priority like in vlan_do_receive()
  3768. * For the time being, just ignore Priority Code Point
  3769. */
  3770. skb->vlan_tci = 0;
  3771. }
  3772. type = skb->protocol;
  3773. /* deliver only exact match when indicated */
  3774. if (likely(!deliver_exact)) {
  3775. deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
  3776. &ptype_base[ntohs(type) &
  3777. PTYPE_HASH_MASK]);
  3778. }
  3779. deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
  3780. &orig_dev->ptype_specific);
  3781. if (unlikely(skb->dev != orig_dev)) {
  3782. deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
  3783. &skb->dev->ptype_specific);
  3784. }
  3785. if (pt_prev) {
  3786. if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
  3787. goto drop;
  3788. else
  3789. ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
  3790. } else {
  3791. drop:
  3792. if (!deliver_exact)
  3793. atomic_long_inc(&skb->dev->rx_dropped);
  3794. else
  3795. atomic_long_inc(&skb->dev->rx_nohandler);
  3796. kfree_skb(skb);
  3797. /* Jamal, now you will not able to escape explaining
  3798. * me how you were going to use this. :-)
  3799. */
  3800. ret = NET_RX_DROP;
  3801. }
  3802. out:
  3803. return ret;
  3804. }
  3805. /**
  3806. * netif_receive_skb_core - special purpose version of netif_receive_skb
  3807. * @skb: buffer to process
  3808. *
  3809. * More direct receive version of netif_receive_skb(). It should
  3810. * only be used by callers that have a need to skip RPS and Generic XDP.
  3811. * Caller must also take care of handling if (page_is_)pfmemalloc.
  3812. *
  3813. * This function may only be called from softirq context and interrupts
  3814. * should be enabled.
  3815. *
  3816. * Return values (usually ignored):
  3817. * NET_RX_SUCCESS: no congestion
  3818. * NET_RX_DROP: packet was dropped
  3819. */
  3820. int netif_receive_skb_core(struct sk_buff *skb)
  3821. {
  3822. int ret;
  3823. rcu_read_lock();
  3824. ret = __netif_receive_skb_core(skb, false);
  3825. rcu_read_unlock();
  3826. return ret;
  3827. }
  3828. EXPORT_SYMBOL(netif_receive_skb_core);
  3829. static int __netif_receive_skb(struct sk_buff *skb)
  3830. {
  3831. int ret;
  3832. if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
  3833. unsigned int noreclaim_flag;
  3834. /*
  3835. * PFMEMALLOC skbs are special, they should
  3836. * - be delivered to SOCK_MEMALLOC sockets only
  3837. * - stay away from userspace
  3838. * - have bounded memory usage
  3839. *
  3840. * Use PF_MEMALLOC as this saves us from propagating the allocation
  3841. * context down to all allocation sites.
  3842. */
  3843. noreclaim_flag = memalloc_noreclaim_save();
  3844. ret = __netif_receive_skb_core(skb, true);
  3845. memalloc_noreclaim_restore(noreclaim_flag);
  3846. } else
  3847. ret = __netif_receive_skb_core(skb, false);
  3848. return ret;
  3849. }
  3850. static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
  3851. {
  3852. struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
  3853. struct bpf_prog *new = xdp->prog;
  3854. int ret = 0;
  3855. switch (xdp->command) {
  3856. case XDP_SETUP_PROG:
  3857. rcu_assign_pointer(dev->xdp_prog, new);
  3858. if (old)
  3859. bpf_prog_put(old);
  3860. if (old && !new) {
  3861. static_key_slow_dec(&generic_xdp_needed);
  3862. } else if (new && !old) {
  3863. static_key_slow_inc(&generic_xdp_needed);
  3864. dev_disable_lro(dev);
  3865. }
  3866. break;
  3867. case XDP_QUERY_PROG:
  3868. xdp->prog_attached = !!old;
  3869. xdp->prog_id = old ? old->aux->id : 0;
  3870. break;
  3871. default:
  3872. ret = -EINVAL;
  3873. break;
  3874. }
  3875. return ret;
  3876. }
  3877. static int netif_receive_skb_internal(struct sk_buff *skb)
  3878. {
  3879. int ret;
  3880. net_timestamp_check(netdev_tstamp_prequeue, skb);
  3881. if (skb_defer_rx_timestamp(skb))
  3882. return NET_RX_SUCCESS;
  3883. if (static_key_false(&generic_xdp_needed)) {
  3884. int ret;
  3885. preempt_disable();
  3886. rcu_read_lock();
  3887. ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
  3888. rcu_read_unlock();
  3889. preempt_enable();
  3890. if (ret != XDP_PASS)
  3891. return NET_RX_DROP;
  3892. }
  3893. rcu_read_lock();
  3894. #ifdef CONFIG_RPS
  3895. if (static_key_false(&rps_needed)) {
  3896. struct rps_dev_flow voidflow, *rflow = &voidflow;
  3897. int cpu = get_rps_cpu(skb->dev, skb, &rflow);
  3898. if (cpu >= 0) {
  3899. ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
  3900. rcu_read_unlock();
  3901. return ret;
  3902. }
  3903. }
  3904. #endif
  3905. ret = __netif_receive_skb(skb);
  3906. rcu_read_unlock();
  3907. return ret;
  3908. }
  3909. /**
  3910. * netif_receive_skb - process receive buffer from network
  3911. * @skb: buffer to process
  3912. *
  3913. * netif_receive_skb() is the main receive data processing function.
  3914. * It always succeeds. The buffer may be dropped during processing
  3915. * for congestion control or by the protocol layers.
  3916. *
  3917. * This function may only be called from softirq context and interrupts
  3918. * should be enabled.
  3919. *
  3920. * Return values (usually ignored):
  3921. * NET_RX_SUCCESS: no congestion
  3922. * NET_RX_DROP: packet was dropped
  3923. */
  3924. int netif_receive_skb(struct sk_buff *skb)
  3925. {
  3926. trace_netif_receive_skb_entry(skb);
  3927. return netif_receive_skb_internal(skb);
  3928. }
  3929. EXPORT_SYMBOL(netif_receive_skb);
  3930. DEFINE_PER_CPU(struct work_struct, flush_works);
  3931. /* Network device is going away, flush any packets still pending */
  3932. static void flush_backlog(struct work_struct *work)
  3933. {
  3934. struct sk_buff *skb, *tmp;
  3935. struct softnet_data *sd;
  3936. local_bh_disable();
  3937. sd = this_cpu_ptr(&softnet_data);
  3938. local_irq_disable();
  3939. rps_lock(sd);
  3940. skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
  3941. if (skb->dev->reg_state == NETREG_UNREGISTERING) {
  3942. __skb_unlink(skb, &sd->input_pkt_queue);
  3943. kfree_skb(skb);
  3944. input_queue_head_incr(sd);
  3945. }
  3946. }
  3947. rps_unlock(sd);
  3948. local_irq_enable();
  3949. skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
  3950. if (skb->dev->reg_state == NETREG_UNREGISTERING) {
  3951. __skb_unlink(skb, &sd->process_queue);
  3952. kfree_skb(skb);
  3953. input_queue_head_incr(sd);
  3954. }
  3955. }
  3956. local_bh_enable();
  3957. }
  3958. static void flush_all_backlogs(void)
  3959. {
  3960. unsigned int cpu;
  3961. get_online_cpus();
  3962. for_each_online_cpu(cpu)
  3963. queue_work_on(cpu, system_highpri_wq,
  3964. per_cpu_ptr(&flush_works, cpu));
  3965. for_each_online_cpu(cpu)
  3966. flush_work(per_cpu_ptr(&flush_works, cpu));
  3967. put_online_cpus();
  3968. }
  3969. static int napi_gro_complete(struct sk_buff *skb)
  3970. {
  3971. struct packet_offload *ptype;
  3972. __be16 type = skb->protocol;
  3973. struct list_head *head = &offload_base;
  3974. int err = -ENOENT;
  3975. BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
  3976. if (NAPI_GRO_CB(skb)->count == 1) {
  3977. skb_shinfo(skb)->gso_size = 0;
  3978. goto out;
  3979. }
  3980. rcu_read_lock();
  3981. list_for_each_entry_rcu(ptype, head, list) {
  3982. if (ptype->type != type || !ptype->callbacks.gro_complete)
  3983. continue;
  3984. err = ptype->callbacks.gro_complete(skb, 0);
  3985. break;
  3986. }
  3987. rcu_read_unlock();
  3988. if (err) {
  3989. WARN_ON(&ptype->list == head);
  3990. kfree_skb(skb);
  3991. return NET_RX_SUCCESS;
  3992. }
  3993. out:
  3994. return netif_receive_skb_internal(skb);
  3995. }
  3996. /* napi->gro_list contains packets ordered by age.
  3997. * youngest packets at the head of it.
  3998. * Complete skbs in reverse order to reduce latencies.
  3999. */
  4000. void napi_gro_flush(struct napi_struct *napi, bool flush_old)
  4001. {
  4002. struct sk_buff *skb, *prev = NULL;
  4003. /* scan list and build reverse chain */
  4004. for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
  4005. skb->prev = prev;
  4006. prev = skb;
  4007. }
  4008. for (skb = prev; skb; skb = prev) {
  4009. skb->next = NULL;
  4010. if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
  4011. return;
  4012. prev = skb->prev;
  4013. napi_gro_complete(skb);
  4014. napi->gro_count--;
  4015. }
  4016. napi->gro_list = NULL;
  4017. }
  4018. EXPORT_SYMBOL(napi_gro_flush);
  4019. static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
  4020. {
  4021. struct sk_buff *p;
  4022. unsigned int maclen = skb->dev->hard_header_len;
  4023. u32 hash = skb_get_hash_raw(skb);
  4024. for (p = napi->gro_list; p; p = p->next) {
  4025. unsigned long diffs;
  4026. NAPI_GRO_CB(p)->flush = 0;
  4027. if (hash != skb_get_hash_raw(p)) {
  4028. NAPI_GRO_CB(p)->same_flow = 0;
  4029. continue;
  4030. }
  4031. diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
  4032. diffs |= p->vlan_tci ^ skb->vlan_tci;
  4033. diffs |= skb_metadata_dst_cmp(p, skb);
  4034. diffs |= skb_metadata_differs(p, skb);
  4035. if (maclen == ETH_HLEN)
  4036. diffs |= compare_ether_header(skb_mac_header(p),
  4037. skb_mac_header(skb));
  4038. else if (!diffs)
  4039. diffs = memcmp(skb_mac_header(p),
  4040. skb_mac_header(skb),
  4041. maclen);
  4042. NAPI_GRO_CB(p)->same_flow = !diffs;
  4043. }
  4044. }
  4045. static void skb_gro_reset_offset(struct sk_buff *skb)
  4046. {
  4047. const struct skb_shared_info *pinfo = skb_shinfo(skb);
  4048. const skb_frag_t *frag0 = &pinfo->frags[0];
  4049. NAPI_GRO_CB(skb)->data_offset = 0;
  4050. NAPI_GRO_CB(skb)->frag0 = NULL;
  4051. NAPI_GRO_CB(skb)->frag0_len = 0;
  4052. if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
  4053. pinfo->nr_frags &&
  4054. !PageHighMem(skb_frag_page(frag0))) {
  4055. NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
  4056. NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
  4057. skb_frag_size(frag0),
  4058. skb->end - skb->tail);
  4059. }
  4060. }
  4061. static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
  4062. {
  4063. struct skb_shared_info *pinfo = skb_shinfo(skb);
  4064. BUG_ON(skb->end - skb->tail < grow);
  4065. memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
  4066. skb->data_len -= grow;
  4067. skb->tail += grow;
  4068. pinfo->frags[0].page_offset += grow;
  4069. skb_frag_size_sub(&pinfo->frags[0], grow);
  4070. if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
  4071. skb_frag_unref(skb, 0);
  4072. memmove(pinfo->frags, pinfo->frags + 1,
  4073. --pinfo->nr_frags * sizeof(pinfo->frags[0]));
  4074. }
  4075. }
  4076. static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
  4077. {
  4078. struct sk_buff **pp = NULL;
  4079. struct packet_offload *ptype;
  4080. __be16 type = skb->protocol;
  4081. struct list_head *head = &offload_base;
  4082. int same_flow;
  4083. enum gro_result ret;
  4084. int grow;
  4085. if (netif_elide_gro(skb->dev))
  4086. goto normal;
  4087. gro_list_prepare(napi, skb);
  4088. rcu_read_lock();
  4089. list_for_each_entry_rcu(ptype, head, list) {
  4090. if (ptype->type != type || !ptype->callbacks.gro_receive)
  4091. continue;
  4092. skb_set_network_header(skb, skb_gro_offset(skb));
  4093. skb_reset_mac_len(skb);
  4094. NAPI_GRO_CB(skb)->same_flow = 0;
  4095. NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
  4096. NAPI_GRO_CB(skb)->free = 0;
  4097. NAPI_GRO_CB(skb)->encap_mark = 0;
  4098. NAPI_GRO_CB(skb)->recursion_counter = 0;
  4099. NAPI_GRO_CB(skb)->is_fou = 0;
  4100. NAPI_GRO_CB(skb)->is_atomic = 1;
  4101. NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
  4102. /* Setup for GRO checksum validation */
  4103. switch (skb->ip_summed) {
  4104. case CHECKSUM_COMPLETE:
  4105. NAPI_GRO_CB(skb)->csum = skb->csum;
  4106. NAPI_GRO_CB(skb)->csum_valid = 1;
  4107. NAPI_GRO_CB(skb)->csum_cnt = 0;
  4108. break;
  4109. case CHECKSUM_UNNECESSARY:
  4110. NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
  4111. NAPI_GRO_CB(skb)->csum_valid = 0;
  4112. break;
  4113. default:
  4114. NAPI_GRO_CB(skb)->csum_cnt = 0;
  4115. NAPI_GRO_CB(skb)->csum_valid = 0;
  4116. }
  4117. pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
  4118. break;
  4119. }
  4120. rcu_read_unlock();
  4121. if (&ptype->list == head)
  4122. goto normal;
  4123. if (IS_ERR(pp) && PTR_ERR(pp) == -EINPROGRESS) {
  4124. ret = GRO_CONSUMED;
  4125. goto ok;
  4126. }
  4127. same_flow = NAPI_GRO_CB(skb)->same_flow;
  4128. ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
  4129. if (pp) {
  4130. struct sk_buff *nskb = *pp;
  4131. *pp = nskb->next;
  4132. nskb->next = NULL;
  4133. napi_gro_complete(nskb);
  4134. napi->gro_count--;
  4135. }
  4136. if (same_flow)
  4137. goto ok;
  4138. if (NAPI_GRO_CB(skb)->flush)
  4139. goto normal;
  4140. if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
  4141. struct sk_buff *nskb = napi->gro_list;
  4142. /* locate the end of the list to select the 'oldest' flow */
  4143. while (nskb->next) {
  4144. pp = &nskb->next;
  4145. nskb = *pp;
  4146. }
  4147. *pp = NULL;
  4148. nskb->next = NULL;
  4149. napi_gro_complete(nskb);
  4150. } else {
  4151. napi->gro_count++;
  4152. }
  4153. NAPI_GRO_CB(skb)->count = 1;
  4154. NAPI_GRO_CB(skb)->age = jiffies;
  4155. NAPI_GRO_CB(skb)->last = skb;
  4156. skb_shinfo(skb)->gso_size = skb_gro_len(skb);
  4157. skb->next = napi->gro_list;
  4158. napi->gro_list = skb;
  4159. ret = GRO_HELD;
  4160. pull:
  4161. grow = skb_gro_offset(skb) - skb_headlen(skb);
  4162. if (grow > 0)
  4163. gro_pull_from_frag0(skb, grow);
  4164. ok:
  4165. return ret;
  4166. normal:
  4167. ret = GRO_NORMAL;
  4168. goto pull;
  4169. }
  4170. struct packet_offload *gro_find_receive_by_type(__be16 type)
  4171. {
  4172. struct list_head *offload_head = &offload_base;
  4173. struct packet_offload *ptype;
  4174. list_for_each_entry_rcu(ptype, offload_head, list) {
  4175. if (ptype->type != type || !ptype->callbacks.gro_receive)
  4176. continue;
  4177. return ptype;
  4178. }
  4179. return NULL;
  4180. }
  4181. EXPORT_SYMBOL(gro_find_receive_by_type);
  4182. struct packet_offload *gro_find_complete_by_type(__be16 type)
  4183. {
  4184. struct list_head *offload_head = &offload_base;
  4185. struct packet_offload *ptype;
  4186. list_for_each_entry_rcu(ptype, offload_head, list) {
  4187. if (ptype->type != type || !ptype->callbacks.gro_complete)
  4188. continue;
  4189. return ptype;
  4190. }
  4191. return NULL;
  4192. }
  4193. EXPORT_SYMBOL(gro_find_complete_by_type);
  4194. static void napi_skb_free_stolen_head(struct sk_buff *skb)
  4195. {
  4196. skb_dst_drop(skb);
  4197. secpath_reset(skb);
  4198. kmem_cache_free(skbuff_head_cache, skb);
  4199. }
  4200. static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
  4201. {
  4202. switch (ret) {
  4203. case GRO_NORMAL:
  4204. if (netif_receive_skb_internal(skb))
  4205. ret = GRO_DROP;
  4206. break;
  4207. case GRO_DROP:
  4208. kfree_skb(skb);
  4209. break;
  4210. case GRO_MERGED_FREE:
  4211. if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
  4212. napi_skb_free_stolen_head(skb);
  4213. else
  4214. __kfree_skb(skb);
  4215. break;
  4216. case GRO_HELD:
  4217. case GRO_MERGED:
  4218. case GRO_CONSUMED:
  4219. break;
  4220. }
  4221. return ret;
  4222. }
  4223. gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
  4224. {
  4225. skb_mark_napi_id(skb, napi);
  4226. trace_napi_gro_receive_entry(skb);
  4227. skb_gro_reset_offset(skb);
  4228. return napi_skb_finish(dev_gro_receive(napi, skb), skb);
  4229. }
  4230. EXPORT_SYMBOL(napi_gro_receive);
  4231. static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
  4232. {
  4233. if (unlikely(skb->pfmemalloc)) {
  4234. consume_skb(skb);
  4235. return;
  4236. }
  4237. __skb_pull(skb, skb_headlen(skb));
  4238. /* restore the reserve we had after netdev_alloc_skb_ip_align() */
  4239. skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
  4240. skb->vlan_tci = 0;
  4241. skb->dev = napi->dev;
  4242. skb->skb_iif = 0;
  4243. skb->encapsulation = 0;
  4244. skb_shinfo(skb)->gso_type = 0;
  4245. skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
  4246. secpath_reset(skb);
  4247. napi->skb = skb;
  4248. }
  4249. struct sk_buff *napi_get_frags(struct napi_struct *napi)
  4250. {
  4251. struct sk_buff *skb = napi->skb;
  4252. if (!skb) {
  4253. skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
  4254. if (skb) {
  4255. napi->skb = skb;
  4256. skb_mark_napi_id(skb, napi);
  4257. }
  4258. }
  4259. return skb;
  4260. }
  4261. EXPORT_SYMBOL(napi_get_frags);
  4262. static gro_result_t napi_frags_finish(struct napi_struct *napi,
  4263. struct sk_buff *skb,
  4264. gro_result_t ret)
  4265. {
  4266. switch (ret) {
  4267. case GRO_NORMAL:
  4268. case GRO_HELD:
  4269. __skb_push(skb, ETH_HLEN);
  4270. skb->protocol = eth_type_trans(skb, skb->dev);
  4271. if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
  4272. ret = GRO_DROP;
  4273. break;
  4274. case GRO_DROP:
  4275. napi_reuse_skb(napi, skb);
  4276. break;
  4277. case GRO_MERGED_FREE:
  4278. if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
  4279. napi_skb_free_stolen_head(skb);
  4280. else
  4281. napi_reuse_skb(napi, skb);
  4282. break;
  4283. case GRO_MERGED:
  4284. case GRO_CONSUMED:
  4285. break;
  4286. }
  4287. return ret;
  4288. }
  4289. /* Upper GRO stack assumes network header starts at gro_offset=0
  4290. * Drivers could call both napi_gro_frags() and napi_gro_receive()
  4291. * We copy ethernet header into skb->data to have a common layout.
  4292. */
  4293. static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
  4294. {
  4295. struct sk_buff *skb = napi->skb;
  4296. const struct ethhdr *eth;
  4297. unsigned int hlen = sizeof(*eth);
  4298. napi->skb = NULL;
  4299. skb_reset_mac_header(skb);
  4300. skb_gro_reset_offset(skb);
  4301. eth = skb_gro_header_fast(skb, 0);
  4302. if (unlikely(skb_gro_header_hard(skb, hlen))) {
  4303. eth = skb_gro_header_slow(skb, hlen, 0);
  4304. if (unlikely(!eth)) {
  4305. net_warn_ratelimited("%s: dropping impossible skb from %s\n",
  4306. __func__, napi->dev->name);
  4307. napi_reuse_skb(napi, skb);
  4308. return NULL;
  4309. }
  4310. } else {
  4311. gro_pull_from_frag0(skb, hlen);
  4312. NAPI_GRO_CB(skb)->frag0 += hlen;
  4313. NAPI_GRO_CB(skb)->frag0_len -= hlen;
  4314. }
  4315. __skb_pull(skb, hlen);
  4316. /*
  4317. * This works because the only protocols we care about don't require
  4318. * special handling.
  4319. * We'll fix it up properly in napi_frags_finish()
  4320. */
  4321. skb->protocol = eth->h_proto;
  4322. return skb;
  4323. }
  4324. gro_result_t napi_gro_frags(struct napi_struct *napi)
  4325. {
  4326. struct sk_buff *skb = napi_frags_skb(napi);
  4327. if (!skb)
  4328. return GRO_DROP;
  4329. trace_napi_gro_frags_entry(skb);
  4330. return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
  4331. }
  4332. EXPORT_SYMBOL(napi_gro_frags);
  4333. /* Compute the checksum from gro_offset and return the folded value
  4334. * after adding in any pseudo checksum.
  4335. */
  4336. __sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
  4337. {
  4338. __wsum wsum;
  4339. __sum16 sum;
  4340. wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
  4341. /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
  4342. sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
  4343. if (likely(!sum)) {
  4344. if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
  4345. !skb->csum_complete_sw)
  4346. netdev_rx_csum_fault(skb->dev);
  4347. }
  4348. NAPI_GRO_CB(skb)->csum = wsum;
  4349. NAPI_GRO_CB(skb)->csum_valid = 1;
  4350. return sum;
  4351. }
  4352. EXPORT_SYMBOL(__skb_gro_checksum_complete);
  4353. static void net_rps_send_ipi(struct softnet_data *remsd)
  4354. {
  4355. #ifdef CONFIG_RPS
  4356. while (remsd) {
  4357. struct softnet_data *next = remsd->rps_ipi_next;
  4358. if (cpu_online(remsd->cpu))
  4359. smp_call_function_single_async(remsd->cpu, &remsd->csd);
  4360. remsd = next;
  4361. }
  4362. #endif
  4363. }
  4364. /*
  4365. * net_rps_action_and_irq_enable sends any pending IPI's for rps.
  4366. * Note: called with local irq disabled, but exits with local irq enabled.
  4367. */
  4368. static void net_rps_action_and_irq_enable(struct softnet_data *sd)
  4369. {
  4370. #ifdef CONFIG_RPS
  4371. struct softnet_data *remsd = sd->rps_ipi_list;
  4372. if (remsd) {
  4373. sd->rps_ipi_list = NULL;
  4374. local_irq_enable();
  4375. /* Send pending IPI's to kick RPS processing on remote cpus. */
  4376. net_rps_send_ipi(remsd);
  4377. } else
  4378. #endif
  4379. local_irq_enable();
  4380. }
  4381. static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
  4382. {
  4383. #ifdef CONFIG_RPS
  4384. return sd->rps_ipi_list != NULL;
  4385. #else
  4386. return false;
  4387. #endif
  4388. }
  4389. static int process_backlog(struct napi_struct *napi, int quota)
  4390. {
  4391. struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
  4392. bool again = true;
  4393. int work = 0;
  4394. /* Check if we have pending ipi, its better to send them now,
  4395. * not waiting net_rx_action() end.
  4396. */
  4397. if (sd_has_rps_ipi_waiting(sd)) {
  4398. local_irq_disable();
  4399. net_rps_action_and_irq_enable(sd);
  4400. }
  4401. napi->weight = dev_rx_weight;
  4402. while (again) {
  4403. struct sk_buff *skb;
  4404. while ((skb = __skb_dequeue(&sd->process_queue))) {
  4405. rcu_read_lock();
  4406. __netif_receive_skb(skb);
  4407. rcu_read_unlock();
  4408. input_queue_head_incr(sd);
  4409. if (++work >= quota)
  4410. return work;
  4411. }
  4412. local_irq_disable();
  4413. rps_lock(sd);
  4414. if (skb_queue_empty(&sd->input_pkt_queue)) {
  4415. /*
  4416. * Inline a custom version of __napi_complete().
  4417. * only current cpu owns and manipulates this napi,
  4418. * and NAPI_STATE_SCHED is the only possible flag set
  4419. * on backlog.
  4420. * We can use a plain write instead of clear_bit(),
  4421. * and we dont need an smp_mb() memory barrier.
  4422. */
  4423. napi->state = 0;
  4424. again = false;
  4425. } else {
  4426. skb_queue_splice_tail_init(&sd->input_pkt_queue,
  4427. &sd->process_queue);
  4428. }
  4429. rps_unlock(sd);
  4430. local_irq_enable();
  4431. }
  4432. return work;
  4433. }
  4434. /**
  4435. * __napi_schedule - schedule for receive
  4436. * @n: entry to schedule
  4437. *
  4438. * The entry's receive function will be scheduled to run.
  4439. * Consider using __napi_schedule_irqoff() if hard irqs are masked.
  4440. */
  4441. void __napi_schedule(struct napi_struct *n)
  4442. {
  4443. unsigned long flags;
  4444. local_irq_save(flags);
  4445. ____napi_schedule(this_cpu_ptr(&softnet_data), n);
  4446. local_irq_restore(flags);
  4447. }
  4448. EXPORT_SYMBOL(__napi_schedule);
  4449. /**
  4450. * napi_schedule_prep - check if napi can be scheduled
  4451. * @n: napi context
  4452. *
  4453. * Test if NAPI routine is already running, and if not mark
  4454. * it as running. This is used as a condition variable
  4455. * insure only one NAPI poll instance runs. We also make
  4456. * sure there is no pending NAPI disable.
  4457. */
  4458. bool napi_schedule_prep(struct napi_struct *n)
  4459. {
  4460. unsigned long val, new;
  4461. do {
  4462. val = READ_ONCE(n->state);
  4463. if (unlikely(val & NAPIF_STATE_DISABLE))
  4464. return false;
  4465. new = val | NAPIF_STATE_SCHED;
  4466. /* Sets STATE_MISSED bit if STATE_SCHED was already set
  4467. * This was suggested by Alexander Duyck, as compiler
  4468. * emits better code than :
  4469. * if (val & NAPIF_STATE_SCHED)
  4470. * new |= NAPIF_STATE_MISSED;
  4471. */
  4472. new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
  4473. NAPIF_STATE_MISSED;
  4474. } while (cmpxchg(&n->state, val, new) != val);
  4475. return !(val & NAPIF_STATE_SCHED);
  4476. }
  4477. EXPORT_SYMBOL(napi_schedule_prep);
  4478. /**
  4479. * __napi_schedule_irqoff - schedule for receive
  4480. * @n: entry to schedule
  4481. *
  4482. * Variant of __napi_schedule() assuming hard irqs are masked
  4483. */
  4484. void __napi_schedule_irqoff(struct napi_struct *n)
  4485. {
  4486. ____napi_schedule(this_cpu_ptr(&softnet_data), n);
  4487. }
  4488. EXPORT_SYMBOL(__napi_schedule_irqoff);
  4489. bool napi_complete_done(struct napi_struct *n, int work_done)
  4490. {
  4491. unsigned long flags, val, new;
  4492. /*
  4493. * 1) Don't let napi dequeue from the cpu poll list
  4494. * just in case its running on a different cpu.
  4495. * 2) If we are busy polling, do nothing here, we have
  4496. * the guarantee we will be called later.
  4497. */
  4498. if (unlikely(n->state & (NAPIF_STATE_NPSVC |
  4499. NAPIF_STATE_IN_BUSY_POLL)))
  4500. return false;
  4501. if (n->gro_list) {
  4502. unsigned long timeout = 0;
  4503. if (work_done)
  4504. timeout = n->dev->gro_flush_timeout;
  4505. if (timeout)
  4506. hrtimer_start(&n->timer, ns_to_ktime(timeout),
  4507. HRTIMER_MODE_REL_PINNED);
  4508. else
  4509. napi_gro_flush(n, false);
  4510. }
  4511. if (unlikely(!list_empty(&n->poll_list))) {
  4512. /* If n->poll_list is not empty, we need to mask irqs */
  4513. local_irq_save(flags);
  4514. list_del_init(&n->poll_list);
  4515. local_irq_restore(flags);
  4516. }
  4517. do {
  4518. val = READ_ONCE(n->state);
  4519. WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));
  4520. new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED);
  4521. /* If STATE_MISSED was set, leave STATE_SCHED set,
  4522. * because we will call napi->poll() one more time.
  4523. * This C code was suggested by Alexander Duyck to help gcc.
  4524. */
  4525. new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
  4526. NAPIF_STATE_SCHED;
  4527. } while (cmpxchg(&n->state, val, new) != val);
  4528. if (unlikely(val & NAPIF_STATE_MISSED)) {
  4529. __napi_schedule(n);
  4530. return false;
  4531. }
  4532. return true;
  4533. }
  4534. EXPORT_SYMBOL(napi_complete_done);
  4535. /* must be called under rcu_read_lock(), as we dont take a reference */
  4536. static struct napi_struct *napi_by_id(unsigned int napi_id)
  4537. {
  4538. unsigned int hash = napi_id % HASH_SIZE(napi_hash);
  4539. struct napi_struct *napi;
  4540. hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
  4541. if (napi->napi_id == napi_id)
  4542. return napi;
  4543. return NULL;
  4544. }
  4545. #if defined(CONFIG_NET_RX_BUSY_POLL)
  4546. #define BUSY_POLL_BUDGET 8
  4547. static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
  4548. {
  4549. int rc;
  4550. /* Busy polling means there is a high chance device driver hard irq
  4551. * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
  4552. * set in napi_schedule_prep().
  4553. * Since we are about to call napi->poll() once more, we can safely
  4554. * clear NAPI_STATE_MISSED.
  4555. *
  4556. * Note: x86 could use a single "lock and ..." instruction
  4557. * to perform these two clear_bit()
  4558. */
  4559. clear_bit(NAPI_STATE_MISSED, &napi->state);
  4560. clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
  4561. local_bh_disable();
  4562. /* All we really want here is to re-enable device interrupts.
  4563. * Ideally, a new ndo_busy_poll_stop() could avoid another round.
  4564. */
  4565. rc = napi->poll(napi, BUSY_POLL_BUDGET);
  4566. trace_napi_poll(napi, rc, BUSY_POLL_BUDGET);
  4567. netpoll_poll_unlock(have_poll_lock);
  4568. if (rc == BUSY_POLL_BUDGET)
  4569. __napi_schedule(napi);
  4570. local_bh_enable();
  4571. }
  4572. void napi_busy_loop(unsigned int napi_id,
  4573. bool (*loop_end)(void *, unsigned long),
  4574. void *loop_end_arg)
  4575. {
  4576. unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
  4577. int (*napi_poll)(struct napi_struct *napi, int budget);
  4578. void *have_poll_lock = NULL;
  4579. struct napi_struct *napi;
  4580. restart:
  4581. napi_poll = NULL;
  4582. rcu_read_lock();
  4583. napi = napi_by_id(napi_id);
  4584. if (!napi)
  4585. goto out;
  4586. preempt_disable();
  4587. for (;;) {
  4588. int work = 0;
  4589. local_bh_disable();
  4590. if (!napi_poll) {
  4591. unsigned long val = READ_ONCE(napi->state);
  4592. /* If multiple threads are competing for this napi,
  4593. * we avoid dirtying napi->state as much as we can.
  4594. */
  4595. if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
  4596. NAPIF_STATE_IN_BUSY_POLL))
  4597. goto count;
  4598. if (cmpxchg(&napi->state, val,
  4599. val | NAPIF_STATE_IN_BUSY_POLL |
  4600. NAPIF_STATE_SCHED) != val)
  4601. goto count;
  4602. have_poll_lock = netpoll_poll_lock(napi);
  4603. napi_poll = napi->poll;
  4604. }
  4605. work = napi_poll(napi, BUSY_POLL_BUDGET);
  4606. trace_napi_poll(napi, work, BUSY_POLL_BUDGET);
  4607. count:
  4608. if (work > 0)
  4609. __NET_ADD_STATS(dev_net(napi->dev),
  4610. LINUX_MIB_BUSYPOLLRXPACKETS, work);
  4611. local_bh_enable();
  4612. if (!loop_end || loop_end(loop_end_arg, start_time))
  4613. break;
  4614. if (unlikely(need_resched())) {
  4615. if (napi_poll)
  4616. busy_poll_stop(napi, have_poll_lock);
  4617. preempt_enable();
  4618. rcu_read_unlock();
  4619. cond_resched();
  4620. if (loop_end(loop_end_arg, start_time))
  4621. return;
  4622. goto restart;
  4623. }
  4624. cpu_relax();
  4625. }
  4626. if (napi_poll)
  4627. busy_poll_stop(napi, have_poll_lock);
  4628. preempt_enable();
  4629. out:
  4630. rcu_read_unlock();
  4631. }
  4632. EXPORT_SYMBOL(napi_busy_loop);
  4633. #endif /* CONFIG_NET_RX_BUSY_POLL */
  4634. static void napi_hash_add(struct napi_struct *napi)
  4635. {
  4636. if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) ||
  4637. test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
  4638. return;
  4639. spin_lock(&napi_hash_lock);
  4640. /* 0..NR_CPUS range is reserved for sender_cpu use */
  4641. do {
  4642. if (unlikely(++napi_gen_id < MIN_NAPI_ID))
  4643. napi_gen_id = MIN_NAPI_ID;
  4644. } while (napi_by_id(napi_gen_id));
  4645. napi->napi_id = napi_gen_id;
  4646. hlist_add_head_rcu(&napi->napi_hash_node,
  4647. &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
  4648. spin_unlock(&napi_hash_lock);
  4649. }
  4650. /* Warning : caller is responsible to make sure rcu grace period
  4651. * is respected before freeing memory containing @napi
  4652. */
  4653. bool napi_hash_del(struct napi_struct *napi)
  4654. {
  4655. bool rcu_sync_needed = false;
  4656. spin_lock(&napi_hash_lock);
  4657. if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) {
  4658. rcu_sync_needed = true;
  4659. hlist_del_rcu(&napi->napi_hash_node);
  4660. }
  4661. spin_unlock(&napi_hash_lock);
  4662. return rcu_sync_needed;
  4663. }
  4664. EXPORT_SYMBOL_GPL(napi_hash_del);
  4665. static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
  4666. {
  4667. struct napi_struct *napi;
  4668. napi = container_of(timer, struct napi_struct, timer);
  4669. /* Note : we use a relaxed variant of napi_schedule_prep() not setting
  4670. * NAPI_STATE_MISSED, since we do not react to a device IRQ.
  4671. */
  4672. if (napi->gro_list && !napi_disable_pending(napi) &&
  4673. !test_and_set_bit(NAPI_STATE_SCHED, &napi->state))
  4674. __napi_schedule_irqoff(napi);
  4675. return HRTIMER_NORESTART;
  4676. }
  4677. void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
  4678. int (*poll)(struct napi_struct *, int), int weight)
  4679. {
  4680. INIT_LIST_HEAD(&napi->poll_list);
  4681. hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
  4682. napi->timer.function = napi_watchdog;
  4683. napi->gro_count = 0;
  4684. napi->gro_list = NULL;
  4685. napi->skb = NULL;
  4686. napi->poll = poll;
  4687. if (weight > NAPI_POLL_WEIGHT)
  4688. pr_err_once("netif_napi_add() called with weight %d on device %s\n",
  4689. weight, dev->name);
  4690. napi->weight = weight;
  4691. list_add(&napi->dev_list, &dev->napi_list);
  4692. napi->dev = dev;
  4693. #ifdef CONFIG_NETPOLL
  4694. napi->poll_owner = -1;
  4695. #endif
  4696. set_bit(NAPI_STATE_SCHED, &napi->state);
  4697. napi_hash_add(napi);
  4698. }
  4699. EXPORT_SYMBOL(netif_napi_add);
  4700. void napi_disable(struct napi_struct *n)
  4701. {
  4702. might_sleep();
  4703. set_bit(NAPI_STATE_DISABLE, &n->state);
  4704. while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
  4705. msleep(1);
  4706. while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
  4707. msleep(1);
  4708. hrtimer_cancel(&n->timer);
  4709. clear_bit(NAPI_STATE_DISABLE, &n->state);
  4710. }
  4711. EXPORT_SYMBOL(napi_disable);
  4712. /* Must be called in process context */
  4713. void netif_napi_del(struct napi_struct *napi)
  4714. {
  4715. might_sleep();
  4716. if (napi_hash_del(napi))
  4717. synchronize_net();
  4718. list_del_init(&napi->dev_list);
  4719. napi_free_frags(napi);
  4720. kfree_skb_list(napi->gro_list);
  4721. napi->gro_list = NULL;
  4722. napi->gro_count = 0;
  4723. }
  4724. EXPORT_SYMBOL(netif_napi_del);
  4725. static int napi_poll(struct napi_struct *n, struct list_head *repoll)
  4726. {
  4727. void *have;
  4728. int work, weight;
  4729. list_del_init(&n->poll_list);
  4730. have = netpoll_poll_lock(n);
  4731. weight = n->weight;
  4732. /* This NAPI_STATE_SCHED test is for avoiding a race
  4733. * with netpoll's poll_napi(). Only the entity which
  4734. * obtains the lock and sees NAPI_STATE_SCHED set will
  4735. * actually make the ->poll() call. Therefore we avoid
  4736. * accidentally calling ->poll() when NAPI is not scheduled.
  4737. */
  4738. work = 0;
  4739. if (test_bit(NAPI_STATE_SCHED, &n->state)) {
  4740. work = n->poll(n, weight);
  4741. trace_napi_poll(n, work, weight);
  4742. }
  4743. WARN_ON_ONCE(work > weight);
  4744. if (likely(work < weight))
  4745. goto out_unlock;
  4746. /* Drivers must not modify the NAPI state if they
  4747. * consume the entire weight. In such cases this code
  4748. * still "owns" the NAPI instance and therefore can
  4749. * move the instance around on the list at-will.
  4750. */
  4751. if (unlikely(napi_disable_pending(n))) {
  4752. napi_complete(n);
  4753. goto out_unlock;
  4754. }
  4755. if (n->gro_list) {
  4756. /* flush too old packets
  4757. * If HZ < 1000, flush all packets.
  4758. */
  4759. napi_gro_flush(n, HZ >= 1000);
  4760. }
  4761. /* Some drivers may have called napi_schedule
  4762. * prior to exhausting their budget.
  4763. */
  4764. if (unlikely(!list_empty(&n->poll_list))) {
  4765. pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
  4766. n->dev ? n->dev->name : "backlog");
  4767. goto out_unlock;
  4768. }
  4769. list_add_tail(&n->poll_list, repoll);
  4770. out_unlock:
  4771. netpoll_poll_unlock(have);
  4772. return work;
  4773. }
  4774. static __latent_entropy void net_rx_action(struct softirq_action *h)
  4775. {
  4776. struct softnet_data *sd = this_cpu_ptr(&softnet_data);
  4777. unsigned long time_limit = jiffies +
  4778. usecs_to_jiffies(netdev_budget_usecs);
  4779. int budget = netdev_budget;
  4780. LIST_HEAD(list);
  4781. LIST_HEAD(repoll);
  4782. local_irq_disable();
  4783. list_splice_init(&sd->poll_list, &list);
  4784. local_irq_enable();
  4785. for (;;) {
  4786. struct napi_struct *n;
  4787. if (list_empty(&list)) {
  4788. if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
  4789. goto out;
  4790. break;
  4791. }
  4792. n = list_first_entry(&list, struct napi_struct, poll_list);
  4793. budget -= napi_poll(n, &repoll);
  4794. /* If softirq window is exhausted then punt.
  4795. * Allow this to run for 2 jiffies since which will allow
  4796. * an average latency of 1.5/HZ.
  4797. */
  4798. if (unlikely(budget <= 0 ||
  4799. time_after_eq(jiffies, time_limit))) {
  4800. sd->time_squeeze++;
  4801. break;
  4802. }
  4803. }
  4804. local_irq_disable();
  4805. list_splice_tail_init(&sd->poll_list, &list);
  4806. list_splice_tail(&repoll, &list);
  4807. list_splice(&list, &sd->poll_list);
  4808. if (!list_empty(&sd->poll_list))
  4809. __raise_softirq_irqoff(NET_RX_SOFTIRQ);
  4810. net_rps_action_and_irq_enable(sd);
  4811. out:
  4812. __kfree_skb_flush();
  4813. }
  4814. struct netdev_adjacent {
  4815. struct net_device *dev;
  4816. /* upper master flag, there can only be one master device per list */
  4817. bool master;
  4818. /* counter for the number of times this device was added to us */
  4819. u16 ref_nr;
  4820. /* private field for the users */
  4821. void *private;
  4822. struct list_head list;
  4823. struct rcu_head rcu;
  4824. };
  4825. static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
  4826. struct list_head *adj_list)
  4827. {
  4828. struct netdev_adjacent *adj;
  4829. list_for_each_entry(adj, adj_list, list) {
  4830. if (adj->dev == adj_dev)
  4831. return adj;
  4832. }
  4833. return NULL;
  4834. }
  4835. static int __netdev_has_upper_dev(struct net_device *upper_dev, void *data)
  4836. {
  4837. struct net_device *dev = data;
  4838. return upper_dev == dev;
  4839. }
  4840. /**
  4841. * netdev_has_upper_dev - Check if device is linked to an upper device
  4842. * @dev: device
  4843. * @upper_dev: upper device to check
  4844. *
  4845. * Find out if a device is linked to specified upper device and return true
  4846. * in case it is. Note that this checks only immediate upper device,
  4847. * not through a complete stack of devices. The caller must hold the RTNL lock.
  4848. */
  4849. bool netdev_has_upper_dev(struct net_device *dev,
  4850. struct net_device *upper_dev)
  4851. {
  4852. ASSERT_RTNL();
  4853. return netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
  4854. upper_dev);
  4855. }
  4856. EXPORT_SYMBOL(netdev_has_upper_dev);
  4857. /**
  4858. * netdev_has_upper_dev_all - Check if device is linked to an upper device
  4859. * @dev: device
  4860. * @upper_dev: upper device to check
  4861. *
  4862. * Find out if a device is linked to specified upper device and return true
  4863. * in case it is. Note that this checks the entire upper device chain.
  4864. * The caller must hold rcu lock.
  4865. */
  4866. bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
  4867. struct net_device *upper_dev)
  4868. {
  4869. return !!netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
  4870. upper_dev);
  4871. }
  4872. EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
  4873. /**
  4874. * netdev_has_any_upper_dev - Check if device is linked to some device
  4875. * @dev: device
  4876. *
  4877. * Find out if a device is linked to an upper device and return true in case
  4878. * it is. The caller must hold the RTNL lock.
  4879. */
  4880. bool netdev_has_any_upper_dev(struct net_device *dev)
  4881. {
  4882. ASSERT_RTNL();
  4883. return !list_empty(&dev->adj_list.upper);
  4884. }
  4885. EXPORT_SYMBOL(netdev_has_any_upper_dev);
  4886. /**
  4887. * netdev_master_upper_dev_get - Get master upper device
  4888. * @dev: device
  4889. *
  4890. * Find a master upper device and return pointer to it or NULL in case
  4891. * it's not there. The caller must hold the RTNL lock.
  4892. */
  4893. struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
  4894. {
  4895. struct netdev_adjacent *upper;
  4896. ASSERT_RTNL();
  4897. if (list_empty(&dev->adj_list.upper))
  4898. return NULL;
  4899. upper = list_first_entry(&dev->adj_list.upper,
  4900. struct netdev_adjacent, list);
  4901. if (likely(upper->master))
  4902. return upper->dev;
  4903. return NULL;
  4904. }
  4905. EXPORT_SYMBOL(netdev_master_upper_dev_get);
  4906. /**
  4907. * netdev_has_any_lower_dev - Check if device is linked to some device
  4908. * @dev: device
  4909. *
  4910. * Find out if a device is linked to a lower device and return true in case
  4911. * it is. The caller must hold the RTNL lock.
  4912. */
  4913. static bool netdev_has_any_lower_dev(struct net_device *dev)
  4914. {
  4915. ASSERT_RTNL();
  4916. return !list_empty(&dev->adj_list.lower);
  4917. }
  4918. void *netdev_adjacent_get_private(struct list_head *adj_list)
  4919. {
  4920. struct netdev_adjacent *adj;
  4921. adj = list_entry(adj_list, struct netdev_adjacent, list);
  4922. return adj->private;
  4923. }
  4924. EXPORT_SYMBOL(netdev_adjacent_get_private);
  4925. /**
  4926. * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
  4927. * @dev: device
  4928. * @iter: list_head ** of the current position
  4929. *
  4930. * Gets the next device from the dev's upper list, starting from iter
  4931. * position. The caller must hold RCU read lock.
  4932. */
  4933. struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
  4934. struct list_head **iter)
  4935. {
  4936. struct netdev_adjacent *upper;
  4937. WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
  4938. upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
  4939. if (&upper->list == &dev->adj_list.upper)
  4940. return NULL;
  4941. *iter = &upper->list;
  4942. return upper->dev;
  4943. }
  4944. EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
  4945. static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
  4946. struct list_head **iter)
  4947. {
  4948. struct netdev_adjacent *upper;
  4949. WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
  4950. upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
  4951. if (&upper->list == &dev->adj_list.upper)
  4952. return NULL;
  4953. *iter = &upper->list;
  4954. return upper->dev;
  4955. }
  4956. int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
  4957. int (*fn)(struct net_device *dev,
  4958. void *data),
  4959. void *data)
  4960. {
  4961. struct net_device *udev;
  4962. struct list_head *iter;
  4963. int ret;
  4964. for (iter = &dev->adj_list.upper,
  4965. udev = netdev_next_upper_dev_rcu(dev, &iter);
  4966. udev;
  4967. udev = netdev_next_upper_dev_rcu(dev, &iter)) {
  4968. /* first is the upper device itself */
  4969. ret = fn(udev, data);
  4970. if (ret)
  4971. return ret;
  4972. /* then look at all of its upper devices */
  4973. ret = netdev_walk_all_upper_dev_rcu(udev, fn, data);
  4974. if (ret)
  4975. return ret;
  4976. }
  4977. return 0;
  4978. }
  4979. EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
  4980. /**
  4981. * netdev_lower_get_next_private - Get the next ->private from the
  4982. * lower neighbour list
  4983. * @dev: device
  4984. * @iter: list_head ** of the current position
  4985. *
  4986. * Gets the next netdev_adjacent->private from the dev's lower neighbour
  4987. * list, starting from iter position. The caller must hold either hold the
  4988. * RTNL lock or its own locking that guarantees that the neighbour lower
  4989. * list will remain unchanged.
  4990. */
  4991. void *netdev_lower_get_next_private(struct net_device *dev,
  4992. struct list_head **iter)
  4993. {
  4994. struct netdev_adjacent *lower;
  4995. lower = list_entry(*iter, struct netdev_adjacent, list);
  4996. if (&lower->list == &dev->adj_list.lower)
  4997. return NULL;
  4998. *iter = lower->list.next;
  4999. return lower->private;
  5000. }
  5001. EXPORT_SYMBOL(netdev_lower_get_next_private);
  5002. /**
  5003. * netdev_lower_get_next_private_rcu - Get the next ->private from the
  5004. * lower neighbour list, RCU
  5005. * variant
  5006. * @dev: device
  5007. * @iter: list_head ** of the current position
  5008. *
  5009. * Gets the next netdev_adjacent->private from the dev's lower neighbour
  5010. * list, starting from iter position. The caller must hold RCU read lock.
  5011. */
  5012. void *netdev_lower_get_next_private_rcu(struct net_device *dev,
  5013. struct list_head **iter)
  5014. {
  5015. struct netdev_adjacent *lower;
  5016. WARN_ON_ONCE(!rcu_read_lock_held());
  5017. lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
  5018. if (&lower->list == &dev->adj_list.lower)
  5019. return NULL;
  5020. *iter = &lower->list;
  5021. return lower->private;
  5022. }
  5023. EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
  5024. /**
  5025. * netdev_lower_get_next - Get the next device from the lower neighbour
  5026. * list
  5027. * @dev: device
  5028. * @iter: list_head ** of the current position
  5029. *
  5030. * Gets the next netdev_adjacent from the dev's lower neighbour
  5031. * list, starting from iter position. The caller must hold RTNL lock or
  5032. * its own locking that guarantees that the neighbour lower
  5033. * list will remain unchanged.
  5034. */
  5035. void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
  5036. {
  5037. struct netdev_adjacent *lower;
  5038. lower = list_entry(*iter, struct netdev_adjacent, list);
  5039. if (&lower->list == &dev->adj_list.lower)
  5040. return NULL;
  5041. *iter = lower->list.next;
  5042. return lower->dev;
  5043. }
  5044. EXPORT_SYMBOL(netdev_lower_get_next);
  5045. static struct net_device *netdev_next_lower_dev(struct net_device *dev,
  5046. struct list_head **iter)
  5047. {
  5048. struct netdev_adjacent *lower;
  5049. lower = list_entry((*iter)->next, struct netdev_adjacent, list);
  5050. if (&lower->list == &dev->adj_list.lower)
  5051. return NULL;
  5052. *iter = &lower->list;
  5053. return lower->dev;
  5054. }
  5055. int netdev_walk_all_lower_dev(struct net_device *dev,
  5056. int (*fn)(struct net_device *dev,
  5057. void *data),
  5058. void *data)
  5059. {
  5060. struct net_device *ldev;
  5061. struct list_head *iter;
  5062. int ret;
  5063. for (iter = &dev->adj_list.lower,
  5064. ldev = netdev_next_lower_dev(dev, &iter);
  5065. ldev;
  5066. ldev = netdev_next_lower_dev(dev, &iter)) {
  5067. /* first is the lower device itself */
  5068. ret = fn(ldev, data);
  5069. if (ret)
  5070. return ret;
  5071. /* then look at all of its lower devices */
  5072. ret = netdev_walk_all_lower_dev(ldev, fn, data);
  5073. if (ret)
  5074. return ret;
  5075. }
  5076. return 0;
  5077. }
  5078. EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);
  5079. static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
  5080. struct list_head **iter)
  5081. {
  5082. struct netdev_adjacent *lower;
  5083. lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
  5084. if (&lower->list == &dev->adj_list.lower)
  5085. return NULL;
  5086. *iter = &lower->list;
  5087. return lower->dev;
  5088. }
  5089. int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
  5090. int (*fn)(struct net_device *dev,
  5091. void *data),
  5092. void *data)
  5093. {
  5094. struct net_device *ldev;
  5095. struct list_head *iter;
  5096. int ret;
  5097. for (iter = &dev->adj_list.lower,
  5098. ldev = netdev_next_lower_dev_rcu(dev, &iter);
  5099. ldev;
  5100. ldev = netdev_next_lower_dev_rcu(dev, &iter)) {
  5101. /* first is the lower device itself */
  5102. ret = fn(ldev, data);
  5103. if (ret)
  5104. return ret;
  5105. /* then look at all of its lower devices */
  5106. ret = netdev_walk_all_lower_dev_rcu(ldev, fn, data);
  5107. if (ret)
  5108. return ret;
  5109. }
  5110. return 0;
  5111. }
  5112. EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);
  5113. /**
  5114. * netdev_lower_get_first_private_rcu - Get the first ->private from the
  5115. * lower neighbour list, RCU
  5116. * variant
  5117. * @dev: device
  5118. *
  5119. * Gets the first netdev_adjacent->private from the dev's lower neighbour
  5120. * list. The caller must hold RCU read lock.
  5121. */
  5122. void *netdev_lower_get_first_private_rcu(struct net_device *dev)
  5123. {
  5124. struct netdev_adjacent *lower;
  5125. lower = list_first_or_null_rcu(&dev->adj_list.lower,
  5126. struct netdev_adjacent, list);
  5127. if (lower)
  5128. return lower->private;
  5129. return NULL;
  5130. }
  5131. EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
  5132. /**
  5133. * netdev_master_upper_dev_get_rcu - Get master upper device
  5134. * @dev: device
  5135. *
  5136. * Find a master upper device and return pointer to it or NULL in case
  5137. * it's not there. The caller must hold the RCU read lock.
  5138. */
  5139. struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
  5140. {
  5141. struct netdev_adjacent *upper;
  5142. upper = list_first_or_null_rcu(&dev->adj_list.upper,
  5143. struct netdev_adjacent, list);
  5144. if (upper && likely(upper->master))
  5145. return upper->dev;
  5146. return NULL;
  5147. }
  5148. EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
  5149. static int netdev_adjacent_sysfs_add(struct net_device *dev,
  5150. struct net_device *adj_dev,
  5151. struct list_head *dev_list)
  5152. {
  5153. char linkname[IFNAMSIZ+7];
  5154. sprintf(linkname, dev_list == &dev->adj_list.upper ?
  5155. "upper_%s" : "lower_%s", adj_dev->name);
  5156. return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
  5157. linkname);
  5158. }
  5159. static void netdev_adjacent_sysfs_del(struct net_device *dev,
  5160. char *name,
  5161. struct list_head *dev_list)
  5162. {
  5163. char linkname[IFNAMSIZ+7];
  5164. sprintf(linkname, dev_list == &dev->adj_list.upper ?
  5165. "upper_%s" : "lower_%s", name);
  5166. sysfs_remove_link(&(dev->dev.kobj), linkname);
  5167. }
  5168. static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
  5169. struct net_device *adj_dev,
  5170. struct list_head *dev_list)
  5171. {
  5172. return (dev_list == &dev->adj_list.upper ||
  5173. dev_list == &dev->adj_list.lower) &&
  5174. net_eq(dev_net(dev), dev_net(adj_dev));
  5175. }
  5176. static int __netdev_adjacent_dev_insert(struct net_device *dev,
  5177. struct net_device *adj_dev,
  5178. struct list_head *dev_list,
  5179. void *private, bool master)
  5180. {
  5181. struct netdev_adjacent *adj;
  5182. int ret;
  5183. adj = __netdev_find_adj(adj_dev, dev_list);
  5184. if (adj) {
  5185. adj->ref_nr += 1;
  5186. pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
  5187. dev->name, adj_dev->name, adj->ref_nr);
  5188. return 0;
  5189. }
  5190. adj = kmalloc(sizeof(*adj), GFP_KERNEL);
  5191. if (!adj)
  5192. return -ENOMEM;
  5193. adj->dev = adj_dev;
  5194. adj->master = master;
  5195. adj->ref_nr = 1;
  5196. adj->private = private;
  5197. dev_hold(adj_dev);
  5198. pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
  5199. dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);
  5200. if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
  5201. ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
  5202. if (ret)
  5203. goto free_adj;
  5204. }
  5205. /* Ensure that master link is always the first item in list. */
  5206. if (master) {
  5207. ret = sysfs_create_link(&(dev->dev.kobj),
  5208. &(adj_dev->dev.kobj), "master");
  5209. if (ret)
  5210. goto remove_symlinks;
  5211. list_add_rcu(&adj->list, dev_list);
  5212. } else {
  5213. list_add_tail_rcu(&adj->list, dev_list);
  5214. }
  5215. return 0;
  5216. remove_symlinks:
  5217. if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
  5218. netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
  5219. free_adj:
  5220. kfree(adj);
  5221. dev_put(adj_dev);
  5222. return ret;
  5223. }
  5224. static void __netdev_adjacent_dev_remove(struct net_device *dev,
  5225. struct net_device *adj_dev,
  5226. u16 ref_nr,
  5227. struct list_head *dev_list)
  5228. {
  5229. struct netdev_adjacent *adj;
  5230. pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
  5231. dev->name, adj_dev->name, ref_nr);
  5232. adj = __netdev_find_adj(adj_dev, dev_list);
  5233. if (!adj) {
  5234. pr_err("Adjacency does not exist for device %s from %s\n",
  5235. dev->name, adj_dev->name);
  5236. WARN_ON(1);
  5237. return;
  5238. }
  5239. if (adj->ref_nr > ref_nr) {
  5240. pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
  5241. dev->name, adj_dev->name, ref_nr,
  5242. adj->ref_nr - ref_nr);
  5243. adj->ref_nr -= ref_nr;
  5244. return;
  5245. }
  5246. if (adj->master)
  5247. sysfs_remove_link(&(dev->dev.kobj), "master");
  5248. if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
  5249. netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
  5250. list_del_rcu(&adj->list);
  5251. pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
  5252. adj_dev->name, dev->name, adj_dev->name);
  5253. dev_put(adj_dev);
  5254. kfree_rcu(adj, rcu);
  5255. }
  5256. static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
  5257. struct net_device *upper_dev,
  5258. struct list_head *up_list,
  5259. struct list_head *down_list,
  5260. void *private, bool master)
  5261. {
  5262. int ret;
  5263. ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
  5264. private, master);
  5265. if (ret)
  5266. return ret;
  5267. ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
  5268. private, false);
  5269. if (ret) {
  5270. __netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
  5271. return ret;
  5272. }
  5273. return 0;
  5274. }
  5275. static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
  5276. struct net_device *upper_dev,
  5277. u16 ref_nr,
  5278. struct list_head *up_list,
  5279. struct list_head *down_list)
  5280. {
  5281. __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
  5282. __netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
  5283. }
  5284. static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
  5285. struct net_device *upper_dev,
  5286. void *private, bool master)
  5287. {
  5288. return __netdev_adjacent_dev_link_lists(dev, upper_dev,
  5289. &dev->adj_list.upper,
  5290. &upper_dev->adj_list.lower,
  5291. private, master);
  5292. }
  5293. static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
  5294. struct net_device *upper_dev)
  5295. {
  5296. __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
  5297. &dev->adj_list.upper,
  5298. &upper_dev->adj_list.lower);
  5299. }
  5300. static int __netdev_upper_dev_link(struct net_device *dev,
  5301. struct net_device *upper_dev, bool master,
  5302. void *upper_priv, void *upper_info,
  5303. struct netlink_ext_ack *extack)
  5304. {
  5305. struct netdev_notifier_changeupper_info changeupper_info = {
  5306. .info = {
  5307. .dev = dev,
  5308. .extack = extack,
  5309. },
  5310. .upper_dev = upper_dev,
  5311. .master = master,
  5312. .linking = true,
  5313. .upper_info = upper_info,
  5314. };
  5315. int ret = 0;
  5316. ASSERT_RTNL();
  5317. if (dev == upper_dev)
  5318. return -EBUSY;
  5319. /* To prevent loops, check if dev is not upper device to upper_dev. */
  5320. if (netdev_has_upper_dev(upper_dev, dev))
  5321. return -EBUSY;
  5322. if (netdev_has_upper_dev(dev, upper_dev))
  5323. return -EEXIST;
  5324. if (master && netdev_master_upper_dev_get(dev))
  5325. return -EBUSY;
  5326. ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
  5327. &changeupper_info.info);
  5328. ret = notifier_to_errno(ret);
  5329. if (ret)
  5330. return ret;
  5331. ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
  5332. master);
  5333. if (ret)
  5334. return ret;
  5335. ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
  5336. &changeupper_info.info);
  5337. ret = notifier_to_errno(ret);
  5338. if (ret)
  5339. goto rollback;
  5340. return 0;
  5341. rollback:
  5342. __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
  5343. return ret;
  5344. }
  5345. /**
  5346. * netdev_upper_dev_link - Add a link to the upper device
  5347. * @dev: device
  5348. * @upper_dev: new upper device
  5349. *
  5350. * Adds a link to device which is upper to this one. The caller must hold
  5351. * the RTNL lock. On a failure a negative errno code is returned.
  5352. * On success the reference counts are adjusted and the function
  5353. * returns zero.
  5354. */
  5355. int netdev_upper_dev_link(struct net_device *dev,
  5356. struct net_device *upper_dev,
  5357. struct netlink_ext_ack *extack)
  5358. {
  5359. return __netdev_upper_dev_link(dev, upper_dev, false,
  5360. NULL, NULL, extack);
  5361. }
  5362. EXPORT_SYMBOL(netdev_upper_dev_link);
  5363. /**
  5364. * netdev_master_upper_dev_link - Add a master link to the upper device
  5365. * @dev: device
  5366. * @upper_dev: new upper device
  5367. * @upper_priv: upper device private
  5368. * @upper_info: upper info to be passed down via notifier
  5369. *
  5370. * Adds a link to device which is upper to this one. In this case, only
  5371. * one master upper device can be linked, although other non-master devices
  5372. * might be linked as well. The caller must hold the RTNL lock.
  5373. * On a failure a negative errno code is returned. On success the reference
  5374. * counts are adjusted and the function returns zero.
  5375. */
  5376. int netdev_master_upper_dev_link(struct net_device *dev,
  5377. struct net_device *upper_dev,
  5378. void *upper_priv, void *upper_info,
  5379. struct netlink_ext_ack *extack)
  5380. {
  5381. return __netdev_upper_dev_link(dev, upper_dev, true,
  5382. upper_priv, upper_info, extack);
  5383. }
  5384. EXPORT_SYMBOL(netdev_master_upper_dev_link);
  5385. /**
  5386. * netdev_upper_dev_unlink - Removes a link to upper device
  5387. * @dev: device
  5388. * @upper_dev: new upper device
  5389. *
  5390. * Removes a link to device which is upper to this one. The caller must hold
  5391. * the RTNL lock.
  5392. */
  5393. void netdev_upper_dev_unlink(struct net_device *dev,
  5394. struct net_device *upper_dev)
  5395. {
  5396. struct netdev_notifier_changeupper_info changeupper_info = {
  5397. .info = {
  5398. .dev = dev,
  5399. },
  5400. .upper_dev = upper_dev,
  5401. .linking = false,
  5402. };
  5403. ASSERT_RTNL();
  5404. changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
  5405. call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
  5406. &changeupper_info.info);
  5407. __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
  5408. call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
  5409. &changeupper_info.info);
  5410. }
  5411. EXPORT_SYMBOL(netdev_upper_dev_unlink);
  5412. /**
  5413. * netdev_bonding_info_change - Dispatch event about slave change
  5414. * @dev: device
  5415. * @bonding_info: info to dispatch
  5416. *
  5417. * Send NETDEV_BONDING_INFO to netdev notifiers with info.
  5418. * The caller must hold the RTNL lock.
  5419. */
  5420. void netdev_bonding_info_change(struct net_device *dev,
  5421. struct netdev_bonding_info *bonding_info)
  5422. {
  5423. struct netdev_notifier_bonding_info info = {
  5424. .info.dev = dev,
  5425. };
  5426. memcpy(&info.bonding_info, bonding_info,
  5427. sizeof(struct netdev_bonding_info));
  5428. call_netdevice_notifiers_info(NETDEV_BONDING_INFO,
  5429. &info.info);
  5430. }
  5431. EXPORT_SYMBOL(netdev_bonding_info_change);
  5432. static void netdev_adjacent_add_links(struct net_device *dev)
  5433. {
  5434. struct netdev_adjacent *iter;
  5435. struct net *net = dev_net(dev);
  5436. list_for_each_entry(iter, &dev->adj_list.upper, list) {
  5437. if (!net_eq(net, dev_net(iter->dev)))
  5438. continue;
  5439. netdev_adjacent_sysfs_add(iter->dev, dev,
  5440. &iter->dev->adj_list.lower);
  5441. netdev_adjacent_sysfs_add(dev, iter->dev,
  5442. &dev->adj_list.upper);
  5443. }
  5444. list_for_each_entry(iter, &dev->adj_list.lower, list) {
  5445. if (!net_eq(net, dev_net(iter->dev)))
  5446. continue;
  5447. netdev_adjacent_sysfs_add(iter->dev, dev,
  5448. &iter->dev->adj_list.upper);
  5449. netdev_adjacent_sysfs_add(dev, iter->dev,
  5450. &dev->adj_list.lower);
  5451. }
  5452. }
  5453. static void netdev_adjacent_del_links(struct net_device *dev)
  5454. {
  5455. struct netdev_adjacent *iter;
  5456. struct net *net = dev_net(dev);
  5457. list_for_each_entry(iter, &dev->adj_list.upper, list) {
  5458. if (!net_eq(net, dev_net(iter->dev)))
  5459. continue;
  5460. netdev_adjacent_sysfs_del(iter->dev, dev->name,
  5461. &iter->dev->adj_list.lower);
  5462. netdev_adjacent_sysfs_del(dev, iter->dev->name,
  5463. &dev->adj_list.upper);
  5464. }
  5465. list_for_each_entry(iter, &dev->adj_list.lower, list) {
  5466. if (!net_eq(net, dev_net(iter->dev)))
  5467. continue;
  5468. netdev_adjacent_sysfs_del(iter->dev, dev->name,
  5469. &iter->dev->adj_list.upper);
  5470. netdev_adjacent_sysfs_del(dev, iter->dev->name,
  5471. &dev->adj_list.lower);
  5472. }
  5473. }
  5474. void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
  5475. {
  5476. struct netdev_adjacent *iter;
  5477. struct net *net = dev_net(dev);
  5478. list_for_each_entry(iter, &dev->adj_list.upper, list) {
  5479. if (!net_eq(net, dev_net(iter->dev)))
  5480. continue;
  5481. netdev_adjacent_sysfs_del(iter->dev, oldname,
  5482. &iter->dev->adj_list.lower);
  5483. netdev_adjacent_sysfs_add(iter->dev, dev,
  5484. &iter->dev->adj_list.lower);
  5485. }
  5486. list_for_each_entry(iter, &dev->adj_list.lower, list) {
  5487. if (!net_eq(net, dev_net(iter->dev)))
  5488. continue;
  5489. netdev_adjacent_sysfs_del(iter->dev, oldname,
  5490. &iter->dev->adj_list.upper);
  5491. netdev_adjacent_sysfs_add(iter->dev, dev,
  5492. &iter->dev->adj_list.upper);
  5493. }
  5494. }
  5495. void *netdev_lower_dev_get_private(struct net_device *dev,
  5496. struct net_device *lower_dev)
  5497. {
  5498. struct netdev_adjacent *lower;
  5499. if (!lower_dev)
  5500. return NULL;
  5501. lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
  5502. if (!lower)
  5503. return NULL;
  5504. return lower->private;
  5505. }
  5506. EXPORT_SYMBOL(netdev_lower_dev_get_private);
  5507. int dev_get_nest_level(struct net_device *dev)
  5508. {
  5509. struct net_device *lower = NULL;
  5510. struct list_head *iter;
  5511. int max_nest = -1;
  5512. int nest;
  5513. ASSERT_RTNL();
  5514. netdev_for_each_lower_dev(dev, lower, iter) {
  5515. nest = dev_get_nest_level(lower);
  5516. if (max_nest < nest)
  5517. max_nest = nest;
  5518. }
  5519. return max_nest + 1;
  5520. }
  5521. EXPORT_SYMBOL(dev_get_nest_level);
  5522. /**
  5523. * netdev_lower_change - Dispatch event about lower device state change
  5524. * @lower_dev: device
  5525. * @lower_state_info: state to dispatch
  5526. *
  5527. * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
  5528. * The caller must hold the RTNL lock.
  5529. */
  5530. void netdev_lower_state_changed(struct net_device *lower_dev,
  5531. void *lower_state_info)
  5532. {
  5533. struct netdev_notifier_changelowerstate_info changelowerstate_info = {
  5534. .info.dev = lower_dev,
  5535. };
  5536. ASSERT_RTNL();
  5537. changelowerstate_info.lower_state_info = lower_state_info;
  5538. call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE,
  5539. &changelowerstate_info.info);
  5540. }
  5541. EXPORT_SYMBOL(netdev_lower_state_changed);
  5542. static void dev_change_rx_flags(struct net_device *dev, int flags)
  5543. {
  5544. const struct net_device_ops *ops = dev->netdev_ops;
  5545. if (ops->ndo_change_rx_flags)
  5546. ops->ndo_change_rx_flags(dev, flags);
  5547. }
  5548. static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
  5549. {
  5550. unsigned int old_flags = dev->flags;
  5551. kuid_t uid;
  5552. kgid_t gid;
  5553. ASSERT_RTNL();
  5554. dev->flags |= IFF_PROMISC;
  5555. dev->promiscuity += inc;
  5556. if (dev->promiscuity == 0) {
  5557. /*
  5558. * Avoid overflow.
  5559. * If inc causes overflow, untouch promisc and return error.
  5560. */
  5561. if (inc < 0)
  5562. dev->flags &= ~IFF_PROMISC;
  5563. else {
  5564. dev->promiscuity -= inc;
  5565. pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
  5566. dev->name);
  5567. return -EOVERFLOW;
  5568. }
  5569. }
  5570. if (dev->flags != old_flags) {
  5571. pr_info("device %s %s promiscuous mode\n",
  5572. dev->name,
  5573. dev->flags & IFF_PROMISC ? "entered" : "left");
  5574. if (audit_enabled) {
  5575. current_uid_gid(&uid, &gid);
  5576. audit_log(current->audit_context, GFP_ATOMIC,
  5577. AUDIT_ANOM_PROMISCUOUS,
  5578. "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
  5579. dev->name, (dev->flags & IFF_PROMISC),
  5580. (old_flags & IFF_PROMISC),
  5581. from_kuid(&init_user_ns, audit_get_loginuid(current)),
  5582. from_kuid(&init_user_ns, uid),
  5583. from_kgid(&init_user_ns, gid),
  5584. audit_get_sessionid(current));
  5585. }
  5586. dev_change_rx_flags(dev, IFF_PROMISC);
  5587. }
  5588. if (notify)
  5589. __dev_notify_flags(dev, old_flags, IFF_PROMISC);
  5590. return 0;
  5591. }
  5592. /**
  5593. * dev_set_promiscuity - update promiscuity count on a device
  5594. * @dev: device
  5595. * @inc: modifier
  5596. *
  5597. * Add or remove promiscuity from a device. While the count in the device
  5598. * remains above zero the interface remains promiscuous. Once it hits zero
  5599. * the device reverts back to normal filtering operation. A negative inc
  5600. * value is used to drop promiscuity on the device.
  5601. * Return 0 if successful or a negative errno code on error.
  5602. */
  5603. int dev_set_promiscuity(struct net_device *dev, int inc)
  5604. {
  5605. unsigned int old_flags = dev->flags;
  5606. int err;
  5607. err = __dev_set_promiscuity(dev, inc, true);
  5608. if (err < 0)
  5609. return err;
  5610. if (dev->flags != old_flags)
  5611. dev_set_rx_mode(dev);
  5612. return err;
  5613. }
  5614. EXPORT_SYMBOL(dev_set_promiscuity);
  5615. static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
  5616. {
  5617. unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
  5618. ASSERT_RTNL();
  5619. dev->flags |= IFF_ALLMULTI;
  5620. dev->allmulti += inc;
  5621. if (dev->allmulti == 0) {
  5622. /*
  5623. * Avoid overflow.
  5624. * If inc causes overflow, untouch allmulti and return error.
  5625. */
  5626. if (inc < 0)
  5627. dev->flags &= ~IFF_ALLMULTI;
  5628. else {
  5629. dev->allmulti -= inc;
  5630. pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
  5631. dev->name);
  5632. return -EOVERFLOW;
  5633. }
  5634. }
  5635. if (dev->flags ^ old_flags) {
  5636. dev_change_rx_flags(dev, IFF_ALLMULTI);
  5637. dev_set_rx_mode(dev);
  5638. if (notify)
  5639. __dev_notify_flags(dev, old_flags,
  5640. dev->gflags ^ old_gflags);
  5641. }
  5642. return 0;
  5643. }
  5644. /**
  5645. * dev_set_allmulti - update allmulti count on a device
  5646. * @dev: device
  5647. * @inc: modifier
  5648. *
  5649. * Add or remove reception of all multicast frames to a device. While the
  5650. * count in the device remains above zero the interface remains listening
  5651. * to all interfaces. Once it hits zero the device reverts back to normal
  5652. * filtering operation. A negative @inc value is used to drop the counter
  5653. * when releasing a resource needing all multicasts.
  5654. * Return 0 if successful or a negative errno code on error.
  5655. */
  5656. int dev_set_allmulti(struct net_device *dev, int inc)
  5657. {
  5658. return __dev_set_allmulti(dev, inc, true);
  5659. }
  5660. EXPORT_SYMBOL(dev_set_allmulti);
  5661. /*
  5662. * Upload unicast and multicast address lists to device and
  5663. * configure RX filtering. When the device doesn't support unicast
  5664. * filtering it is put in promiscuous mode while unicast addresses
  5665. * are present.
  5666. */
  5667. void __dev_set_rx_mode(struct net_device *dev)
  5668. {
  5669. const struct net_device_ops *ops = dev->netdev_ops;
  5670. /* dev_open will call this function so the list will stay sane. */
  5671. if (!(dev->flags&IFF_UP))
  5672. return;
  5673. if (!netif_device_present(dev))
  5674. return;
  5675. if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
  5676. /* Unicast addresses changes may only happen under the rtnl,
  5677. * therefore calling __dev_set_promiscuity here is safe.
  5678. */
  5679. if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
  5680. __dev_set_promiscuity(dev, 1, false);
  5681. dev->uc_promisc = true;
  5682. } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
  5683. __dev_set_promiscuity(dev, -1, false);
  5684. dev->uc_promisc = false;
  5685. }
  5686. }
  5687. if (ops->ndo_set_rx_mode)
  5688. ops->ndo_set_rx_mode(dev);
  5689. }
  5690. void dev_set_rx_mode(struct net_device *dev)
  5691. {
  5692. netif_addr_lock_bh(dev);
  5693. __dev_set_rx_mode(dev);
  5694. netif_addr_unlock_bh(dev);
  5695. }
  5696. /**
  5697. * dev_get_flags - get flags reported to userspace
  5698. * @dev: device
  5699. *
  5700. * Get the combination of flag bits exported through APIs to userspace.
  5701. */
  5702. unsigned int dev_get_flags(const struct net_device *dev)
  5703. {
  5704. unsigned int flags;
  5705. flags = (dev->flags & ~(IFF_PROMISC |
  5706. IFF_ALLMULTI |
  5707. IFF_RUNNING |
  5708. IFF_LOWER_UP |
  5709. IFF_DORMANT)) |
  5710. (dev->gflags & (IFF_PROMISC |
  5711. IFF_ALLMULTI));
  5712. if (netif_running(dev)) {
  5713. if (netif_oper_up(dev))
  5714. flags |= IFF_RUNNING;
  5715. if (netif_carrier_ok(dev))
  5716. flags |= IFF_LOWER_UP;
  5717. if (netif_dormant(dev))
  5718. flags |= IFF_DORMANT;
  5719. }
  5720. return flags;
  5721. }
  5722. EXPORT_SYMBOL(dev_get_flags);
  5723. int __dev_change_flags(struct net_device *dev, unsigned int flags)
  5724. {
  5725. unsigned int old_flags = dev->flags;
  5726. int ret;
  5727. ASSERT_RTNL();
  5728. /*
  5729. * Set the flags on our device.
  5730. */
  5731. dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
  5732. IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
  5733. IFF_AUTOMEDIA)) |
  5734. (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
  5735. IFF_ALLMULTI));
  5736. /*
  5737. * Load in the correct multicast list now the flags have changed.
  5738. */
  5739. if ((old_flags ^ flags) & IFF_MULTICAST)
  5740. dev_change_rx_flags(dev, IFF_MULTICAST);
  5741. dev_set_rx_mode(dev);
  5742. /*
  5743. * Have we downed the interface. We handle IFF_UP ourselves
  5744. * according to user attempts to set it, rather than blindly
  5745. * setting it.
  5746. */
  5747. ret = 0;
  5748. if ((old_flags ^ flags) & IFF_UP) {
  5749. if (old_flags & IFF_UP)
  5750. __dev_close(dev);
  5751. else
  5752. ret = __dev_open(dev);
  5753. }
  5754. if ((flags ^ dev->gflags) & IFF_PROMISC) {
  5755. int inc = (flags & IFF_PROMISC) ? 1 : -1;
  5756. unsigned int old_flags = dev->flags;
  5757. dev->gflags ^= IFF_PROMISC;
  5758. if (__dev_set_promiscuity(dev, inc, false) >= 0)
  5759. if (dev->flags != old_flags)
  5760. dev_set_rx_mode(dev);
  5761. }
  5762. /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
  5763. * is important. Some (broken) drivers set IFF_PROMISC, when
  5764. * IFF_ALLMULTI is requested not asking us and not reporting.
  5765. */
  5766. if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
  5767. int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
  5768. dev->gflags ^= IFF_ALLMULTI;
  5769. __dev_set_allmulti(dev, inc, false);
  5770. }
  5771. return ret;
  5772. }
  5773. void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
  5774. unsigned int gchanges)
  5775. {
  5776. unsigned int changes = dev->flags ^ old_flags;
  5777. if (gchanges)
  5778. rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
  5779. if (changes & IFF_UP) {
  5780. if (dev->flags & IFF_UP)
  5781. call_netdevice_notifiers(NETDEV_UP, dev);
  5782. else
  5783. call_netdevice_notifiers(NETDEV_DOWN, dev);
  5784. }
  5785. if (dev->flags & IFF_UP &&
  5786. (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
  5787. struct netdev_notifier_change_info change_info = {
  5788. .info = {
  5789. .dev = dev,
  5790. },
  5791. .flags_changed = changes,
  5792. };
  5793. call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info);
  5794. }
  5795. }
  5796. /**
  5797. * dev_change_flags - change device settings
  5798. * @dev: device
  5799. * @flags: device state flags
  5800. *
  5801. * Change settings on device based state flags. The flags are
  5802. * in the userspace exported format.
  5803. */
  5804. int dev_change_flags(struct net_device *dev, unsigned int flags)
  5805. {
  5806. int ret;
  5807. unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
  5808. ret = __dev_change_flags(dev, flags);
  5809. if (ret < 0)
  5810. return ret;
  5811. changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
  5812. __dev_notify_flags(dev, old_flags, changes);
  5813. return ret;
  5814. }
  5815. EXPORT_SYMBOL(dev_change_flags);
  5816. int __dev_set_mtu(struct net_device *dev, int new_mtu)
  5817. {
  5818. const struct net_device_ops *ops = dev->netdev_ops;
  5819. if (ops->ndo_change_mtu)
  5820. return ops->ndo_change_mtu(dev, new_mtu);
  5821. dev->mtu = new_mtu;
  5822. return 0;
  5823. }
  5824. EXPORT_SYMBOL(__dev_set_mtu);
  5825. /**
  5826. * dev_set_mtu - Change maximum transfer unit
  5827. * @dev: device
  5828. * @new_mtu: new transfer unit
  5829. *
  5830. * Change the maximum transfer size of the network device.
  5831. */
  5832. int dev_set_mtu(struct net_device *dev, int new_mtu)
  5833. {
  5834. int err, orig_mtu;
  5835. if (new_mtu == dev->mtu)
  5836. return 0;
  5837. /* MTU must be positive, and in range */
  5838. if (new_mtu < 0 || new_mtu < dev->min_mtu) {
  5839. net_err_ratelimited("%s: Invalid MTU %d requested, hw min %d\n",
  5840. dev->name, new_mtu, dev->min_mtu);
  5841. return -EINVAL;
  5842. }
  5843. if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
  5844. net_err_ratelimited("%s: Invalid MTU %d requested, hw max %d\n",
  5845. dev->name, new_mtu, dev->max_mtu);
  5846. return -EINVAL;
  5847. }
  5848. if (!netif_device_present(dev))
  5849. return -ENODEV;
  5850. err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
  5851. err = notifier_to_errno(err);
  5852. if (err)
  5853. return err;
  5854. orig_mtu = dev->mtu;
  5855. err = __dev_set_mtu(dev, new_mtu);
  5856. if (!err) {
  5857. err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
  5858. err = notifier_to_errno(err);
  5859. if (err) {
  5860. /* setting mtu back and notifying everyone again,
  5861. * so that they have a chance to revert changes.
  5862. */
  5863. __dev_set_mtu(dev, orig_mtu);
  5864. call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
  5865. }
  5866. }
  5867. return err;
  5868. }
  5869. EXPORT_SYMBOL(dev_set_mtu);
  5870. /**
  5871. * dev_set_group - Change group this device belongs to
  5872. * @dev: device
  5873. * @new_group: group this device should belong to
  5874. */
  5875. void dev_set_group(struct net_device *dev, int new_group)
  5876. {
  5877. dev->group = new_group;
  5878. }
  5879. EXPORT_SYMBOL(dev_set_group);
  5880. /**
  5881. * dev_set_mac_address - Change Media Access Control Address
  5882. * @dev: device
  5883. * @sa: new address
  5884. *
  5885. * Change the hardware (MAC) address of the device
  5886. */
  5887. int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
  5888. {
  5889. const struct net_device_ops *ops = dev->netdev_ops;
  5890. int err;
  5891. if (!ops->ndo_set_mac_address)
  5892. return -EOPNOTSUPP;
  5893. if (sa->sa_family != dev->type)
  5894. return -EINVAL;
  5895. if (!netif_device_present(dev))
  5896. return -ENODEV;
  5897. err = ops->ndo_set_mac_address(dev, sa);
  5898. if (err)
  5899. return err;
  5900. dev->addr_assign_type = NET_ADDR_SET;
  5901. call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
  5902. add_device_randomness(dev->dev_addr, dev->addr_len);
  5903. return 0;
  5904. }
  5905. EXPORT_SYMBOL(dev_set_mac_address);
  5906. /**
  5907. * dev_change_carrier - Change device carrier
  5908. * @dev: device
  5909. * @new_carrier: new value
  5910. *
  5911. * Change device carrier
  5912. */
  5913. int dev_change_carrier(struct net_device *dev, bool new_carrier)
  5914. {
  5915. const struct net_device_ops *ops = dev->netdev_ops;
  5916. if (!ops->ndo_change_carrier)
  5917. return -EOPNOTSUPP;
  5918. if (!netif_device_present(dev))
  5919. return -ENODEV;
  5920. return ops->ndo_change_carrier(dev, new_carrier);
  5921. }
  5922. EXPORT_SYMBOL(dev_change_carrier);
  5923. /**
  5924. * dev_get_phys_port_id - Get device physical port ID
  5925. * @dev: device
  5926. * @ppid: port ID
  5927. *
  5928. * Get device physical port ID
  5929. */
  5930. int dev_get_phys_port_id(struct net_device *dev,
  5931. struct netdev_phys_item_id *ppid)
  5932. {
  5933. const struct net_device_ops *ops = dev->netdev_ops;
  5934. if (!ops->ndo_get_phys_port_id)
  5935. return -EOPNOTSUPP;
  5936. return ops->ndo_get_phys_port_id(dev, ppid);
  5937. }
  5938. EXPORT_SYMBOL(dev_get_phys_port_id);
  5939. /**
  5940. * dev_get_phys_port_name - Get device physical port name
  5941. * @dev: device
  5942. * @name: port name
  5943. * @len: limit of bytes to copy to name
  5944. *
  5945. * Get device physical port name
  5946. */
  5947. int dev_get_phys_port_name(struct net_device *dev,
  5948. char *name, size_t len)
  5949. {
  5950. const struct net_device_ops *ops = dev->netdev_ops;
  5951. if (!ops->ndo_get_phys_port_name)
  5952. return -EOPNOTSUPP;
  5953. return ops->ndo_get_phys_port_name(dev, name, len);
  5954. }
  5955. EXPORT_SYMBOL(dev_get_phys_port_name);
  5956. /**
  5957. * dev_change_proto_down - update protocol port state information
  5958. * @dev: device
  5959. * @proto_down: new value
  5960. *
  5961. * This info can be used by switch drivers to set the phys state of the
  5962. * port.
  5963. */
  5964. int dev_change_proto_down(struct net_device *dev, bool proto_down)
  5965. {
  5966. const struct net_device_ops *ops = dev->netdev_ops;
  5967. if (!ops->ndo_change_proto_down)
  5968. return -EOPNOTSUPP;
  5969. if (!netif_device_present(dev))
  5970. return -ENODEV;
  5971. return ops->ndo_change_proto_down(dev, proto_down);
  5972. }
  5973. EXPORT_SYMBOL(dev_change_proto_down);
  5974. void __dev_xdp_query(struct net_device *dev, bpf_op_t bpf_op,
  5975. struct netdev_bpf *xdp)
  5976. {
  5977. memset(xdp, 0, sizeof(*xdp));
  5978. xdp->command = XDP_QUERY_PROG;
  5979. /* Query must always succeed. */
  5980. WARN_ON(bpf_op(dev, xdp) < 0);
  5981. }
  5982. static u8 __dev_xdp_attached(struct net_device *dev, bpf_op_t bpf_op)
  5983. {
  5984. struct netdev_bpf xdp;
  5985. __dev_xdp_query(dev, bpf_op, &xdp);
  5986. return xdp.prog_attached;
  5987. }
  5988. static int dev_xdp_install(struct net_device *dev, bpf_op_t bpf_op,
  5989. struct netlink_ext_ack *extack, u32 flags,
  5990. struct bpf_prog *prog)
  5991. {
  5992. struct netdev_bpf xdp;
  5993. memset(&xdp, 0, sizeof(xdp));
  5994. if (flags & XDP_FLAGS_HW_MODE)
  5995. xdp.command = XDP_SETUP_PROG_HW;
  5996. else
  5997. xdp.command = XDP_SETUP_PROG;
  5998. xdp.extack = extack;
  5999. xdp.flags = flags;
  6000. xdp.prog = prog;
  6001. return bpf_op(dev, &xdp);
  6002. }
  6003. /**
  6004. * dev_change_xdp_fd - set or clear a bpf program for a device rx path
  6005. * @dev: device
  6006. * @extack: netlink extended ack
  6007. * @fd: new program fd or negative value to clear
  6008. * @flags: xdp-related flags
  6009. *
  6010. * Set or clear a bpf program for a device
  6011. */
  6012. int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
  6013. int fd, u32 flags)
  6014. {
  6015. const struct net_device_ops *ops = dev->netdev_ops;
  6016. struct bpf_prog *prog = NULL;
  6017. bpf_op_t bpf_op, bpf_chk;
  6018. int err;
  6019. ASSERT_RTNL();
  6020. bpf_op = bpf_chk = ops->ndo_bpf;
  6021. if (!bpf_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE)))
  6022. return -EOPNOTSUPP;
  6023. if (!bpf_op || (flags & XDP_FLAGS_SKB_MODE))
  6024. bpf_op = generic_xdp_install;
  6025. if (bpf_op == bpf_chk)
  6026. bpf_chk = generic_xdp_install;
  6027. if (fd >= 0) {
  6028. if (bpf_chk && __dev_xdp_attached(dev, bpf_chk))
  6029. return -EEXIST;
  6030. if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) &&
  6031. __dev_xdp_attached(dev, bpf_op))
  6032. return -EBUSY;
  6033. prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
  6034. bpf_op == ops->ndo_bpf);
  6035. if (IS_ERR(prog))
  6036. return PTR_ERR(prog);
  6037. if (!(flags & XDP_FLAGS_HW_MODE) &&
  6038. bpf_prog_is_dev_bound(prog->aux)) {
  6039. NL_SET_ERR_MSG(extack, "using device-bound program without HW_MODE flag is not supported");
  6040. bpf_prog_put(prog);
  6041. return -EINVAL;
  6042. }
  6043. }
  6044. err = dev_xdp_install(dev, bpf_op, extack, flags, prog);
  6045. if (err < 0 && prog)
  6046. bpf_prog_put(prog);
  6047. return err;
  6048. }
  6049. /**
  6050. * dev_new_index - allocate an ifindex
  6051. * @net: the applicable net namespace
  6052. *
  6053. * Returns a suitable unique value for a new device interface
  6054. * number. The caller must hold the rtnl semaphore or the
  6055. * dev_base_lock to be sure it remains unique.
  6056. */
  6057. static int dev_new_index(struct net *net)
  6058. {
  6059. int ifindex = net->ifindex;
  6060. for (;;) {
  6061. if (++ifindex <= 0)
  6062. ifindex = 1;
  6063. if (!__dev_get_by_index(net, ifindex))
  6064. return net->ifindex = ifindex;
  6065. }
  6066. }
  6067. /* Delayed registration/unregisteration */
  6068. static LIST_HEAD(net_todo_list);
  6069. DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
  6070. static void net_set_todo(struct net_device *dev)
  6071. {
  6072. list_add_tail(&dev->todo_list, &net_todo_list);
  6073. dev_net(dev)->dev_unreg_count++;
  6074. }
  6075. static void rollback_registered_many(struct list_head *head)
  6076. {
  6077. struct net_device *dev, *tmp;
  6078. LIST_HEAD(close_head);
  6079. BUG_ON(dev_boot_phase);
  6080. ASSERT_RTNL();
  6081. list_for_each_entry_safe(dev, tmp, head, unreg_list) {
  6082. /* Some devices call without registering
  6083. * for initialization unwind. Remove those
  6084. * devices and proceed with the remaining.
  6085. */
  6086. if (dev->reg_state == NETREG_UNINITIALIZED) {
  6087. pr_debug("unregister_netdevice: device %s/%p never was registered\n",
  6088. dev->name, dev);
  6089. WARN_ON(1);
  6090. list_del(&dev->unreg_list);
  6091. continue;
  6092. }
  6093. dev->dismantle = true;
  6094. BUG_ON(dev->reg_state != NETREG_REGISTERED);
  6095. }
  6096. /* If device is running, close it first. */
  6097. list_for_each_entry(dev, head, unreg_list)
  6098. list_add_tail(&dev->close_list, &close_head);
  6099. dev_close_many(&close_head, true);
  6100. list_for_each_entry(dev, head, unreg_list) {
  6101. /* And unlink it from device chain. */
  6102. unlist_netdevice(dev);
  6103. dev->reg_state = NETREG_UNREGISTERING;
  6104. }
  6105. flush_all_backlogs();
  6106. synchronize_net();
  6107. list_for_each_entry(dev, head, unreg_list) {
  6108. struct sk_buff *skb = NULL;
  6109. /* Shutdown queueing discipline. */
  6110. dev_shutdown(dev);
  6111. /* Notify protocols, that we are about to destroy
  6112. * this device. They should clean all the things.
  6113. */
  6114. call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
  6115. if (!dev->rtnl_link_ops ||
  6116. dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
  6117. skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
  6118. GFP_KERNEL, NULL);
  6119. /*
  6120. * Flush the unicast and multicast chains
  6121. */
  6122. dev_uc_flush(dev);
  6123. dev_mc_flush(dev);
  6124. if (dev->netdev_ops->ndo_uninit)
  6125. dev->netdev_ops->ndo_uninit(dev);
  6126. if (skb)
  6127. rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
  6128. /* Notifier chain MUST detach us all upper devices. */
  6129. WARN_ON(netdev_has_any_upper_dev(dev));
  6130. WARN_ON(netdev_has_any_lower_dev(dev));
  6131. /* Remove entries from kobject tree */
  6132. netdev_unregister_kobject(dev);
  6133. #ifdef CONFIG_XPS
  6134. /* Remove XPS queueing entries */
  6135. netif_reset_xps_queues_gt(dev, 0);
  6136. #endif
  6137. }
  6138. synchronize_net();
  6139. list_for_each_entry(dev, head, unreg_list)
  6140. dev_put(dev);
  6141. }
  6142. static void rollback_registered(struct net_device *dev)
  6143. {
  6144. LIST_HEAD(single);
  6145. list_add(&dev->unreg_list, &single);
  6146. rollback_registered_many(&single);
  6147. list_del(&single);
  6148. }
  6149. static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
  6150. struct net_device *upper, netdev_features_t features)
  6151. {
  6152. netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
  6153. netdev_features_t feature;
  6154. int feature_bit;
  6155. for_each_netdev_feature(&upper_disables, feature_bit) {
  6156. feature = __NETIF_F_BIT(feature_bit);
  6157. if (!(upper->wanted_features & feature)
  6158. && (features & feature)) {
  6159. netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
  6160. &feature, upper->name);
  6161. features &= ~feature;
  6162. }
  6163. }
  6164. return features;
  6165. }
  6166. static void netdev_sync_lower_features(struct net_device *upper,
  6167. struct net_device *lower, netdev_features_t features)
  6168. {
  6169. netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
  6170. netdev_features_t feature;
  6171. int feature_bit;
  6172. for_each_netdev_feature(&upper_disables, feature_bit) {
  6173. feature = __NETIF_F_BIT(feature_bit);
  6174. if (!(features & feature) && (lower->features & feature)) {
  6175. netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
  6176. &feature, lower->name);
  6177. lower->wanted_features &= ~feature;
  6178. netdev_update_features(lower);
  6179. if (unlikely(lower->features & feature))
  6180. netdev_WARN(upper, "failed to disable %pNF on %s!\n",
  6181. &feature, lower->name);
  6182. }
  6183. }
  6184. }
  6185. static netdev_features_t netdev_fix_features(struct net_device *dev,
  6186. netdev_features_t features)
  6187. {
  6188. /* Fix illegal checksum combinations */
  6189. if ((features & NETIF_F_HW_CSUM) &&
  6190. (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
  6191. netdev_warn(dev, "mixed HW and IP checksum settings.\n");
  6192. features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
  6193. }
  6194. /* TSO requires that SG is present as well. */
  6195. if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
  6196. netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
  6197. features &= ~NETIF_F_ALL_TSO;
  6198. }
  6199. if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
  6200. !(features & NETIF_F_IP_CSUM)) {
  6201. netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
  6202. features &= ~NETIF_F_TSO;
  6203. features &= ~NETIF_F_TSO_ECN;
  6204. }
  6205. if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
  6206. !(features & NETIF_F_IPV6_CSUM)) {
  6207. netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
  6208. features &= ~NETIF_F_TSO6;
  6209. }
  6210. /* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
  6211. if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
  6212. features &= ~NETIF_F_TSO_MANGLEID;
  6213. /* TSO ECN requires that TSO is present as well. */
  6214. if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
  6215. features &= ~NETIF_F_TSO_ECN;
  6216. /* Software GSO depends on SG. */
  6217. if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
  6218. netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
  6219. features &= ~NETIF_F_GSO;
  6220. }
  6221. /* GSO partial features require GSO partial be set */
  6222. if ((features & dev->gso_partial_features) &&
  6223. !(features & NETIF_F_GSO_PARTIAL)) {
  6224. netdev_dbg(dev,
  6225. "Dropping partially supported GSO features since no GSO partial.\n");
  6226. features &= ~dev->gso_partial_features;
  6227. }
  6228. return features;
  6229. }
  6230. int __netdev_update_features(struct net_device *dev)
  6231. {
  6232. struct net_device *upper, *lower;
  6233. netdev_features_t features;
  6234. struct list_head *iter;
  6235. int err = -1;
  6236. ASSERT_RTNL();
  6237. features = netdev_get_wanted_features(dev);
  6238. if (dev->netdev_ops->ndo_fix_features)
  6239. features = dev->netdev_ops->ndo_fix_features(dev, features);
  6240. /* driver might be less strict about feature dependencies */
  6241. features = netdev_fix_features(dev, features);
  6242. /* some features can't be enabled if they're off an an upper device */
  6243. netdev_for_each_upper_dev_rcu(dev, upper, iter)
  6244. features = netdev_sync_upper_features(dev, upper, features);
  6245. if (dev->features == features)
  6246. goto sync_lower;
  6247. netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
  6248. &dev->features, &features);
  6249. if (dev->netdev_ops->ndo_set_features)
  6250. err = dev->netdev_ops->ndo_set_features(dev, features);
  6251. else
  6252. err = 0;
  6253. if (unlikely(err < 0)) {
  6254. netdev_err(dev,
  6255. "set_features() failed (%d); wanted %pNF, left %pNF\n",
  6256. err, &features, &dev->features);
  6257. /* return non-0 since some features might have changed and
  6258. * it's better to fire a spurious notification than miss it
  6259. */
  6260. return -1;
  6261. }
  6262. sync_lower:
  6263. /* some features must be disabled on lower devices when disabled
  6264. * on an upper device (think: bonding master or bridge)
  6265. */
  6266. netdev_for_each_lower_dev(dev, lower, iter)
  6267. netdev_sync_lower_features(dev, lower, features);
  6268. if (!err) {
  6269. netdev_features_t diff = features ^ dev->features;
  6270. if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) {
  6271. /* udp_tunnel_{get,drop}_rx_info both need
  6272. * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the
  6273. * device, or they won't do anything.
  6274. * Thus we need to update dev->features
  6275. * *before* calling udp_tunnel_get_rx_info,
  6276. * but *after* calling udp_tunnel_drop_rx_info.
  6277. */
  6278. if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
  6279. dev->features = features;
  6280. udp_tunnel_get_rx_info(dev);
  6281. } else {
  6282. udp_tunnel_drop_rx_info(dev);
  6283. }
  6284. }
  6285. dev->features = features;
  6286. }
  6287. return err < 0 ? 0 : 1;
  6288. }
  6289. /**
  6290. * netdev_update_features - recalculate device features
  6291. * @dev: the device to check
  6292. *
  6293. * Recalculate dev->features set and send notifications if it
  6294. * has changed. Should be called after driver or hardware dependent
  6295. * conditions might have changed that influence the features.
  6296. */
  6297. void netdev_update_features(struct net_device *dev)
  6298. {
  6299. if (__netdev_update_features(dev))
  6300. netdev_features_change(dev);
  6301. }
  6302. EXPORT_SYMBOL(netdev_update_features);
  6303. /**
  6304. * netdev_change_features - recalculate device features
  6305. * @dev: the device to check
  6306. *
  6307. * Recalculate dev->features set and send notifications even
  6308. * if they have not changed. Should be called instead of
  6309. * netdev_update_features() if also dev->vlan_features might
  6310. * have changed to allow the changes to be propagated to stacked
  6311. * VLAN devices.
  6312. */
  6313. void netdev_change_features(struct net_device *dev)
  6314. {
  6315. __netdev_update_features(dev);
  6316. netdev_features_change(dev);
  6317. }
  6318. EXPORT_SYMBOL(netdev_change_features);
  6319. /**
  6320. * netif_stacked_transfer_operstate - transfer operstate
  6321. * @rootdev: the root or lower level device to transfer state from
  6322. * @dev: the device to transfer operstate to
  6323. *
  6324. * Transfer operational state from root to device. This is normally
  6325. * called when a stacking relationship exists between the root
  6326. * device and the device(a leaf device).
  6327. */
  6328. void netif_stacked_transfer_operstate(const struct net_device *rootdev,
  6329. struct net_device *dev)
  6330. {
  6331. if (rootdev->operstate == IF_OPER_DORMANT)
  6332. netif_dormant_on(dev);
  6333. else
  6334. netif_dormant_off(dev);
  6335. if (netif_carrier_ok(rootdev))
  6336. netif_carrier_on(dev);
  6337. else
  6338. netif_carrier_off(dev);
  6339. }
  6340. EXPORT_SYMBOL(netif_stacked_transfer_operstate);
  6341. #ifdef CONFIG_SYSFS
  6342. static int netif_alloc_rx_queues(struct net_device *dev)
  6343. {
  6344. unsigned int i, count = dev->num_rx_queues;
  6345. struct netdev_rx_queue *rx;
  6346. size_t sz = count * sizeof(*rx);
  6347. BUG_ON(count < 1);
  6348. rx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
  6349. if (!rx)
  6350. return -ENOMEM;
  6351. dev->_rx = rx;
  6352. for (i = 0; i < count; i++)
  6353. rx[i].dev = dev;
  6354. return 0;
  6355. }
  6356. #endif
  6357. static void netdev_init_one_queue(struct net_device *dev,
  6358. struct netdev_queue *queue, void *_unused)
  6359. {
  6360. /* Initialize queue lock */
  6361. spin_lock_init(&queue->_xmit_lock);
  6362. netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
  6363. queue->xmit_lock_owner = -1;
  6364. netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
  6365. queue->dev = dev;
  6366. #ifdef CONFIG_BQL
  6367. dql_init(&queue->dql, HZ);
  6368. #endif
  6369. }
  6370. static void netif_free_tx_queues(struct net_device *dev)
  6371. {
  6372. kvfree(dev->_tx);
  6373. }
  6374. static int netif_alloc_netdev_queues(struct net_device *dev)
  6375. {
  6376. unsigned int count = dev->num_tx_queues;
  6377. struct netdev_queue *tx;
  6378. size_t sz = count * sizeof(*tx);
  6379. if (count < 1 || count > 0xffff)
  6380. return -EINVAL;
  6381. tx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
  6382. if (!tx)
  6383. return -ENOMEM;
  6384. dev->_tx = tx;
  6385. netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
  6386. spin_lock_init(&dev->tx_global_lock);
  6387. return 0;
  6388. }
  6389. void netif_tx_stop_all_queues(struct net_device *dev)
  6390. {
  6391. unsigned int i;
  6392. for (i = 0; i < dev->num_tx_queues; i++) {
  6393. struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
  6394. netif_tx_stop_queue(txq);
  6395. }
  6396. }
  6397. EXPORT_SYMBOL(netif_tx_stop_all_queues);
  6398. /**
  6399. * register_netdevice - register a network device
  6400. * @dev: device to register
  6401. *
  6402. * Take a completed network device structure and add it to the kernel
  6403. * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
  6404. * chain. 0 is returned on success. A negative errno code is returned
  6405. * on a failure to set up the device, or if the name is a duplicate.
  6406. *
  6407. * Callers must hold the rtnl semaphore. You may want
  6408. * register_netdev() instead of this.
  6409. *
  6410. * BUGS:
  6411. * The locking appears insufficient to guarantee two parallel registers
  6412. * will not get the same name.
  6413. */
  6414. int register_netdevice(struct net_device *dev)
  6415. {
  6416. int ret;
  6417. struct net *net = dev_net(dev);
  6418. BUG_ON(dev_boot_phase);
  6419. ASSERT_RTNL();
  6420. might_sleep();
  6421. /* When net_device's are persistent, this will be fatal. */
  6422. BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
  6423. BUG_ON(!net);
  6424. spin_lock_init(&dev->addr_list_lock);
  6425. netdev_set_addr_lockdep_class(dev);
  6426. ret = dev_get_valid_name(net, dev, dev->name);
  6427. if (ret < 0)
  6428. goto out;
  6429. /* Init, if this function is available */
  6430. if (dev->netdev_ops->ndo_init) {
  6431. ret = dev->netdev_ops->ndo_init(dev);
  6432. if (ret) {
  6433. if (ret > 0)
  6434. ret = -EIO;
  6435. goto out;
  6436. }
  6437. }
  6438. if (((dev->hw_features | dev->features) &
  6439. NETIF_F_HW_VLAN_CTAG_FILTER) &&
  6440. (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
  6441. !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
  6442. netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
  6443. ret = -EINVAL;
  6444. goto err_uninit;
  6445. }
  6446. ret = -EBUSY;
  6447. if (!dev->ifindex)
  6448. dev->ifindex = dev_new_index(net);
  6449. else if (__dev_get_by_index(net, dev->ifindex))
  6450. goto err_uninit;
  6451. /* Transfer changeable features to wanted_features and enable
  6452. * software offloads (GSO and GRO).
  6453. */
  6454. dev->hw_features |= NETIF_F_SOFT_FEATURES;
  6455. dev->features |= NETIF_F_SOFT_FEATURES;
  6456. if (dev->netdev_ops->ndo_udp_tunnel_add) {
  6457. dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
  6458. dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
  6459. }
  6460. dev->wanted_features = dev->features & dev->hw_features;
  6461. if (!(dev->flags & IFF_LOOPBACK))
  6462. dev->hw_features |= NETIF_F_NOCACHE_COPY;
  6463. /* If IPv4 TCP segmentation offload is supported we should also
  6464. * allow the device to enable segmenting the frame with the option
  6465. * of ignoring a static IP ID value. This doesn't enable the
  6466. * feature itself but allows the user to enable it later.
  6467. */
  6468. if (dev->hw_features & NETIF_F_TSO)
  6469. dev->hw_features |= NETIF_F_TSO_MANGLEID;
  6470. if (dev->vlan_features & NETIF_F_TSO)
  6471. dev->vlan_features |= NETIF_F_TSO_MANGLEID;
  6472. if (dev->mpls_features & NETIF_F_TSO)
  6473. dev->mpls_features |= NETIF_F_TSO_MANGLEID;
  6474. if (dev->hw_enc_features & NETIF_F_TSO)
  6475. dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
  6476. /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
  6477. */
  6478. dev->vlan_features |= NETIF_F_HIGHDMA;
  6479. /* Make NETIF_F_SG inheritable to tunnel devices.
  6480. */
  6481. dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;
  6482. /* Make NETIF_F_SG inheritable to MPLS.
  6483. */
  6484. dev->mpls_features |= NETIF_F_SG;
  6485. ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
  6486. ret = notifier_to_errno(ret);
  6487. if (ret)
  6488. goto err_uninit;
  6489. ret = netdev_register_kobject(dev);
  6490. if (ret)
  6491. goto err_uninit;
  6492. dev->reg_state = NETREG_REGISTERED;
  6493. __netdev_update_features(dev);
  6494. /*
  6495. * Default initial state at registry is that the
  6496. * device is present.
  6497. */
  6498. set_bit(__LINK_STATE_PRESENT, &dev->state);
  6499. linkwatch_init_dev(dev);
  6500. dev_init_scheduler(dev);
  6501. dev_hold(dev);
  6502. list_netdevice(dev);
  6503. add_device_randomness(dev->dev_addr, dev->addr_len);
  6504. /* If the device has permanent device address, driver should
  6505. * set dev_addr and also addr_assign_type should be set to
  6506. * NET_ADDR_PERM (default value).
  6507. */
  6508. if (dev->addr_assign_type == NET_ADDR_PERM)
  6509. memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
  6510. /* Notify protocols, that a new device appeared. */
  6511. ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
  6512. ret = notifier_to_errno(ret);
  6513. if (ret) {
  6514. rollback_registered(dev);
  6515. dev->reg_state = NETREG_UNREGISTERED;
  6516. }
  6517. /*
  6518. * Prevent userspace races by waiting until the network
  6519. * device is fully setup before sending notifications.
  6520. */
  6521. if (!dev->rtnl_link_ops ||
  6522. dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
  6523. rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
  6524. out:
  6525. return ret;
  6526. err_uninit:
  6527. if (dev->netdev_ops->ndo_uninit)
  6528. dev->netdev_ops->ndo_uninit(dev);
  6529. if (dev->priv_destructor)
  6530. dev->priv_destructor(dev);
  6531. goto out;
  6532. }
  6533. EXPORT_SYMBOL(register_netdevice);
  6534. /**
  6535. * init_dummy_netdev - init a dummy network device for NAPI
  6536. * @dev: device to init
  6537. *
  6538. * This takes a network device structure and initialize the minimum
  6539. * amount of fields so it can be used to schedule NAPI polls without
  6540. * registering a full blown interface. This is to be used by drivers
  6541. * that need to tie several hardware interfaces to a single NAPI
  6542. * poll scheduler due to HW limitations.
  6543. */
  6544. int init_dummy_netdev(struct net_device *dev)
  6545. {
  6546. /* Clear everything. Note we don't initialize spinlocks
  6547. * are they aren't supposed to be taken by any of the
  6548. * NAPI code and this dummy netdev is supposed to be
  6549. * only ever used for NAPI polls
  6550. */
  6551. memset(dev, 0, sizeof(struct net_device));
  6552. /* make sure we BUG if trying to hit standard
  6553. * register/unregister code path
  6554. */
  6555. dev->reg_state = NETREG_DUMMY;
  6556. /* NAPI wants this */
  6557. INIT_LIST_HEAD(&dev->napi_list);
  6558. /* a dummy interface is started by default */
  6559. set_bit(__LINK_STATE_PRESENT, &dev->state);
  6560. set_bit(__LINK_STATE_START, &dev->state);
  6561. /* Note : We dont allocate pcpu_refcnt for dummy devices,
  6562. * because users of this 'device' dont need to change
  6563. * its refcount.
  6564. */
  6565. return 0;
  6566. }
  6567. EXPORT_SYMBOL_GPL(init_dummy_netdev);
  6568. /**
  6569. * register_netdev - register a network device
  6570. * @dev: device to register
  6571. *
  6572. * Take a completed network device structure and add it to the kernel
  6573. * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
  6574. * chain. 0 is returned on success. A negative errno code is returned
  6575. * on a failure to set up the device, or if the name is a duplicate.
  6576. *
  6577. * This is a wrapper around register_netdevice that takes the rtnl semaphore
  6578. * and expands the device name if you passed a format string to
  6579. * alloc_netdev.
  6580. */
  6581. int register_netdev(struct net_device *dev)
  6582. {
  6583. int err;
  6584. rtnl_lock();
  6585. err = register_netdevice(dev);
  6586. rtnl_unlock();
  6587. return err;
  6588. }
  6589. EXPORT_SYMBOL(register_netdev);
  6590. int netdev_refcnt_read(const struct net_device *dev)
  6591. {
  6592. int i, refcnt = 0;
  6593. for_each_possible_cpu(i)
  6594. refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
  6595. return refcnt;
  6596. }
  6597. EXPORT_SYMBOL(netdev_refcnt_read);
  6598. /**
  6599. * netdev_wait_allrefs - wait until all references are gone.
  6600. * @dev: target net_device
  6601. *
  6602. * This is called when unregistering network devices.
  6603. *
  6604. * Any protocol or device that holds a reference should register
  6605. * for netdevice notification, and cleanup and put back the
  6606. * reference if they receive an UNREGISTER event.
  6607. * We can get stuck here if buggy protocols don't correctly
  6608. * call dev_put.
  6609. */
  6610. static void netdev_wait_allrefs(struct net_device *dev)
  6611. {
  6612. unsigned long rebroadcast_time, warning_time;
  6613. int refcnt;
  6614. linkwatch_forget_dev(dev);
  6615. rebroadcast_time = warning_time = jiffies;
  6616. refcnt = netdev_refcnt_read(dev);
  6617. while (refcnt != 0) {
  6618. if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
  6619. rtnl_lock();
  6620. /* Rebroadcast unregister notification */
  6621. call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
  6622. __rtnl_unlock();
  6623. rcu_barrier();
  6624. rtnl_lock();
  6625. call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
  6626. if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
  6627. &dev->state)) {
  6628. /* We must not have linkwatch events
  6629. * pending on unregister. If this
  6630. * happens, we simply run the queue
  6631. * unscheduled, resulting in a noop
  6632. * for this device.
  6633. */
  6634. linkwatch_run_queue();
  6635. }
  6636. __rtnl_unlock();
  6637. rebroadcast_time = jiffies;
  6638. }
  6639. msleep(250);
  6640. refcnt = netdev_refcnt_read(dev);
  6641. if (time_after(jiffies, warning_time + 10 * HZ)) {
  6642. pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
  6643. dev->name, refcnt);
  6644. warning_time = jiffies;
  6645. }
  6646. }
  6647. }
  6648. /* The sequence is:
  6649. *
  6650. * rtnl_lock();
  6651. * ...
  6652. * register_netdevice(x1);
  6653. * register_netdevice(x2);
  6654. * ...
  6655. * unregister_netdevice(y1);
  6656. * unregister_netdevice(y2);
  6657. * ...
  6658. * rtnl_unlock();
  6659. * free_netdev(y1);
  6660. * free_netdev(y2);
  6661. *
  6662. * We are invoked by rtnl_unlock().
  6663. * This allows us to deal with problems:
  6664. * 1) We can delete sysfs objects which invoke hotplug
  6665. * without deadlocking with linkwatch via keventd.
  6666. * 2) Since we run with the RTNL semaphore not held, we can sleep
  6667. * safely in order to wait for the netdev refcnt to drop to zero.
  6668. *
  6669. * We must not return until all unregister events added during
  6670. * the interval the lock was held have been completed.
  6671. */
  6672. void netdev_run_todo(void)
  6673. {
  6674. struct list_head list;
  6675. /* Snapshot list, allow later requests */
  6676. list_replace_init(&net_todo_list, &list);
  6677. __rtnl_unlock();
  6678. /* Wait for rcu callbacks to finish before next phase */
  6679. if (!list_empty(&list))
  6680. rcu_barrier();
  6681. while (!list_empty(&list)) {
  6682. struct net_device *dev
  6683. = list_first_entry(&list, struct net_device, todo_list);
  6684. list_del(&dev->todo_list);
  6685. rtnl_lock();
  6686. call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
  6687. __rtnl_unlock();
  6688. if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
  6689. pr_err("network todo '%s' but state %d\n",
  6690. dev->name, dev->reg_state);
  6691. dump_stack();
  6692. continue;
  6693. }
  6694. dev->reg_state = NETREG_UNREGISTERED;
  6695. netdev_wait_allrefs(dev);
  6696. /* paranoia */
  6697. BUG_ON(netdev_refcnt_read(dev));
  6698. BUG_ON(!list_empty(&dev->ptype_all));
  6699. BUG_ON(!list_empty(&dev->ptype_specific));
  6700. WARN_ON(rcu_access_pointer(dev->ip_ptr));
  6701. WARN_ON(rcu_access_pointer(dev->ip6_ptr));
  6702. WARN_ON(dev->dn_ptr);
  6703. if (dev->priv_destructor)
  6704. dev->priv_destructor(dev);
  6705. if (dev->needs_free_netdev)
  6706. free_netdev(dev);
  6707. /* Report a network device has been unregistered */
  6708. rtnl_lock();
  6709. dev_net(dev)->dev_unreg_count--;
  6710. __rtnl_unlock();
  6711. wake_up(&netdev_unregistering_wq);
  6712. /* Free network device */
  6713. kobject_put(&dev->dev.kobj);
  6714. }
  6715. }
  6716. /* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
  6717. * all the same fields in the same order as net_device_stats, with only
  6718. * the type differing, but rtnl_link_stats64 may have additional fields
  6719. * at the end for newer counters.
  6720. */
  6721. void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
  6722. const struct net_device_stats *netdev_stats)
  6723. {
  6724. #if BITS_PER_LONG == 64
  6725. BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
  6726. memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
  6727. /* zero out counters that only exist in rtnl_link_stats64 */
  6728. memset((char *)stats64 + sizeof(*netdev_stats), 0,
  6729. sizeof(*stats64) - sizeof(*netdev_stats));
  6730. #else
  6731. size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
  6732. const unsigned long *src = (const unsigned long *)netdev_stats;
  6733. u64 *dst = (u64 *)stats64;
  6734. BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
  6735. for (i = 0; i < n; i++)
  6736. dst[i] = src[i];
  6737. /* zero out counters that only exist in rtnl_link_stats64 */
  6738. memset((char *)stats64 + n * sizeof(u64), 0,
  6739. sizeof(*stats64) - n * sizeof(u64));
  6740. #endif
  6741. }
  6742. EXPORT_SYMBOL(netdev_stats_to_stats64);
  6743. /**
  6744. * dev_get_stats - get network device statistics
  6745. * @dev: device to get statistics from
  6746. * @storage: place to store stats
  6747. *
  6748. * Get network statistics from device. Return @storage.
  6749. * The device driver may provide its own method by setting
  6750. * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
  6751. * otherwise the internal statistics structure is used.
  6752. */
  6753. struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
  6754. struct rtnl_link_stats64 *storage)
  6755. {
  6756. const struct net_device_ops *ops = dev->netdev_ops;
  6757. if (ops->ndo_get_stats64) {
  6758. memset(storage, 0, sizeof(*storage));
  6759. ops->ndo_get_stats64(dev, storage);
  6760. } else if (ops->ndo_get_stats) {
  6761. netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
  6762. } else {
  6763. netdev_stats_to_stats64(storage, &dev->stats);
  6764. }
  6765. storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
  6766. storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
  6767. storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
  6768. return storage;
  6769. }
  6770. EXPORT_SYMBOL(dev_get_stats);
  6771. struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
  6772. {
  6773. struct netdev_queue *queue = dev_ingress_queue(dev);
  6774. #ifdef CONFIG_NET_CLS_ACT
  6775. if (queue)
  6776. return queue;
  6777. queue = kzalloc(sizeof(*queue), GFP_KERNEL);
  6778. if (!queue)
  6779. return NULL;
  6780. netdev_init_one_queue(dev, queue, NULL);
  6781. RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
  6782. queue->qdisc_sleeping = &noop_qdisc;
  6783. rcu_assign_pointer(dev->ingress_queue, queue);
  6784. #endif
  6785. return queue;
  6786. }
  6787. static const struct ethtool_ops default_ethtool_ops;
  6788. void netdev_set_default_ethtool_ops(struct net_device *dev,
  6789. const struct ethtool_ops *ops)
  6790. {
  6791. if (dev->ethtool_ops == &default_ethtool_ops)
  6792. dev->ethtool_ops = ops;
  6793. }
  6794. EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
  6795. void netdev_freemem(struct net_device *dev)
  6796. {
  6797. char *addr = (char *)dev - dev->padded;
  6798. kvfree(addr);
  6799. }
  6800. /**
  6801. * alloc_netdev_mqs - allocate network device
  6802. * @sizeof_priv: size of private data to allocate space for
  6803. * @name: device name format string
  6804. * @name_assign_type: origin of device name
  6805. * @setup: callback to initialize device
  6806. * @txqs: the number of TX subqueues to allocate
  6807. * @rxqs: the number of RX subqueues to allocate
  6808. *
  6809. * Allocates a struct net_device with private data area for driver use
  6810. * and performs basic initialization. Also allocates subqueue structs
  6811. * for each queue on the device.
  6812. */
  6813. struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
  6814. unsigned char name_assign_type,
  6815. void (*setup)(struct net_device *),
  6816. unsigned int txqs, unsigned int rxqs)
  6817. {
  6818. struct net_device *dev;
  6819. unsigned int alloc_size;
  6820. struct net_device *p;
  6821. BUG_ON(strlen(name) >= sizeof(dev->name));
  6822. if (txqs < 1) {
  6823. pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
  6824. return NULL;
  6825. }
  6826. #ifdef CONFIG_SYSFS
  6827. if (rxqs < 1) {
  6828. pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
  6829. return NULL;
  6830. }
  6831. #endif
  6832. alloc_size = sizeof(struct net_device);
  6833. if (sizeof_priv) {
  6834. /* ensure 32-byte alignment of private area */
  6835. alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
  6836. alloc_size += sizeof_priv;
  6837. }
  6838. /* ensure 32-byte alignment of whole construct */
  6839. alloc_size += NETDEV_ALIGN - 1;
  6840. p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
  6841. if (!p)
  6842. return NULL;
  6843. dev = PTR_ALIGN(p, NETDEV_ALIGN);
  6844. dev->padded = (char *)dev - (char *)p;
  6845. dev->pcpu_refcnt = alloc_percpu(int);
  6846. if (!dev->pcpu_refcnt)
  6847. goto free_dev;
  6848. if (dev_addr_init(dev))
  6849. goto free_pcpu;
  6850. dev_mc_init(dev);
  6851. dev_uc_init(dev);
  6852. dev_net_set(dev, &init_net);
  6853. dev->gso_max_size = GSO_MAX_SIZE;
  6854. dev->gso_max_segs = GSO_MAX_SEGS;
  6855. INIT_LIST_HEAD(&dev->napi_list);
  6856. INIT_LIST_HEAD(&dev->unreg_list);
  6857. INIT_LIST_HEAD(&dev->close_list);
  6858. INIT_LIST_HEAD(&dev->link_watch_list);
  6859. INIT_LIST_HEAD(&dev->adj_list.upper);
  6860. INIT_LIST_HEAD(&dev->adj_list.lower);
  6861. INIT_LIST_HEAD(&dev->ptype_all);
  6862. INIT_LIST_HEAD(&dev->ptype_specific);
  6863. #ifdef CONFIG_NET_SCHED
  6864. hash_init(dev->qdisc_hash);
  6865. #endif
  6866. dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
  6867. setup(dev);
  6868. if (!dev->tx_queue_len) {
  6869. dev->priv_flags |= IFF_NO_QUEUE;
  6870. dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
  6871. }
  6872. dev->num_tx_queues = txqs;
  6873. dev->real_num_tx_queues = txqs;
  6874. if (netif_alloc_netdev_queues(dev))
  6875. goto free_all;
  6876. #ifdef CONFIG_SYSFS
  6877. dev->num_rx_queues = rxqs;
  6878. dev->real_num_rx_queues = rxqs;
  6879. if (netif_alloc_rx_queues(dev))
  6880. goto free_all;
  6881. #endif
  6882. strcpy(dev->name, name);
  6883. dev->name_assign_type = name_assign_type;
  6884. dev->group = INIT_NETDEV_GROUP;
  6885. if (!dev->ethtool_ops)
  6886. dev->ethtool_ops = &default_ethtool_ops;
  6887. nf_hook_ingress_init(dev);
  6888. return dev;
  6889. free_all:
  6890. free_netdev(dev);
  6891. return NULL;
  6892. free_pcpu:
  6893. free_percpu(dev->pcpu_refcnt);
  6894. free_dev:
  6895. netdev_freemem(dev);
  6896. return NULL;
  6897. }
  6898. EXPORT_SYMBOL(alloc_netdev_mqs);
  6899. /**
  6900. * free_netdev - free network device
  6901. * @dev: device
  6902. *
  6903. * This function does the last stage of destroying an allocated device
  6904. * interface. The reference to the device object is released. If this
  6905. * is the last reference then it will be freed.Must be called in process
  6906. * context.
  6907. */
  6908. void free_netdev(struct net_device *dev)
  6909. {
  6910. struct napi_struct *p, *n;
  6911. struct bpf_prog *prog;
  6912. might_sleep();
  6913. netif_free_tx_queues(dev);
  6914. #ifdef CONFIG_SYSFS
  6915. kvfree(dev->_rx);
  6916. #endif
  6917. kfree(rcu_dereference_protected(dev->ingress_queue, 1));
  6918. /* Flush device addresses */
  6919. dev_addr_flush(dev);
  6920. list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
  6921. netif_napi_del(p);
  6922. free_percpu(dev->pcpu_refcnt);
  6923. dev->pcpu_refcnt = NULL;
  6924. prog = rcu_dereference_protected(dev->xdp_prog, 1);
  6925. if (prog) {
  6926. bpf_prog_put(prog);
  6927. static_key_slow_dec(&generic_xdp_needed);
  6928. }
  6929. /* Compatibility with error handling in drivers */
  6930. if (dev->reg_state == NETREG_UNINITIALIZED) {
  6931. netdev_freemem(dev);
  6932. return;
  6933. }
  6934. BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
  6935. dev->reg_state = NETREG_RELEASED;
  6936. /* will free via device release */
  6937. put_device(&dev->dev);
  6938. }
  6939. EXPORT_SYMBOL(free_netdev);
  6940. /**
  6941. * synchronize_net - Synchronize with packet receive processing
  6942. *
  6943. * Wait for packets currently being received to be done.
  6944. * Does not block later packets from starting.
  6945. */
  6946. void synchronize_net(void)
  6947. {
  6948. might_sleep();
  6949. if (rtnl_is_locked())
  6950. synchronize_rcu_expedited();
  6951. else
  6952. synchronize_rcu();
  6953. }
  6954. EXPORT_SYMBOL(synchronize_net);
  6955. /**
  6956. * unregister_netdevice_queue - remove device from the kernel
  6957. * @dev: device
  6958. * @head: list
  6959. *
  6960. * This function shuts down a device interface and removes it
  6961. * from the kernel tables.
  6962. * If head not NULL, device is queued to be unregistered later.
  6963. *
  6964. * Callers must hold the rtnl semaphore. You may want
  6965. * unregister_netdev() instead of this.
  6966. */
  6967. void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
  6968. {
  6969. ASSERT_RTNL();
  6970. if (head) {
  6971. list_move_tail(&dev->unreg_list, head);
  6972. } else {
  6973. rollback_registered(dev);
  6974. /* Finish processing unregister after unlock */
  6975. net_set_todo(dev);
  6976. }
  6977. }
  6978. EXPORT_SYMBOL(unregister_netdevice_queue);
  6979. /**
  6980. * unregister_netdevice_many - unregister many devices
  6981. * @head: list of devices
  6982. *
  6983. * Note: As most callers use a stack allocated list_head,
  6984. * we force a list_del() to make sure stack wont be corrupted later.
  6985. */
  6986. void unregister_netdevice_many(struct list_head *head)
  6987. {
  6988. struct net_device *dev;
  6989. if (!list_empty(head)) {
  6990. rollback_registered_many(head);
  6991. list_for_each_entry(dev, head, unreg_list)
  6992. net_set_todo(dev);
  6993. list_del(head);
  6994. }
  6995. }
  6996. EXPORT_SYMBOL(unregister_netdevice_many);
  6997. /**
  6998. * unregister_netdev - remove device from the kernel
  6999. * @dev: device
  7000. *
  7001. * This function shuts down a device interface and removes it
  7002. * from the kernel tables.
  7003. *
  7004. * This is just a wrapper for unregister_netdevice that takes
  7005. * the rtnl semaphore. In general you want to use this and not
  7006. * unregister_netdevice.
  7007. */
  7008. void unregister_netdev(struct net_device *dev)
  7009. {
  7010. rtnl_lock();
  7011. unregister_netdevice(dev);
  7012. rtnl_unlock();
  7013. }
  7014. EXPORT_SYMBOL(unregister_netdev);
  7015. /**
  7016. * dev_change_net_namespace - move device to different nethost namespace
  7017. * @dev: device
  7018. * @net: network namespace
  7019. * @pat: If not NULL name pattern to try if the current device name
  7020. * is already taken in the destination network namespace.
  7021. *
  7022. * This function shuts down a device interface and moves it
  7023. * to a new network namespace. On success 0 is returned, on
  7024. * a failure a netagive errno code is returned.
  7025. *
  7026. * Callers must hold the rtnl semaphore.
  7027. */
  7028. int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
  7029. {
  7030. int err, new_nsid;
  7031. ASSERT_RTNL();
  7032. /* Don't allow namespace local devices to be moved. */
  7033. err = -EINVAL;
  7034. if (dev->features & NETIF_F_NETNS_LOCAL)
  7035. goto out;
  7036. /* Ensure the device has been registrered */
  7037. if (dev->reg_state != NETREG_REGISTERED)
  7038. goto out;
  7039. /* Get out if there is nothing todo */
  7040. err = 0;
  7041. if (net_eq(dev_net(dev), net))
  7042. goto out;
  7043. /* Pick the destination device name, and ensure
  7044. * we can use it in the destination network namespace.
  7045. */
  7046. err = -EEXIST;
  7047. if (__dev_get_by_name(net, dev->name)) {
  7048. /* We get here if we can't use the current device name */
  7049. if (!pat)
  7050. goto out;
  7051. if (dev_get_valid_name(net, dev, pat) < 0)
  7052. goto out;
  7053. }
  7054. /*
  7055. * And now a mini version of register_netdevice unregister_netdevice.
  7056. */
  7057. /* If device is running close it first. */
  7058. dev_close(dev);
  7059. /* And unlink it from device chain */
  7060. err = -ENODEV;
  7061. unlist_netdevice(dev);
  7062. synchronize_net();
  7063. /* Shutdown queueing discipline. */
  7064. dev_shutdown(dev);
  7065. /* Notify protocols, that we are about to destroy
  7066. * this device. They should clean all the things.
  7067. *
  7068. * Note that dev->reg_state stays at NETREG_REGISTERED.
  7069. * This is wanted because this way 8021q and macvlan know
  7070. * the device is just moving and can keep their slaves up.
  7071. */
  7072. call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
  7073. rcu_barrier();
  7074. call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
  7075. if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net)
  7076. new_nsid = peernet2id_alloc(dev_net(dev), net);
  7077. else
  7078. new_nsid = peernet2id(dev_net(dev), net);
  7079. rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid);
  7080. /*
  7081. * Flush the unicast and multicast chains
  7082. */
  7083. dev_uc_flush(dev);
  7084. dev_mc_flush(dev);
  7085. /* Send a netdev-removed uevent to the old namespace */
  7086. kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
  7087. netdev_adjacent_del_links(dev);
  7088. /* Actually switch the network namespace */
  7089. dev_net_set(dev, net);
  7090. /* If there is an ifindex conflict assign a new one */
  7091. if (__dev_get_by_index(net, dev->ifindex))
  7092. dev->ifindex = dev_new_index(net);
  7093. /* Send a netdev-add uevent to the new namespace */
  7094. kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
  7095. netdev_adjacent_add_links(dev);
  7096. /* Fixup kobjects */
  7097. err = device_rename(&dev->dev, dev->name);
  7098. WARN_ON(err);
  7099. /* Add the device back in the hashes */
  7100. list_netdevice(dev);
  7101. /* Notify protocols, that a new device appeared. */
  7102. call_netdevice_notifiers(NETDEV_REGISTER, dev);
  7103. /*
  7104. * Prevent userspace races by waiting until the network
  7105. * device is fully setup before sending notifications.
  7106. */
  7107. rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
  7108. synchronize_net();
  7109. err = 0;
  7110. out:
  7111. return err;
  7112. }
  7113. EXPORT_SYMBOL_GPL(dev_change_net_namespace);
  7114. static int dev_cpu_dead(unsigned int oldcpu)
  7115. {
  7116. struct sk_buff **list_skb;
  7117. struct sk_buff *skb;
  7118. unsigned int cpu;
  7119. struct softnet_data *sd, *oldsd, *remsd = NULL;
  7120. local_irq_disable();
  7121. cpu = smp_processor_id();
  7122. sd = &per_cpu(softnet_data, cpu);
  7123. oldsd = &per_cpu(softnet_data, oldcpu);
  7124. /* Find end of our completion_queue. */
  7125. list_skb = &sd->completion_queue;
  7126. while (*list_skb)
  7127. list_skb = &(*list_skb)->next;
  7128. /* Append completion queue from offline CPU. */
  7129. *list_skb = oldsd->completion_queue;
  7130. oldsd->completion_queue = NULL;
  7131. /* Append output queue from offline CPU. */
  7132. if (oldsd->output_queue) {
  7133. *sd->output_queue_tailp = oldsd->output_queue;
  7134. sd->output_queue_tailp = oldsd->output_queue_tailp;
  7135. oldsd->output_queue = NULL;
  7136. oldsd->output_queue_tailp = &oldsd->output_queue;
  7137. }
  7138. /* Append NAPI poll list from offline CPU, with one exception :
  7139. * process_backlog() must be called by cpu owning percpu backlog.
  7140. * We properly handle process_queue & input_pkt_queue later.
  7141. */
  7142. while (!list_empty(&oldsd->poll_list)) {
  7143. struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
  7144. struct napi_struct,
  7145. poll_list);
  7146. list_del_init(&napi->poll_list);
  7147. if (napi->poll == process_backlog)
  7148. napi->state = 0;
  7149. else
  7150. ____napi_schedule(sd, napi);
  7151. }
  7152. raise_softirq_irqoff(NET_TX_SOFTIRQ);
  7153. local_irq_enable();
  7154. #ifdef CONFIG_RPS
  7155. remsd = oldsd->rps_ipi_list;
  7156. oldsd->rps_ipi_list = NULL;
  7157. #endif
  7158. /* send out pending IPI's on offline CPU */
  7159. net_rps_send_ipi(remsd);
  7160. /* Process offline CPU's input_pkt_queue */
  7161. while ((skb = __skb_dequeue(&oldsd->process_queue))) {
  7162. netif_rx_ni(skb);
  7163. input_queue_head_incr(oldsd);
  7164. }
  7165. while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
  7166. netif_rx_ni(skb);
  7167. input_queue_head_incr(oldsd);
  7168. }
  7169. return 0;
  7170. }
  7171. /**
  7172. * netdev_increment_features - increment feature set by one
  7173. * @all: current feature set
  7174. * @one: new feature set
  7175. * @mask: mask feature set
  7176. *
  7177. * Computes a new feature set after adding a device with feature set
  7178. * @one to the master device with current feature set @all. Will not
  7179. * enable anything that is off in @mask. Returns the new feature set.
  7180. */
  7181. netdev_features_t netdev_increment_features(netdev_features_t all,
  7182. netdev_features_t one, netdev_features_t mask)
  7183. {
  7184. if (mask & NETIF_F_HW_CSUM)
  7185. mask |= NETIF_F_CSUM_MASK;
  7186. mask |= NETIF_F_VLAN_CHALLENGED;
  7187. all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
  7188. all &= one | ~NETIF_F_ALL_FOR_ALL;
  7189. /* If one device supports hw checksumming, set for all. */
  7190. if (all & NETIF_F_HW_CSUM)
  7191. all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);
  7192. return all;
  7193. }
  7194. EXPORT_SYMBOL(netdev_increment_features);
  7195. static struct hlist_head * __net_init netdev_create_hash(void)
  7196. {
  7197. int i;
  7198. struct hlist_head *hash;
  7199. hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
  7200. if (hash != NULL)
  7201. for (i = 0; i < NETDEV_HASHENTRIES; i++)
  7202. INIT_HLIST_HEAD(&hash[i]);
  7203. return hash;
  7204. }
  7205. /* Initialize per network namespace state */
  7206. static int __net_init netdev_init(struct net *net)
  7207. {
  7208. if (net != &init_net)
  7209. INIT_LIST_HEAD(&net->dev_base_head);
  7210. net->dev_name_head = netdev_create_hash();
  7211. if (net->dev_name_head == NULL)
  7212. goto err_name;
  7213. net->dev_index_head = netdev_create_hash();
  7214. if (net->dev_index_head == NULL)
  7215. goto err_idx;
  7216. return 0;
  7217. err_idx:
  7218. kfree(net->dev_name_head);
  7219. err_name:
  7220. return -ENOMEM;
  7221. }
  7222. /**
  7223. * netdev_drivername - network driver for the device
  7224. * @dev: network device
  7225. *
  7226. * Determine network driver for device.
  7227. */
  7228. const char *netdev_drivername(const struct net_device *dev)
  7229. {
  7230. const struct device_driver *driver;
  7231. const struct device *parent;
  7232. const char *empty = "";
  7233. parent = dev->dev.parent;
  7234. if (!parent)
  7235. return empty;
  7236. driver = parent->driver;
  7237. if (driver && driver->name)
  7238. return driver->name;
  7239. return empty;
  7240. }
  7241. static void __netdev_printk(const char *level, const struct net_device *dev,
  7242. struct va_format *vaf)
  7243. {
  7244. if (dev && dev->dev.parent) {
  7245. dev_printk_emit(level[1] - '0',
  7246. dev->dev.parent,
  7247. "%s %s %s%s: %pV",
  7248. dev_driver_string(dev->dev.parent),
  7249. dev_name(dev->dev.parent),
  7250. netdev_name(dev), netdev_reg_state(dev),
  7251. vaf);
  7252. } else if (dev) {
  7253. printk("%s%s%s: %pV",
  7254. level, netdev_name(dev), netdev_reg_state(dev), vaf);
  7255. } else {
  7256. printk("%s(NULL net_device): %pV", level, vaf);
  7257. }
  7258. }
  7259. void netdev_printk(const char *level, const struct net_device *dev,
  7260. const char *format, ...)
  7261. {
  7262. struct va_format vaf;
  7263. va_list args;
  7264. va_start(args, format);
  7265. vaf.fmt = format;
  7266. vaf.va = &args;
  7267. __netdev_printk(level, dev, &vaf);
  7268. va_end(args);
  7269. }
  7270. EXPORT_SYMBOL(netdev_printk);
  7271. #define define_netdev_printk_level(func, level) \
  7272. void func(const struct net_device *dev, const char *fmt, ...) \
  7273. { \
  7274. struct va_format vaf; \
  7275. va_list args; \
  7276. \
  7277. va_start(args, fmt); \
  7278. \
  7279. vaf.fmt = fmt; \
  7280. vaf.va = &args; \
  7281. \
  7282. __netdev_printk(level, dev, &vaf); \
  7283. \
  7284. va_end(args); \
  7285. } \
  7286. EXPORT_SYMBOL(func);
  7287. define_netdev_printk_level(netdev_emerg, KERN_EMERG);
  7288. define_netdev_printk_level(netdev_alert, KERN_ALERT);
  7289. define_netdev_printk_level(netdev_crit, KERN_CRIT);
  7290. define_netdev_printk_level(netdev_err, KERN_ERR);
  7291. define_netdev_printk_level(netdev_warn, KERN_WARNING);
  7292. define_netdev_printk_level(netdev_notice, KERN_NOTICE);
  7293. define_netdev_printk_level(netdev_info, KERN_INFO);
  7294. static void __net_exit netdev_exit(struct net *net)
  7295. {
  7296. kfree(net->dev_name_head);
  7297. kfree(net->dev_index_head);
  7298. if (net != &init_net)
  7299. WARN_ON_ONCE(!list_empty(&net->dev_base_head));
  7300. }
  7301. static struct pernet_operations __net_initdata netdev_net_ops = {
  7302. .init = netdev_init,
  7303. .exit = netdev_exit,
  7304. };
  7305. static void __net_exit default_device_exit(struct net *net)
  7306. {
  7307. struct net_device *dev, *aux;
  7308. /*
  7309. * Push all migratable network devices back to the
  7310. * initial network namespace
  7311. */
  7312. rtnl_lock();
  7313. for_each_netdev_safe(net, dev, aux) {
  7314. int err;
  7315. char fb_name[IFNAMSIZ];
  7316. /* Ignore unmoveable devices (i.e. loopback) */
  7317. if (dev->features & NETIF_F_NETNS_LOCAL)
  7318. continue;
  7319. /* Leave virtual devices for the generic cleanup */
  7320. if (dev->rtnl_link_ops)
  7321. continue;
  7322. /* Push remaining network devices to init_net */
  7323. snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
  7324. err = dev_change_net_namespace(dev, &init_net, fb_name);
  7325. if (err) {
  7326. pr_emerg("%s: failed to move %s to init_net: %d\n",
  7327. __func__, dev->name, err);
  7328. BUG();
  7329. }
  7330. }
  7331. rtnl_unlock();
  7332. }
  7333. static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
  7334. {
  7335. /* Return with the rtnl_lock held when there are no network
  7336. * devices unregistering in any network namespace in net_list.
  7337. */
  7338. struct net *net;
  7339. bool unregistering;
  7340. DEFINE_WAIT_FUNC(wait, woken_wake_function);
  7341. add_wait_queue(&netdev_unregistering_wq, &wait);
  7342. for (;;) {
  7343. unregistering = false;
  7344. rtnl_lock();
  7345. list_for_each_entry(net, net_list, exit_list) {
  7346. if (net->dev_unreg_count > 0) {
  7347. unregistering = true;
  7348. break;
  7349. }
  7350. }
  7351. if (!unregistering)
  7352. break;
  7353. __rtnl_unlock();
  7354. wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
  7355. }
  7356. remove_wait_queue(&netdev_unregistering_wq, &wait);
  7357. }
  7358. static void __net_exit default_device_exit_batch(struct list_head *net_list)
  7359. {
  7360. /* At exit all network devices most be removed from a network
  7361. * namespace. Do this in the reverse order of registration.
  7362. * Do this across as many network namespaces as possible to
  7363. * improve batching efficiency.
  7364. */
  7365. struct net_device *dev;
  7366. struct net *net;
  7367. LIST_HEAD(dev_kill_list);
  7368. /* To prevent network device cleanup code from dereferencing
  7369. * loopback devices or network devices that have been freed
  7370. * wait here for all pending unregistrations to complete,
  7371. * before unregistring the loopback device and allowing the
  7372. * network namespace be freed.
  7373. *
  7374. * The netdev todo list containing all network devices
  7375. * unregistrations that happen in default_device_exit_batch
  7376. * will run in the rtnl_unlock() at the end of
  7377. * default_device_exit_batch.
  7378. */
  7379. rtnl_lock_unregistering(net_list);
  7380. list_for_each_entry(net, net_list, exit_list) {
  7381. for_each_netdev_reverse(net, dev) {
  7382. if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
  7383. dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
  7384. else
  7385. unregister_netdevice_queue(dev, &dev_kill_list);
  7386. }
  7387. }
  7388. unregister_netdevice_many(&dev_kill_list);
  7389. rtnl_unlock();
  7390. }
  7391. static struct pernet_operations __net_initdata default_device_ops = {
  7392. .exit = default_device_exit,
  7393. .exit_batch = default_device_exit_batch,
  7394. };
  7395. /*
  7396. * Initialize the DEV module. At boot time this walks the device list and
  7397. * unhooks any devices that fail to initialise (normally hardware not
  7398. * present) and leaves us with a valid list of present and active devices.
  7399. *
  7400. */
  7401. /*
  7402. * This is called single threaded during boot, so no need
  7403. * to take the rtnl semaphore.
  7404. */
  7405. static int __init net_dev_init(void)
  7406. {
  7407. int i, rc = -ENOMEM;
  7408. BUG_ON(!dev_boot_phase);
  7409. if (dev_proc_init())
  7410. goto out;
  7411. if (netdev_kobject_init())
  7412. goto out;
  7413. INIT_LIST_HEAD(&ptype_all);
  7414. for (i = 0; i < PTYPE_HASH_SIZE; i++)
  7415. INIT_LIST_HEAD(&ptype_base[i]);
  7416. INIT_LIST_HEAD(&offload_base);
  7417. if (register_pernet_subsys(&netdev_net_ops))
  7418. goto out;
  7419. /*
  7420. * Initialise the packet receive queues.
  7421. */
  7422. for_each_possible_cpu(i) {
  7423. struct work_struct *flush = per_cpu_ptr(&flush_works, i);
  7424. struct softnet_data *sd = &per_cpu(softnet_data, i);
  7425. INIT_WORK(flush, flush_backlog);
  7426. skb_queue_head_init(&sd->input_pkt_queue);
  7427. skb_queue_head_init(&sd->process_queue);
  7428. INIT_LIST_HEAD(&sd->poll_list);
  7429. sd->output_queue_tailp = &sd->output_queue;
  7430. #ifdef CONFIG_RPS
  7431. sd->csd.func = rps_trigger_softirq;
  7432. sd->csd.info = sd;
  7433. sd->cpu = i;
  7434. #endif
  7435. sd->backlog.poll = process_backlog;
  7436. sd->backlog.weight = weight_p;
  7437. }
  7438. dev_boot_phase = 0;
  7439. /* The loopback device is special if any other network devices
  7440. * is present in a network namespace the loopback device must
  7441. * be present. Since we now dynamically allocate and free the
  7442. * loopback device ensure this invariant is maintained by
  7443. * keeping the loopback device as the first device on the
  7444. * list of network devices. Ensuring the loopback devices
  7445. * is the first device that appears and the last network device
  7446. * that disappears.
  7447. */
  7448. if (register_pernet_device(&loopback_net_ops))
  7449. goto out;
  7450. if (register_pernet_device(&default_device_ops))
  7451. goto out;
  7452. open_softirq(NET_TX_SOFTIRQ, net_tx_action);
  7453. open_softirq(NET_RX_SOFTIRQ, net_rx_action);
  7454. rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
  7455. NULL, dev_cpu_dead);
  7456. WARN_ON(rc < 0);
  7457. rc = 0;
  7458. out:
  7459. return rc;
  7460. }
  7461. subsys_initcall(net_dev_init);