ixgbe_main.c

/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2016 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_macvlan.h>
#include <linux/if_bridge.h>
#include <linux/prefetch.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/atomic.h>
#include <scsi/fc/fc_fcoe.h>
#include <net/udp_tunnel.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/vxlan.h>
#include <net/mpls.h>

#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_sriov.h"
#include "ixgbe_model.h"
char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
			      "Intel(R) 10 Gigabit PCI Express Network Driver";
#ifdef IXGBE_FCOE
char ixgbe_default_device_descr[] =
			      "Intel(R) 10 Gigabit Network Connection";
#else
static char ixgbe_default_device_descr[] =
			      "Intel(R) 10 Gigabit Network Connection";
#endif
#define DRV_VERSION "5.1.0-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
				"Copyright (c) 1999-2016 Intel Corporation.";

static const char ixgbe_overheat_msg[] =
		"Network adapter has been stopped because it has over heated. Restart the computer. If the problem persists, power off the system and replace the adapter";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
	[board_82598]		= &ixgbe_82598_info,
	[board_82599]		= &ixgbe_82599_info,
	[board_X540]		= &ixgbe_X540_info,
	[board_X550]		= &ixgbe_X550_info,
	[board_X550EM_x]	= &ixgbe_X550EM_x_info,
	[board_x550em_x_fw]	= &ixgbe_x550em_x_fw_info,
	[board_x550em_a]	= &ixgbe_x550em_a_info,
	[board_x550em_a_fw]	= &ixgbe_x550em_a_fw_info,
};
/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id ixgbe_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), board_X550},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_XFI), board_X550EM_x},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_1G_T), board_x550em_x_fw},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_10G_T), board_x550em_a},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T), board_x550em_a_fw },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), board_x550em_a_fw },
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
			    void *p);
static struct notifier_block dca_notifier = {
	.notifier_call = ixgbe_notify_dca,
	.next          = NULL,
	.priority      = 0
};
#endif

#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs,
		 "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63. (Deprecated)");
#endif /* CONFIG_PCI_IOV */

static unsigned int allow_unsupported_sfp;
module_param(allow_unsupported_sfp, uint, 0);
MODULE_PARM_DESC(allow_unsupported_sfp,
		 "Allow unsupported and untested SFP+ modules on 82599-based adapters");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static struct workqueue_struct *ixgbe_wq;
static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *);

static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
					  u32 reg, u16 *value)
{
	struct pci_dev *parent_dev;
	struct pci_bus *parent_bus;

	parent_bus = adapter->pdev->bus->parent;
	if (!parent_bus)
		return -1;

	parent_dev = parent_bus->self;
	if (!parent_dev)
		return -1;

	if (!pci_is_pcie(parent_dev))
		return -1;

	pcie_capability_read_word(parent_dev, reg, value);
	if (*value == IXGBE_FAILED_READ_CFG_WORD &&
	    ixgbe_check_cfg_remove(&adapter->hw, parent_dev))
		return -1;
	return 0;
}
static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u16 link_status = 0;
	int err;

	hw->bus.type = ixgbe_bus_type_pci_express;

	/* Get the negotiated link width and speed from PCI config space of the
	 * parent, as this device is behind a switch
	 */
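	/* Offset 18 is PCI_EXP_LNKSTA, the Link Status register in the
	 * parent's PCI Express capability.
	 */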
	err = ixgbe_read_pci_cfg_word_parent(adapter, 18, &link_status);

	/* assume caller will handle error case */
	if (err)
		return err;

	hw->bus.width = ixgbe_convert_bus_width(link_status);
	hw->bus.speed = ixgbe_convert_bus_speed(link_status);

	return 0;
}
/**
 * ixgbe_pcie_from_parent - Determine whether PCIe info should come from parent
 * @hw: hw specific details
 *
 * This function is used by probe to determine whether a device's PCI-Express
 * bandwidth details should be gathered from the parent bus instead of from the
 * device. Used to ensure that various locations all have the correct device ID
 * checks.
 */
static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
{
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		return true;
	default:
		return false;
	}
}
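/* Warn at probe time when the negotiated PCIe link offers less usable
 * bandwidth than the adapter's ports require (expected_gts, in GT/s).
 */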
static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
				     int expected_gts)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int max_gts = 0;
	enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
	enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
	struct pci_dev *pdev;

	/* Some devices are not connected over PCIe and thus do not negotiate
	 * speed. These devices do not have valid bus info, and thus any report
	 * we generate may not be correct.
	 */
	if (hw->bus.type == ixgbe_bus_type_internal)
		return;

	/* determine whether to use the parent device */
	if (ixgbe_pcie_from_parent(&adapter->hw))
		pdev = adapter->pdev->bus->parent->self;
	else
		pdev = adapter->pdev;

	if (pcie_get_minimum_link(pdev, &speed, &width) ||
	    speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
		e_dev_warn("Unable to determine PCI Express bandwidth.\n");
		return;
	}
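	/* Usable bandwidth in GT/s is the per-lane raw rate scaled by the
	 * encoding efficiency, multiplied by the lane count. For example, a
	 * Gen2 x8 link yields 5.0 GT/s * 0.8 (8b/10b) * 8 lanes = 32 GT/s,
	 * which is what the 5.0GT case below computes as 4 * width.
	 */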
  253. switch (speed) {
  254. case PCIE_SPEED_2_5GT:
  255. /* 8b/10b encoding reduces max throughput by 20% */
  256. max_gts = 2 * width;
  257. break;
  258. case PCIE_SPEED_5_0GT:
  259. /* 8b/10b encoding reduces max throughput by 20% */
  260. max_gts = 4 * width;
  261. break;
  262. case PCIE_SPEED_8_0GT:
  263. /* 128b/130b encoding reduces throughput by less than 2% */
  264. max_gts = 8 * width;
  265. break;
  266. default:
  267. e_dev_warn("Unable to determine PCI Express bandwidth.\n");
  268. return;
  269. }
  270. e_dev_info("PCI Express bandwidth of %dGT/s available\n",
  271. max_gts);
  272. e_dev_info("(Speed:%s, Width: x%d, Encoding Loss:%s)\n",
  273. (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
  274. speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
  275. speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
  276. "Unknown"),
  277. width,
  278. (speed == PCIE_SPEED_2_5GT ? "20%" :
  279. speed == PCIE_SPEED_5_0GT ? "20%" :
  280. speed == PCIE_SPEED_8_0GT ? "<2%" :
  281. "Unknown"));
  282. if (max_gts < expected_gts) {
  283. e_dev_warn("This is not sufficient for optimal performance of this card.\n");
  284. e_dev_warn("For optimal performance, at least %dGT/s of bandwidth is required.\n",
  285. expected_gts);
  286. e_dev_warn("A slot with more lanes and/or higher speed is suggested.\n");
  287. }
  288. }
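/* Queue the service task unless the adapter is going down, is being
 * removed, or the task is already scheduled.
 */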
  289. static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
  290. {
  291. if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
  292. !test_bit(__IXGBE_REMOVING, &adapter->state) &&
  293. !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
  294. queue_work(ixgbe_wq, &adapter->service_task);
  295. }
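/* Mark the adapter as surprise-removed: clear hw_addr so register access
 * short-circuits, then let the service task handle the cleanup.
 */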
  296. static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
  297. {
  298. struct ixgbe_adapter *adapter = hw->back;
  299. if (!hw->hw_addr)
  300. return;
  301. hw->hw_addr = NULL;
  302. e_dev_err("Adapter removed\n");
  303. if (test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
  304. ixgbe_service_event_schedule(adapter);
  305. }
  306. static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
  307. {
  308. u32 value;
309. /* The following check not only optimizes a bit by skipping the
310. * status-register read when the register that just failed was the
311. * status register itself, it also blocks any potential recursion
312. * (ixgbe_read_reg() would otherwise call back into this function
313. * on another failed read).
314. */
  315. if (reg == IXGBE_STATUS) {
  316. ixgbe_remove_adapter(hw);
  317. return;
  318. }
  319. value = ixgbe_read_reg(hw, IXGBE_STATUS);
  320. if (value == IXGBE_FAILED_READ_REG)
  321. ixgbe_remove_adapter(hw);
  322. }
  323. /**
  324. * ixgbe_read_reg - Read from device register
  325. * @hw: hw specific details
  326. * @reg: offset of register to read
  327. *
  328. * Returns : value read or IXGBE_FAILED_READ_REG if removed
  329. *
330. * This function is used to read device registers. If a read returns all
331. * ones, the status register is read to confirm whether the device has
332. * actually been removed. If a removal was detected previously, the
333. * hardware is not touched at all and IXGBE_FAILED_READ_REG (all ones)
334. * is returned.
  335. */
  336. u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
  337. {
  338. u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
  339. u32 value;
  340. if (ixgbe_removed(reg_addr))
  341. return IXGBE_FAILED_READ_REG;
  342. if (unlikely(hw->phy.nw_mng_if_sel &
  343. IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE)) {
  344. struct ixgbe_adapter *adapter;
  345. int i;
  346. for (i = 0; i < 200; ++i) {
  347. value = readl(reg_addr + IXGBE_MAC_SGMII_BUSY);
  348. if (likely(!value))
  349. goto writes_completed;
  350. if (value == IXGBE_FAILED_READ_REG) {
  351. ixgbe_remove_adapter(hw);
  352. return IXGBE_FAILED_READ_REG;
  353. }
  354. udelay(5);
  355. }
  356. adapter = hw->back;
  357. e_warn(hw, "register writes incomplete %08x\n", value);
  358. }
  359. writes_completed:
  360. value = readl(reg_addr + reg);
  361. if (unlikely(value == IXGBE_FAILED_READ_REG))
  362. ixgbe_check_remove(hw, reg);
  363. return value;
  364. }
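/* A config-space read of all ones means the device is gone; flag the
 * removal so callers stop touching the hardware.
 */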
  365. static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev)
  366. {
  367. u16 value;
  368. pci_read_config_word(pdev, PCI_VENDOR_ID, &value);
  369. if (value == IXGBE_FAILED_READ_CFG_WORD) {
  370. ixgbe_remove_adapter(hw);
  371. return true;
  372. }
  373. return false;
  374. }
  375. u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg)
  376. {
  377. struct ixgbe_adapter *adapter = hw->back;
  378. u16 value;
  379. if (ixgbe_removed(hw->hw_addr))
  380. return IXGBE_FAILED_READ_CFG_WORD;
  381. pci_read_config_word(adapter->pdev, reg, &value);
  382. if (value == IXGBE_FAILED_READ_CFG_WORD &&
  383. ixgbe_check_cfg_remove(hw, adapter->pdev))
  384. return IXGBE_FAILED_READ_CFG_WORD;
  385. return value;
  386. }
  387. #ifdef CONFIG_PCI_IOV
  388. static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg)
  389. {
  390. struct ixgbe_adapter *adapter = hw->back;
  391. u32 value;
  392. if (ixgbe_removed(hw->hw_addr))
  393. return IXGBE_FAILED_READ_CFG_DWORD;
  394. pci_read_config_dword(adapter->pdev, reg, &value);
  395. if (value == IXGBE_FAILED_READ_CFG_DWORD &&
  396. ixgbe_check_cfg_remove(hw, adapter->pdev))
  397. return IXGBE_FAILED_READ_CFG_DWORD;
  398. return value;
  399. }
  400. #endif /* CONFIG_PCI_IOV */
  401. void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value)
  402. {
  403. struct ixgbe_adapter *adapter = hw->back;
  404. if (ixgbe_removed(hw->hw_addr))
  405. return;
  406. pci_write_config_word(adapter->pdev, reg, value);
  407. }
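/* Called at the end of the service task so that it can be scheduled again. */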
  408. static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
  409. {
  410. BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
  411. /* flush memory to make sure state is correct before next watchdog */
  412. smp_mb__before_atomic();
  413. clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
  414. }
  415. struct ixgbe_reg_info {
  416. u32 ofs;
  417. char *name;
  418. };
  419. static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {
  420. /* General Registers */
  421. {IXGBE_CTRL, "CTRL"},
  422. {IXGBE_STATUS, "STATUS"},
  423. {IXGBE_CTRL_EXT, "CTRL_EXT"},
  424. /* Interrupt Registers */
  425. {IXGBE_EICR, "EICR"},
  426. /* RX Registers */
  427. {IXGBE_SRRCTL(0), "SRRCTL"},
  428. {IXGBE_DCA_RXCTRL(0), "DRXCTL"},
  429. {IXGBE_RDLEN(0), "RDLEN"},
  430. {IXGBE_RDH(0), "RDH"},
  431. {IXGBE_RDT(0), "RDT"},
  432. {IXGBE_RXDCTL(0), "RXDCTL"},
  433. {IXGBE_RDBAL(0), "RDBAL"},
  434. {IXGBE_RDBAH(0), "RDBAH"},
  435. /* TX Registers */
  436. {IXGBE_TDBAL(0), "TDBAL"},
  437. {IXGBE_TDBAH(0), "TDBAH"},
  438. {IXGBE_TDLEN(0), "TDLEN"},
  439. {IXGBE_TDH(0), "TDH"},
  440. {IXGBE_TDT(0), "TDT"},
  441. {IXGBE_TXDCTL(0), "TXDCTL"},
  442. /* List Terminator */
  443. { .name = NULL }
  444. };
  445. /*
  446. * ixgbe_regdump - register printout routine
  447. */
  448. static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
  449. {
  450. int i;
  451. char rname[16];
  452. u32 regs[64];
  453. switch (reginfo->ofs) {
  454. case IXGBE_SRRCTL(0):
  455. for (i = 0; i < 64; i++)
  456. regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
  457. break;
  458. case IXGBE_DCA_RXCTRL(0):
  459. for (i = 0; i < 64; i++)
  460. regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
  461. break;
  462. case IXGBE_RDLEN(0):
  463. for (i = 0; i < 64; i++)
  464. regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
  465. break;
  466. case IXGBE_RDH(0):
  467. for (i = 0; i < 64; i++)
  468. regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
  469. break;
  470. case IXGBE_RDT(0):
  471. for (i = 0; i < 64; i++)
  472. regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
  473. break;
  474. case IXGBE_RXDCTL(0):
  475. for (i = 0; i < 64; i++)
  476. regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
  477. break;
  478. case IXGBE_RDBAL(0):
  479. for (i = 0; i < 64; i++)
  480. regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
  481. break;
  482. case IXGBE_RDBAH(0):
  483. for (i = 0; i < 64; i++)
  484. regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
  485. break;
  486. case IXGBE_TDBAL(0):
  487. for (i = 0; i < 64; i++)
  488. regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
  489. break;
  490. case IXGBE_TDBAH(0):
  491. for (i = 0; i < 64; i++)
  492. regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
  493. break;
  494. case IXGBE_TDLEN(0):
  495. for (i = 0; i < 64; i++)
  496. regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
  497. break;
  498. case IXGBE_TDH(0):
  499. for (i = 0; i < 64; i++)
  500. regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
  501. break;
  502. case IXGBE_TDT(0):
  503. for (i = 0; i < 64; i++)
  504. regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
  505. break;
  506. case IXGBE_TXDCTL(0):
  507. for (i = 0; i < 64; i++)
  508. regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
  509. break;
  510. default:
  511. pr_info("%-15s %08x\n",
  512. reginfo->name, IXGBE_READ_REG(hw, reginfo->ofs));
  513. return;
  514. }
  515. i = 0;
  516. while (i < 64) {
  517. int j;
  518. char buf[9 * 8 + 1];
  519. char *p = buf;
  520. snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i, i + 7);
  521. for (j = 0; j < 8; j++)
  522. p += sprintf(p, " %08x", regs[i++]);
  523. pr_err("%-15s%s\n", rname, buf);
  524. }
  525. }
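/* Print a one-line summary of the tx_buffer at next_to_clean for the given
 * ring (used by ixgbe_dump() below).
 */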
  526. static void ixgbe_print_buffer(struct ixgbe_ring *ring, int n)
  527. {
  528. struct ixgbe_tx_buffer *tx_buffer;
  529. tx_buffer = &ring->tx_buffer_info[ring->next_to_clean];
  530. pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n",
  531. n, ring->next_to_use, ring->next_to_clean,
  532. (u64)dma_unmap_addr(tx_buffer, dma),
  533. dma_unmap_len(tx_buffer, len),
  534. tx_buffer->next_to_watch,
  535. (u64)tx_buffer->time_stamp);
  536. }
  537. /*
  538. * ixgbe_dump - Print registers, tx-rings and rx-rings
  539. */
  540. static void ixgbe_dump(struct ixgbe_adapter *adapter)
  541. {
  542. struct net_device *netdev = adapter->netdev;
  543. struct ixgbe_hw *hw = &adapter->hw;
  544. struct ixgbe_reg_info *reginfo;
  545. int n = 0;
  546. struct ixgbe_ring *ring;
  547. struct ixgbe_tx_buffer *tx_buffer;
  548. union ixgbe_adv_tx_desc *tx_desc;
  549. struct my_u0 { u64 a; u64 b; } *u0;
  550. struct ixgbe_ring *rx_ring;
  551. union ixgbe_adv_rx_desc *rx_desc;
  552. struct ixgbe_rx_buffer *rx_buffer_info;
  553. int i = 0;
  554. if (!netif_msg_hw(adapter))
  555. return;
  556. /* Print netdevice Info */
  557. if (netdev) {
  558. dev_info(&adapter->pdev->dev, "Net device Info\n");
  559. pr_info("Device Name state "
  560. "trans_start\n");
  561. pr_info("%-15s %016lX %016lX\n",
  562. netdev->name,
  563. netdev->state,
  564. dev_trans_start(netdev));
  565. }
  566. /* Print Registers */
  567. dev_info(&adapter->pdev->dev, "Register Dump\n");
  568. pr_info(" Register Name Value\n");
  569. for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
  570. reginfo->name; reginfo++) {
  571. ixgbe_regdump(hw, reginfo);
  572. }
  573. /* Print TX Ring Summary */
  574. if (!netdev || !netif_running(netdev))
  575. return;
  576. dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
  577. pr_info(" %s %s %s %s\n",
  578. "Queue [NTU] [NTC] [bi(ntc)->dma ]",
  579. "leng", "ntw", "timestamp");
  580. for (n = 0; n < adapter->num_tx_queues; n++) {
  581. ring = adapter->tx_ring[n];
  582. ixgbe_print_buffer(ring, n);
  583. }
  584. for (n = 0; n < adapter->num_xdp_queues; n++) {
  585. ring = adapter->xdp_ring[n];
  586. ixgbe_print_buffer(ring, n);
  587. }
  588. /* Print TX Rings */
  589. if (!netif_msg_tx_done(adapter))
  590. goto rx_ring_summary;
  591. dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
  592. /* Transmit Descriptor Formats
  593. *
  594. * 82598 Advanced Transmit Descriptor
  595. * +--------------------------------------------------------------+
  596. * 0 | Buffer Address [63:0] |
  597. * +--------------------------------------------------------------+
  598. * 8 | PAYLEN | POPTS | IDX | STA | DCMD |DTYP | RSV | DTALEN |
  599. * +--------------------------------------------------------------+
  600. * 63 46 45 40 39 36 35 32 31 24 23 20 19 0
  601. *
  602. * 82598 Advanced Transmit Descriptor (Write-Back Format)
  603. * +--------------------------------------------------------------+
  604. * 0 | RSV [63:0] |
  605. * +--------------------------------------------------------------+
  606. * 8 | RSV | STA | NXTSEQ |
  607. * +--------------------------------------------------------------+
  608. * 63 36 35 32 31 0
  609. *
  610. * 82599+ Advanced Transmit Descriptor
  611. * +--------------------------------------------------------------+
  612. * 0 | Buffer Address [63:0] |
  613. * +--------------------------------------------------------------+
  614. * 8 |PAYLEN |POPTS|CC|IDX |STA |DCMD |DTYP |MAC |RSV |DTALEN |
  615. * +--------------------------------------------------------------+
  616. * 63 46 45 40 39 38 36 35 32 31 24 23 20 19 18 17 16 15 0
  617. *
  618. * 82599+ Advanced Transmit Descriptor (Write-Back Format)
  619. * +--------------------------------------------------------------+
  620. * 0 | RSV [63:0] |
  621. * +--------------------------------------------------------------+
  622. * 8 | RSV | STA | RSV |
  623. * +--------------------------------------------------------------+
  624. * 63 36 35 32 31 0
  625. */
  626. for (n = 0; n < adapter->num_tx_queues; n++) {
  627. ring = adapter->tx_ring[n];
  628. pr_info("------------------------------------\n");
  629. pr_info("TX QUEUE INDEX = %d\n", ring->queue_index);
  630. pr_info("------------------------------------\n");
  631. pr_info("%s%s %s %s %s %s\n",
  632. "T [desc] [address 63:0 ] ",
  633. "[PlPOIdStDDt Ln] [bi->dma ] ",
  634. "leng", "ntw", "timestamp", "bi->skb");
  635. for (i = 0; ring->desc && (i < ring->count); i++) {
  636. tx_desc = IXGBE_TX_DESC(ring, i);
  637. tx_buffer = &ring->tx_buffer_info[i];
  638. u0 = (struct my_u0 *)tx_desc;
  639. if (dma_unmap_len(tx_buffer, len) > 0) {
  640. const char *ring_desc;
  641. if (i == ring->next_to_use &&
  642. i == ring->next_to_clean)
  643. ring_desc = " NTC/U";
  644. else if (i == ring->next_to_use)
  645. ring_desc = " NTU";
  646. else if (i == ring->next_to_clean)
  647. ring_desc = " NTC";
  648. else
  649. ring_desc = "";
  650. pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p%s",
  651. i,
  652. le64_to_cpu(u0->a),
  653. le64_to_cpu(u0->b),
  654. (u64)dma_unmap_addr(tx_buffer, dma),
  655. dma_unmap_len(tx_buffer, len),
  656. tx_buffer->next_to_watch,
  657. (u64)tx_buffer->time_stamp,
  658. tx_buffer->skb,
  659. ring_desc);
  660. if (netif_msg_pktdata(adapter) &&
  661. tx_buffer->skb)
  662. print_hex_dump(KERN_INFO, "",
  663. DUMP_PREFIX_ADDRESS, 16, 1,
  664. tx_buffer->skb->data,
  665. dma_unmap_len(tx_buffer, len),
  666. true);
  667. }
  668. }
  669. }
  670. /* Print RX Rings Summary */
  671. rx_ring_summary:
  672. dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
  673. pr_info("Queue [NTU] [NTC]\n");
  674. for (n = 0; n < adapter->num_rx_queues; n++) {
  675. rx_ring = adapter->rx_ring[n];
  676. pr_info("%5d %5X %5X\n",
  677. n, rx_ring->next_to_use, rx_ring->next_to_clean);
  678. }
  679. /* Print RX Rings */
  680. if (!netif_msg_rx_status(adapter))
  681. return;
  682. dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
  683. /* Receive Descriptor Formats
  684. *
  685. * 82598 Advanced Receive Descriptor (Read) Format
  686. * 63 1 0
  687. * +-----------------------------------------------------+
  688. * 0 | Packet Buffer Address [63:1] |A0/NSE|
  689. * +----------------------------------------------+------+
  690. * 8 | Header Buffer Address [63:1] | DD |
  691. * +-----------------------------------------------------+
  692. *
  693. *
  694. * 82598 Advanced Receive Descriptor (Write-Back) Format
  695. *
  696. * 63 48 47 32 31 30 21 20 16 15 4 3 0
  697. * +------------------------------------------------------+
  698. * 0 | RSS Hash / |SPH| HDR_LEN | RSV |Packet| RSS |
  699. * | Packet | IP | | | | Type | Type |
  700. * | Checksum | Ident | | | | | |
  701. * +------------------------------------------------------+
  702. * 8 | VLAN Tag | Length | Extended Error | Extended Status |
  703. * +------------------------------------------------------+
  704. * 63 48 47 32 31 20 19 0
  705. *
  706. * 82599+ Advanced Receive Descriptor (Read) Format
  707. * 63 1 0
  708. * +-----------------------------------------------------+
  709. * 0 | Packet Buffer Address [63:1] |A0/NSE|
  710. * +----------------------------------------------+------+
  711. * 8 | Header Buffer Address [63:1] | DD |
  712. * +-----------------------------------------------------+
  713. *
  714. *
  715. * 82599+ Advanced Receive Descriptor (Write-Back) Format
  716. *
  717. * 63 48 47 32 31 30 21 20 17 16 4 3 0
  718. * +------------------------------------------------------+
  719. * 0 |RSS / Frag Checksum|SPH| HDR_LEN |RSC- |Packet| RSS |
  720. * |/ RTT / PCoE_PARAM | | | CNT | Type | Type |
  721. * |/ Flow Dir Flt ID | | | | | |
  722. * +------------------------------------------------------+
  723. * 8 | VLAN Tag | Length |Extended Error| Xtnd Status/NEXTP |
  724. * +------------------------------------------------------+
  725. * 63 48 47 32 31 20 19 0
  726. */
  727. for (n = 0; n < adapter->num_rx_queues; n++) {
  728. rx_ring = adapter->rx_ring[n];
  729. pr_info("------------------------------------\n");
  730. pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
  731. pr_info("------------------------------------\n");
  732. pr_info("%s%s%s\n",
  733. "R [desc] [ PktBuf A0] ",
  734. "[ HeadBuf DD] [bi->dma ] [bi->skb ] ",
  735. "<-- Adv Rx Read format");
  736. pr_info("%s%s%s\n",
  737. "RWB[desc] [PcsmIpSHl PtRs] ",
  738. "[vl er S cks ln] ---------------- [bi->skb ] ",
  739. "<-- Adv Rx Write-Back format");
  740. for (i = 0; i < rx_ring->count; i++) {
  741. const char *ring_desc;
  742. if (i == rx_ring->next_to_use)
  743. ring_desc = " NTU";
  744. else if (i == rx_ring->next_to_clean)
  745. ring_desc = " NTC";
  746. else
  747. ring_desc = "";
  748. rx_buffer_info = &rx_ring->rx_buffer_info[i];
  749. rx_desc = IXGBE_RX_DESC(rx_ring, i);
  750. u0 = (struct my_u0 *)rx_desc;
  751. if (rx_desc->wb.upper.length) {
  752. /* Descriptor Done */
  753. pr_info("RWB[0x%03X] %016llX %016llX ---------------- %p%s\n",
  754. i,
  755. le64_to_cpu(u0->a),
  756. le64_to_cpu(u0->b),
  757. rx_buffer_info->skb,
  758. ring_desc);
  759. } else {
  760. pr_info("R [0x%03X] %016llX %016llX %016llX %p%s\n",
  761. i,
  762. le64_to_cpu(u0->a),
  763. le64_to_cpu(u0->b),
  764. (u64)rx_buffer_info->dma,
  765. rx_buffer_info->skb,
  766. ring_desc);
  767. if (netif_msg_pktdata(adapter) &&
  768. rx_buffer_info->dma) {
  769. print_hex_dump(KERN_INFO, "",
  770. DUMP_PREFIX_ADDRESS, 16, 1,
  771. page_address(rx_buffer_info->page) +
  772. rx_buffer_info->page_offset,
  773. ixgbe_rx_bufsz(rx_ring), true);
  774. }
  775. }
  776. }
  777. }
  778. }
  779. static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
  780. {
  781. u32 ctrl_ext;
  782. /* Let firmware take over control of h/w */
  783. ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
  784. IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
  785. ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
  786. }
  787. static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
  788. {
  789. u32 ctrl_ext;
  790. /* Let firmware know the driver has taken over */
  791. ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
  792. IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
  793. ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
  794. }
  795. /**
  796. * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
  797. * @adapter: pointer to adapter struct
  798. * @direction: 0 for Rx, 1 for Tx, -1 for other causes
  799. * @queue: queue to map the corresponding interrupt to
  800. * @msix_vector: the vector to map to the corresponding queue
  801. *
  802. */
  803. static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
  804. u8 queue, u8 msix_vector)
  805. {
  806. u32 ivar, index;
  807. struct ixgbe_hw *hw = &adapter->hw;
  808. switch (hw->mac.type) {
  809. case ixgbe_mac_82598EB:
  810. msix_vector |= IXGBE_IVAR_ALLOC_VAL;
  811. if (direction == -1)
  812. direction = 0;
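/* 82598: each 32-bit IVAR register holds four 8-bit entries; the 64 Rx
 * causes are followed by the 64 Tx causes, hence the folding below.
 */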
  813. index = (((direction * 64) + queue) >> 2) & 0x1F;
  814. ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
  815. ivar &= ~(0xFF << (8 * (queue & 0x3)));
  816. ivar |= (msix_vector << (8 * (queue & 0x3)));
  817. IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
  818. break;
  819. case ixgbe_mac_82599EB:
  820. case ixgbe_mac_X540:
  821. case ixgbe_mac_X550:
  822. case ixgbe_mac_X550EM_x:
  823. case ixgbe_mac_x550em_a:
  824. if (direction == -1) {
  825. /* other causes */
  826. msix_vector |= IXGBE_IVAR_ALLOC_VAL;
  827. index = ((queue & 1) * 8);
  828. ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
  829. ivar &= ~(0xFF << index);
  830. ivar |= (msix_vector << index);
  831. IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
  832. break;
  833. } else {
  834. /* tx or rx causes */
  835. msix_vector |= IXGBE_IVAR_ALLOC_VAL;
  836. index = ((16 * (queue & 1)) + (8 * direction));
  837. ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
  838. ivar &= ~(0xFF << index);
  839. ivar |= (msix_vector << index);
  840. IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
  841. break;
  842. }
  843. default:
  844. break;
  845. }
  846. }
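/* Trigger a software interrupt (EICS) for every queue set in qmask so that
 * its vector gets serviced again.
 */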
  847. static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
  848. u64 qmask)
  849. {
  850. u32 mask;
  851. switch (adapter->hw.mac.type) {
  852. case ixgbe_mac_82598EB:
  853. mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
  854. IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
  855. break;
  856. case ixgbe_mac_82599EB:
  857. case ixgbe_mac_X540:
  858. case ixgbe_mac_X550:
  859. case ixgbe_mac_X550EM_x:
  860. case ixgbe_mac_x550em_a:
  861. mask = (qmask & 0xFFFFFFFF);
  862. IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
  863. mask = (qmask >> 32);
  864. IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
  865. break;
  866. default:
  867. break;
  868. }
  869. }
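/* Update link-level flow control XOFF statistics and, if any XOFF frames
 * were received, disarm the Tx hang checks.
 */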
  870. static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
  871. {
  872. struct ixgbe_hw *hw = &adapter->hw;
  873. struct ixgbe_hw_stats *hwstats = &adapter->stats;
  874. int i;
  875. u32 data;
  876. if ((hw->fc.current_mode != ixgbe_fc_full) &&
  877. (hw->fc.current_mode != ixgbe_fc_rx_pause))
  878. return;
  879. switch (hw->mac.type) {
  880. case ixgbe_mac_82598EB:
  881. data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
  882. break;
  883. default:
  884. data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
  885. }
  886. hwstats->lxoffrxc += data;
  887. /* refill credits (no tx hang) if we received xoff */
  888. if (!data)
  889. return;
  890. for (i = 0; i < adapter->num_tx_queues; i++)
  891. clear_bit(__IXGBE_HANG_CHECK_ARMED,
  892. &adapter->tx_ring[i]->state);
  893. for (i = 0; i < adapter->num_xdp_queues; i++)
  894. clear_bit(__IXGBE_HANG_CHECK_ARMED,
  895. &adapter->xdp_ring[i]->state);
  896. }
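/* Per-priority (PFC) variant of the XOFF accounting above; falls back to
 * link-level flow control stats when DCB/PFC is not enabled.
 */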
  897. static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
  898. {
  899. struct ixgbe_hw *hw = &adapter->hw;
  900. struct ixgbe_hw_stats *hwstats = &adapter->stats;
  901. u32 xoff[8] = {0};
  902. u8 tc;
  903. int i;
  904. bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
  905. if (adapter->ixgbe_ieee_pfc)
  906. pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
  907. if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) {
  908. ixgbe_update_xoff_rx_lfc(adapter);
  909. return;
  910. }
  911. /* update stats for each tc, only valid with PFC enabled */
  912. for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
  913. u32 pxoffrxc;
  914. switch (hw->mac.type) {
  915. case ixgbe_mac_82598EB:
  916. pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
  917. break;
  918. default:
  919. pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
  920. }
  921. hwstats->pxoffrxc[i] += pxoffrxc;
  922. /* Get the TC for given UP */
  923. tc = netdev_get_prio_tc_map(adapter->netdev, i);
  924. xoff[tc] += pxoffrxc;
  925. }
  926. /* disarm tx queues that have received xoff frames */
  927. for (i = 0; i < adapter->num_tx_queues; i++) {
  928. struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
  929. tc = tx_ring->dcb_tc;
  930. if (xoff[tc])
  931. clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
  932. }
  933. for (i = 0; i < adapter->num_xdp_queues; i++) {
  934. struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i];
  935. tc = xdp_ring->dcb_tc;
  936. if (xoff[tc])
  937. clear_bit(__IXGBE_HANG_CHECK_ARMED, &xdp_ring->state);
  938. }
  939. }
  940. static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
  941. {
  942. return ring->stats.packets;
  943. }
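/* Number of descriptors the hardware has not yet consumed, derived from the
 * ring's head (TDH) and tail (TDT) registers.
 */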
  944. static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
  945. {
  946. struct ixgbe_adapter *adapter;
  947. struct ixgbe_hw *hw;
  948. u32 head, tail;
  949. if (ring->l2_accel_priv)
  950. adapter = ring->l2_accel_priv->real_adapter;
  951. else
  952. adapter = netdev_priv(ring->netdev);
  953. hw = &adapter->hw;
  954. head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
  955. tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
  956. if (head != tail)
  957. return (head < tail) ?
  958. tail - head : (tail + ring->count - head);
  959. return 0;
  960. }
  961. static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
  962. {
  963. u32 tx_done = ixgbe_get_tx_completed(tx_ring);
  964. u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
  965. u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
  966. clear_check_for_tx_hang(tx_ring);
  967. /*
  968. * Check for a hung queue, but be thorough. This verifies
  969. * that a transmit has been completed since the previous
  970. * check AND there is at least one packet pending. The
  971. * ARMED bit is set to indicate a potential hang. The
  972. * bit is cleared if a pause frame is received to remove
  973. * false hang detection due to PFC or 802.3x frames. By
  974. * requiring this to fail twice we avoid races with
  975. * pfc clearing the ARMED bit and conditions where we
  976. * run the check_tx_hang logic with a transmit completion
  977. * pending but without time to complete it yet.
  978. */
  979. if (tx_done_old == tx_done && tx_pending)
  980. /* make sure it is true for two checks in a row */
  981. return test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
  982. &tx_ring->state);
  983. /* update completed stats and continue */
  984. tx_ring->tx_stats.tx_done_old = tx_done;
  985. /* reset the countdown */
  986. clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
  987. return false;
  988. }
  989. /**
  990. * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
  991. * @adapter: driver private struct
  992. **/
  993. static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
  994. {
  995. /* Do the reset outside of interrupt context */
  996. if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
  997. set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
  998. e_warn(drv, "initiating reset due to tx timeout\n");
  999. ixgbe_service_event_schedule(adapter);
  1000. }
  1001. }
  1002. /**
  1003. * ixgbe_tx_maxrate - callback to set the maximum per-queue bitrate
* @netdev: network interface device structure
* @queue_index: Tx queue to apply the limit to
* @maxrate: desired maximum transmit bitrate
1004. **/
  1005. static int ixgbe_tx_maxrate(struct net_device *netdev,
  1006. int queue_index, u32 maxrate)
  1007. {
  1008. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  1009. struct ixgbe_hw *hw = &adapter->hw;
  1010. u32 bcnrc_val = ixgbe_link_mbps(adapter);
  1011. if (!maxrate)
  1012. return 0;
  1013. /* Calculate the rate factor values to set */
  1014. bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT;
  1015. bcnrc_val /= maxrate;
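/* bcnrc_val is now the link speed to maxrate ratio in fixed point, e.g. a
 * 10000 Mbps link capped at 1000 Mbps gives a rate factor of 10.
 */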
  1016. /* clear everything but the rate factor */
  1017. bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK |
  1018. IXGBE_RTTBCNRC_RF_DEC_MASK;
  1019. /* enable the rate scheduler */
  1020. bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;
  1021. IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_index);
  1022. IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
  1023. return 0;
  1024. }
  1025. /**
  1026. * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
  1027. * @q_vector: structure containing interrupt and ring information
  1028. * @tx_ring: tx ring to clean
  1029. * @napi_budget: Used to determine if we are in netpoll
  1030. **/
  1031. static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
  1032. struct ixgbe_ring *tx_ring, int napi_budget)
  1033. {
  1034. struct ixgbe_adapter *adapter = q_vector->adapter;
  1035. struct ixgbe_tx_buffer *tx_buffer;
  1036. union ixgbe_adv_tx_desc *tx_desc;
  1037. unsigned int total_bytes = 0, total_packets = 0;
  1038. unsigned int budget = q_vector->tx.work_limit;
  1039. unsigned int i = tx_ring->next_to_clean;
  1040. if (test_bit(__IXGBE_DOWN, &adapter->state))
  1041. return true;
  1042. tx_buffer = &tx_ring->tx_buffer_info[i];
  1043. tx_desc = IXGBE_TX_DESC(tx_ring, i);
  1044. i -= tx_ring->count;
  1045. do {
  1046. union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
  1047. /* if next_to_watch is not set then there is no work pending */
  1048. if (!eop_desc)
  1049. break;
  1050. /* prevent any other reads prior to eop_desc */
  1051. read_barrier_depends();
  1052. /* if DD is not set pending work has not been completed */
  1053. if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
  1054. break;
  1055. /* clear next_to_watch to prevent false hangs */
  1056. tx_buffer->next_to_watch = NULL;
  1057. /* update the statistics for this packet */
  1058. total_bytes += tx_buffer->bytecount;
  1059. total_packets += tx_buffer->gso_segs;
  1060. /* free the skb */
  1061. if (ring_is_xdp(tx_ring))
  1062. page_frag_free(tx_buffer->data);
  1063. else
  1064. napi_consume_skb(tx_buffer->skb, napi_budget);
  1065. /* unmap skb header data */
  1066. dma_unmap_single(tx_ring->dev,
  1067. dma_unmap_addr(tx_buffer, dma),
  1068. dma_unmap_len(tx_buffer, len),
  1069. DMA_TO_DEVICE);
  1070. /* clear tx_buffer data */
  1071. dma_unmap_len_set(tx_buffer, len, 0);
  1072. /* unmap remaining buffers */
  1073. while (tx_desc != eop_desc) {
  1074. tx_buffer++;
  1075. tx_desc++;
  1076. i++;
  1077. if (unlikely(!i)) {
  1078. i -= tx_ring->count;
  1079. tx_buffer = tx_ring->tx_buffer_info;
  1080. tx_desc = IXGBE_TX_DESC(tx_ring, 0);
  1081. }
  1082. /* unmap any remaining paged data */
  1083. if (dma_unmap_len(tx_buffer, len)) {
  1084. dma_unmap_page(tx_ring->dev,
  1085. dma_unmap_addr(tx_buffer, dma),
  1086. dma_unmap_len(tx_buffer, len),
  1087. DMA_TO_DEVICE);
  1088. dma_unmap_len_set(tx_buffer, len, 0);
  1089. }
  1090. }
  1091. /* move us one more past the eop_desc for start of next pkt */
  1092. tx_buffer++;
  1093. tx_desc++;
  1094. i++;
  1095. if (unlikely(!i)) {
  1096. i -= tx_ring->count;
  1097. tx_buffer = tx_ring->tx_buffer_info;
  1098. tx_desc = IXGBE_TX_DESC(tx_ring, 0);
  1099. }
  1100. /* issue prefetch for next Tx descriptor */
  1101. prefetch(tx_desc);
  1102. /* update budget accounting */
  1103. budget--;
  1104. } while (likely(budget));
  1105. i += tx_ring->count;
  1106. tx_ring->next_to_clean = i;
  1107. u64_stats_update_begin(&tx_ring->syncp);
  1108. tx_ring->stats.bytes += total_bytes;
  1109. tx_ring->stats.packets += total_packets;
  1110. u64_stats_update_end(&tx_ring->syncp);
  1111. q_vector->tx.total_bytes += total_bytes;
  1112. q_vector->tx.total_packets += total_packets;
  1113. if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
  1114. /* schedule immediate reset if we believe we hung */
  1115. struct ixgbe_hw *hw = &adapter->hw;
  1116. e_err(drv, "Detected Tx Unit Hang %s\n"
  1117. " Tx Queue <%d>\n"
  1118. " TDH, TDT <%x>, <%x>\n"
  1119. " next_to_use <%x>\n"
  1120. " next_to_clean <%x>\n"
  1121. "tx_buffer_info[next_to_clean]\n"
  1122. " time_stamp <%lx>\n"
  1123. " jiffies <%lx>\n",
  1124. ring_is_xdp(tx_ring) ? "(XDP)" : "",
  1125. tx_ring->queue_index,
  1126. IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
  1127. IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
  1128. tx_ring->next_to_use, i,
  1129. tx_ring->tx_buffer_info[i].time_stamp, jiffies);
  1130. if (!ring_is_xdp(tx_ring))
  1131. netif_stop_subqueue(tx_ring->netdev,
  1132. tx_ring->queue_index);
  1133. e_info(probe,
  1134. "tx hang %d detected on queue %d, resetting adapter\n",
  1135. adapter->tx_timeout_count + 1, tx_ring->queue_index);
  1136. /* schedule immediate reset if we believe we hung */
  1137. ixgbe_tx_timeout_reset(adapter);
  1138. /* the adapter is about to reset, no point in enabling stuff */
  1139. return true;
  1140. }
  1141. if (ring_is_xdp(tx_ring))
  1142. return !!budget;
  1143. netdev_tx_completed_queue(txring_txq(tx_ring),
  1144. total_packets, total_bytes);
  1145. #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
  1146. if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
  1147. (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
  1148. /* Make sure that anybody stopping the queue after this
  1149. * sees the new next_to_clean.
  1150. */
  1151. smp_mb();
  1152. if (__netif_subqueue_stopped(tx_ring->netdev,
  1153. tx_ring->queue_index)
  1154. && !test_bit(__IXGBE_DOWN, &adapter->state)) {
  1155. netif_wake_subqueue(tx_ring->netdev,
  1156. tx_ring->queue_index);
  1157. ++tx_ring->tx_stats.restart_queue;
  1158. }
  1159. }
  1160. return !!budget;
  1161. }
  1162. #ifdef CONFIG_IXGBE_DCA
  1163. static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
  1164. struct ixgbe_ring *tx_ring,
  1165. int cpu)
  1166. {
  1167. struct ixgbe_hw *hw = &adapter->hw;
  1168. u32 txctrl = 0;
  1169. u16 reg_offset;
  1170. if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
  1171. txctrl = dca3_get_tag(tx_ring->dev, cpu);
  1172. switch (hw->mac.type) {
  1173. case ixgbe_mac_82598EB:
  1174. reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx);
  1175. break;
  1176. case ixgbe_mac_82599EB:
  1177. case ixgbe_mac_X540:
  1178. reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx);
  1179. txctrl <<= IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599;
  1180. break;
  1181. default:
  1182. /* for unknown hardware do not write register */
  1183. return;
  1184. }
  1185. /*
  1186. * We can enable relaxed ordering for reads, but not writes when
  1187. * DCA is enabled. This is due to a known issue in some chipsets
  1188. * which will cause the DCA tag to be cleared.
  1189. */
  1190. txctrl |= IXGBE_DCA_TXCTRL_DESC_RRO_EN |
  1191. IXGBE_DCA_TXCTRL_DATA_RRO_EN |
  1192. IXGBE_DCA_TXCTRL_DESC_DCA_EN;
  1193. IXGBE_WRITE_REG(hw, reg_offset, txctrl);
  1194. }
  1195. static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
  1196. struct ixgbe_ring *rx_ring,
  1197. int cpu)
  1198. {
  1199. struct ixgbe_hw *hw = &adapter->hw;
  1200. u32 rxctrl = 0;
  1201. u8 reg_idx = rx_ring->reg_idx;
  1202. if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
  1203. rxctrl = dca3_get_tag(rx_ring->dev, cpu);
  1204. switch (hw->mac.type) {
  1205. case ixgbe_mac_82599EB:
  1206. case ixgbe_mac_X540:
  1207. rxctrl <<= IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599;
  1208. break;
  1209. default:
  1210. break;
  1211. }
  1212. /*
  1213. * We can enable relaxed ordering for reads, but not writes when
  1214. * DCA is enabled. This is due to a known issue in some chipsets
  1215. * which will cause the DCA tag to be cleared.
  1216. */
  1217. rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN |
  1218. IXGBE_DCA_RXCTRL_DATA_DCA_EN |
  1219. IXGBE_DCA_RXCTRL_DESC_DCA_EN;
  1220. IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
  1221. }
  1222. static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
  1223. {
  1224. struct ixgbe_adapter *adapter = q_vector->adapter;
  1225. struct ixgbe_ring *ring;
  1226. int cpu = get_cpu();
  1227. if (q_vector->cpu == cpu)
  1228. goto out_no_update;
  1229. ixgbe_for_each_ring(ring, q_vector->tx)
  1230. ixgbe_update_tx_dca(adapter, ring, cpu);
  1231. ixgbe_for_each_ring(ring, q_vector->rx)
  1232. ixgbe_update_rx_dca(adapter, ring, cpu);
  1233. q_vector->cpu = cpu;
  1234. out_no_update:
  1235. put_cpu();
  1236. }
  1237. static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
  1238. {
  1239. int i;
  1240. /* always use CB2 mode, difference is masked in the CB driver */
  1241. if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
  1242. IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
  1243. IXGBE_DCA_CTRL_DCA_MODE_CB2);
  1244. else
  1245. IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
  1246. IXGBE_DCA_CTRL_DCA_DISABLE);
  1247. for (i = 0; i < adapter->num_q_vectors; i++) {
  1248. adapter->q_vector[i]->cpu = -1;
  1249. ixgbe_update_dca(adapter->q_vector[i]);
  1250. }
  1251. }
  1252. static int __ixgbe_notify_dca(struct device *dev, void *data)
  1253. {
  1254. struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
  1255. unsigned long event = *(unsigned long *)data;
  1256. if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE))
  1257. return 0;
  1258. switch (event) {
  1259. case DCA_PROVIDER_ADD:
  1260. /* if we're already enabled, don't do it again */
  1261. if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
  1262. break;
  1263. if (dca_add_requester(dev) == 0) {
  1264. adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
  1265. IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
  1266. IXGBE_DCA_CTRL_DCA_MODE_CB2);
  1267. break;
  1268. }
  1269. /* fall through - DCA is disabled. */
  1270. case DCA_PROVIDER_REMOVE:
  1271. if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
  1272. dca_remove_requester(dev);
  1273. adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
  1274. IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
  1275. IXGBE_DCA_CTRL_DCA_DISABLE);
  1276. }
  1277. break;
  1278. }
  1279. return 0;
  1280. }
  1281. #endif /* CONFIG_IXGBE_DCA */
  1282. #define IXGBE_RSS_L4_TYPES_MASK \
  1283. ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
  1284. (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
  1285. (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
  1286. (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))
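/* Copy the RSS hash from the descriptor into the skb, tagging it as an L4
 * hash for TCP/UDP RSS types and as an L3 hash otherwise.
 */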
  1287. static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
  1288. union ixgbe_adv_rx_desc *rx_desc,
  1289. struct sk_buff *skb)
  1290. {
  1291. u16 rss_type;
  1292. if (!(ring->netdev->features & NETIF_F_RXHASH))
  1293. return;
  1294. rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
  1295. IXGBE_RXDADV_RSSTYPE_MASK;
  1296. if (!rss_type)
  1297. return;
  1298. skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
  1299. (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
  1300. PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
  1301. }
  1302. #ifdef IXGBE_FCOE
  1303. /**
  1304. * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
  1305. * @ring: structure containing ring specific data
  1306. * @rx_desc: advanced rx descriptor
  1307. *
  1308. * Returns : true if it is FCoE pkt
  1309. */
  1310. static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring,
  1311. union ixgbe_adv_rx_desc *rx_desc)
  1312. {
  1313. __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
  1314. return test_bit(__IXGBE_RX_FCOE, &ring->state) &&
  1315. ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
  1316. (cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
  1317. IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
  1318. }
  1319. #endif /* IXGBE_FCOE */
  1320. /**
  1321. * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
  1322. * @ring: structure containing ring specific data
  1323. * @rx_desc: current Rx descriptor being processed
  1324. * @skb: skb currently being received and modified
  1325. **/
  1326. static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
  1327. union ixgbe_adv_rx_desc *rx_desc,
  1328. struct sk_buff *skb)
  1329. {
  1330. __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
  1331. bool encap_pkt = false;
  1332. skb_checksum_none_assert(skb);
  1333. /* Rx csum disabled */
  1334. if (!(ring->netdev->features & NETIF_F_RXCSUM))
  1335. return;
  1336. /* check for VXLAN and Geneve packets */
  1337. if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) {
  1338. encap_pkt = true;
  1339. skb->encapsulation = 1;
  1340. }
  1341. /* if IP and error */
  1342. if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
  1343. ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
  1344. ring->rx_stats.csum_err++;
  1345. return;
  1346. }
  1347. if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
  1348. return;
  1349. if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
  1350. /*
  1351. * 82599 errata, UDP frames with a 0 checksum can be marked as
  1352. * checksum errors.
  1353. */
  1354. if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_UDP)) &&
  1355. test_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state))
  1356. return;
  1357. ring->rx_stats.csum_err++;
  1358. return;
  1359. }
  1360. /* It must be a TCP or UDP packet with a valid checksum */
  1361. skb->ip_summed = CHECKSUM_UNNECESSARY;
  1362. if (encap_pkt) {
  1363. if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_OUTERIPCS))
  1364. return;
  1365. if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_OUTERIPER)) {
  1366. skb->ip_summed = CHECKSUM_NONE;
  1367. return;
  1368. }
  1369. /* If we checked the outer header let the stack know */
  1370. skb->csum_level = 1;
  1371. }
  1372. }
  1373. static inline unsigned int ixgbe_rx_offset(struct ixgbe_ring *rx_ring)
  1374. {
  1375. return ring_uses_build_skb(rx_ring) ? IXGBE_SKB_PAD : 0;
  1376. }
  1377. static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
  1378. struct ixgbe_rx_buffer *bi)
  1379. {
  1380. struct page *page = bi->page;
  1381. dma_addr_t dma;
  1382. /* since we are recycling buffers we should seldom need to alloc */
  1383. if (likely(page))
  1384. return true;
  1385. /* alloc new page for storage */
  1386. page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring));
  1387. if (unlikely(!page)) {
  1388. rx_ring->rx_stats.alloc_rx_page_failed++;
  1389. return false;
  1390. }
  1391. /* map page for use */
  1392. dma = dma_map_page_attrs(rx_ring->dev, page, 0,
  1393. ixgbe_rx_pg_size(rx_ring),
  1394. DMA_FROM_DEVICE,
  1395. IXGBE_RX_DMA_ATTR);
  1396. /*
  1397. * if mapping failed free memory back to system since
  1398. * there isn't much point in holding memory we can't use
  1399. */
  1400. if (dma_mapping_error(rx_ring->dev, dma)) {
  1401. __free_pages(page, ixgbe_rx_pg_order(rx_ring));
  1402. rx_ring->rx_stats.alloc_rx_page_failed++;
  1403. return false;
  1404. }
  1405. bi->dma = dma;
  1406. bi->page = page;
  1407. bi->page_offset = ixgbe_rx_offset(rx_ring);
  1408. bi->pagecnt_bias = 1;
  1409. rx_ring->rx_stats.alloc_rx_page++;
  1410. return true;
  1411. }
  1412. /**
  1413. * ixgbe_alloc_rx_buffers - Replace used receive buffers
  1414. * @rx_ring: ring to place buffers on
  1415. * @cleaned_count: number of buffers to replace
  1416. **/
  1417. void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
  1418. {
  1419. union ixgbe_adv_rx_desc *rx_desc;
  1420. struct ixgbe_rx_buffer *bi;
  1421. u16 i = rx_ring->next_to_use;
  1422. u16 bufsz;
  1423. /* nothing to do */
  1424. if (!cleaned_count)
  1425. return;
  1426. rx_desc = IXGBE_RX_DESC(rx_ring, i);
  1427. bi = &rx_ring->rx_buffer_info[i];
  1428. i -= rx_ring->count;
  1429. bufsz = ixgbe_rx_bufsz(rx_ring);
  1430. do {
  1431. if (!ixgbe_alloc_mapped_page(rx_ring, bi))
  1432. break;
  1433. /* sync the buffer for use by the device */
  1434. dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
  1435. bi->page_offset, bufsz,
  1436. DMA_FROM_DEVICE);
  1437. /*
  1438. * Refresh the desc even if buffer_addrs didn't change
  1439. * because each write-back erases this info.
  1440. */
  1441. rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
  1442. rx_desc++;
  1443. bi++;
  1444. i++;
  1445. if (unlikely(!i)) {
  1446. rx_desc = IXGBE_RX_DESC(rx_ring, 0);
  1447. bi = rx_ring->rx_buffer_info;
  1448. i -= rx_ring->count;
  1449. }
  1450. /* clear the length for the next_to_use descriptor */
  1451. rx_desc->wb.upper.length = 0;
  1452. cleaned_count--;
  1453. } while (cleaned_count);
  1454. i += rx_ring->count;
  1455. if (rx_ring->next_to_use != i) {
  1456. rx_ring->next_to_use = i;
  1457. /* update next to alloc since we have filled the ring */
  1458. rx_ring->next_to_alloc = i;
  1459. /* Force memory writes to complete before letting h/w
  1460. * know there are new descriptors to fetch. (Only
  1461. * applicable for weak-ordered memory model archs,
  1462. * such as IA-64).
  1463. */
  1464. wmb();
  1465. writel(i, rx_ring->tail);
  1466. }
  1467. }
  1468. static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
  1469. struct sk_buff *skb)
  1470. {
  1471. u16 hdr_len = skb_headlen(skb);
  1472. /* set gso_size to avoid messing up TCP MSS */
  1473. skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len),
  1474. IXGBE_CB(skb)->append_cnt);
  1475. skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
  1476. }
  1477. static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
  1478. struct sk_buff *skb)
  1479. {
  1480. /* if append_cnt is 0 then frame is not RSC */
  1481. if (!IXGBE_CB(skb)->append_cnt)
  1482. return;
  1483. rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt;
  1484. rx_ring->rx_stats.rsc_flush++;
  1485. ixgbe_set_rsc_gso_size(rx_ring, skb);
  1486. /* gso_size is computed using append_cnt so always clear it last */
  1487. IXGBE_CB(skb)->append_cnt = 0;
  1488. }
  1489. /**
  1490. * ixgbe_process_skb_fields - Populate skb header fields from Rx descriptor
  1491. * @rx_ring: rx descriptor ring packet is being transacted on
  1492. * @rx_desc: pointer to the EOP Rx descriptor
  1493. * @skb: pointer to current skb being populated
  1494. *
  1495. * This function checks the ring, descriptor, and packet information in
  1496. * order to populate the hash, checksum, VLAN, timestamp, protocol, and
  1497. * other fields within the skb.
  1498. **/
  1499. static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
  1500. union ixgbe_adv_rx_desc *rx_desc,
  1501. struct sk_buff *skb)
  1502. {
  1503. struct net_device *dev = rx_ring->netdev;
  1504. u32 flags = rx_ring->q_vector->adapter->flags;
  1505. ixgbe_update_rsc_stats(rx_ring, skb);
  1506. ixgbe_rx_hash(rx_ring, rx_desc, skb);
  1507. ixgbe_rx_checksum(rx_ring, rx_desc, skb);
  1508. if (unlikely(flags & IXGBE_FLAG_RX_HWTSTAMP_ENABLED))
  1509. ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
  1510. if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
  1511. ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
  1512. u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
  1513. __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
  1514. }
  1515. skb_record_rx_queue(skb, rx_ring->queue_index);
  1516. skb->protocol = eth_type_trans(skb, dev);
  1517. }
  1518. static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
  1519. struct sk_buff *skb)
  1520. {
  1521. napi_gro_receive(&q_vector->napi, skb);
  1522. }
  1523. /**
  1524. * ixgbe_is_non_eop - process handling of non-EOP buffers
  1525. * @rx_ring: Rx ring being processed
  1526. * @rx_desc: Rx descriptor for current buffer
  1527. * @skb: Current socket buffer containing buffer in progress
  1528. *
  1529. * This function updates next to clean. If the buffer is an EOP buffer
  1530. * this function exits returning false, otherwise it will place the
  1531. * sk_buff in the next buffer to be chained and return true indicating
  1532. * that this is in fact a non-EOP buffer.
  1533. **/
  1534. static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
  1535. union ixgbe_adv_rx_desc *rx_desc,
  1536. struct sk_buff *skb)
  1537. {
  1538. u32 ntc = rx_ring->next_to_clean + 1;
  1539. /* fetch, update, and store next to clean */
  1540. ntc = (ntc < rx_ring->count) ? ntc : 0;
  1541. rx_ring->next_to_clean = ntc;
  1542. prefetch(IXGBE_RX_DESC(rx_ring, ntc));
  1543. /* update RSC append count if present */
  1544. if (ring_is_rsc_enabled(rx_ring)) {
  1545. __le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
  1546. cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);
  1547. if (unlikely(rsc_enabled)) {
  1548. u32 rsc_cnt = le32_to_cpu(rsc_enabled);
  1549. rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
  1550. IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
  1551. /* update ntc based on RSC value */
  1552. ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
  1553. ntc &= IXGBE_RXDADV_NEXTP_MASK;
  1554. ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
  1555. }
  1556. }
  1557. /* if we are the last buffer then there is nothing else to do */
  1558. if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
  1559. return false;
  1560. /* place skb in next buffer to be received */
  1561. rx_ring->rx_buffer_info[ntc].skb = skb;
  1562. rx_ring->rx_stats.non_eop_descs++;
  1563. return true;
  1564. }
  1565. /**
  1566. * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail
  1567. * @rx_ring: rx descriptor ring packet is being transacted on
  1568. * @skb: pointer to current skb being adjusted
  1569. *
  1570. * This function is an ixgbe specific version of __pskb_pull_tail. The
  1571. * main difference between this version and the original function is that
  1572. * this function can make several assumptions about the state of things
  1573. * that allow for significant optimizations versus the standard function.
  1574. * As a result we can do things like drop a frag and maintain an accurate
  1575. * truesize for the skb.
  1576. */
  1577. static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
  1578. struct sk_buff *skb)
  1579. {
  1580. struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
  1581. unsigned char *va;
  1582. unsigned int pull_len;
  1583. /*
  1584. * it is valid to use page_address instead of kmap since we are
  1585. * working with pages allocated out of the lomem pool per
  1586. * alloc_page(GFP_ATOMIC)
  1587. */
  1588. va = skb_frag_address(frag);
  1589. /*
  1590. * we need the header to contain the greater of either ETH_HLEN or
  1591. * 60 bytes if the skb->len is less than 60 for skb_pad.
  1592. */
  1593. pull_len = eth_get_headlen(va, IXGBE_RX_HDR_SIZE);
  1594. /* align pull length to size of long to optimize memcpy performance */
  1595. skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
  1596. /* update all of the pointers */
  1597. skb_frag_size_sub(frag, pull_len);
  1598. frag->page_offset += pull_len;
  1599. skb->data_len -= pull_len;
  1600. skb->tail += pull_len;
  1601. }
  1602. /**
  1603. * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB
  1604. * @rx_ring: rx descriptor ring packet is being transacted on
  1605. * @skb: pointer to current skb being updated
  1606. *
  1607. * This function provides a basic DMA sync up for the first fragment of an
  1608. * skb. The reason for doing this is that the first fragment cannot be
  1609. * unmapped until we have reached the end of packet descriptor for a buffer
  1610. * chain.
  1611. */
  1612. static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
  1613. struct sk_buff *skb)
  1614. {
  1615. /* if the page was released unmap it, else just sync our portion */
  1616. if (unlikely(IXGBE_CB(skb)->page_released)) {
  1617. dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma,
  1618. ixgbe_rx_pg_size(rx_ring),
  1619. DMA_FROM_DEVICE,
  1620. IXGBE_RX_DMA_ATTR);
  1621. } else {
  1622. struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
  1623. dma_sync_single_range_for_cpu(rx_ring->dev,
  1624. IXGBE_CB(skb)->dma,
  1625. frag->page_offset,
  1626. skb_frag_size(frag),
  1627. DMA_FROM_DEVICE);
  1628. }
  1629. }
  1630. /**
  1631. * ixgbe_cleanup_headers - Correct corrupted or empty headers
  1632. * @rx_ring: rx descriptor ring packet is being transacted on
  1633. * @rx_desc: pointer to the EOP Rx descriptor
  1634. * @skb: pointer to current skb being fixed
  1635. *
1636. * Check if the skb is valid: in the XDP case it will be an error pointer.
1637. * Return true in that case to abort processing and advance to the next
1638. * descriptor.
  1639. *
  1640. * Check for corrupted packet headers caused by senders on the local L2
  1641. * embedded NIC switch not setting up their Tx Descriptors right. These
  1642. * should be very rare.
  1643. *
  1644. * Also address the case where we are pulling data in on pages only
  1645. * and as such no data is present in the skb header.
  1646. *
  1647. * In addition if skb is not at least 60 bytes we need to pad it so that
  1648. * it is large enough to qualify as a valid Ethernet frame.
  1649. *
  1650. * Returns true if an error was encountered and skb was freed.
  1651. **/
  1652. static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
  1653. union ixgbe_adv_rx_desc *rx_desc,
  1654. struct sk_buff *skb)
  1655. {
  1656. struct net_device *netdev = rx_ring->netdev;
  1657. /* XDP packets use error pointer so abort at this point */
  1658. if (IS_ERR(skb))
  1659. return true;
  1660. /* verify that the packet does not have any known errors */
  1661. if (unlikely(ixgbe_test_staterr(rx_desc,
  1662. IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
  1663. !(netdev->features & NETIF_F_RXALL))) {
  1664. dev_kfree_skb_any(skb);
  1665. return true;
  1666. }
  1667. /* place header in linear portion of buffer */
  1668. if (!skb_headlen(skb))
  1669. ixgbe_pull_tail(rx_ring, skb);
  1670. #ifdef IXGBE_FCOE
  1671. /* do not attempt to pad FCoE Frames as this will disrupt DDP */
  1672. if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
  1673. return false;
  1674. #endif
  1675. /* if eth_skb_pad returns an error the skb was freed */
  1676. if (eth_skb_pad(skb))
  1677. return true;
  1678. return false;
  1679. }
  1680. /**
  1681. * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring
  1682. * @rx_ring: rx descriptor ring to store buffers on
  1683. * @old_buff: donor buffer to have page reused
  1684. *
  1685. * Synchronizes page for reuse by the adapter
  1686. **/
  1687. static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
  1688. struct ixgbe_rx_buffer *old_buff)
  1689. {
  1690. struct ixgbe_rx_buffer *new_buff;
  1691. u16 nta = rx_ring->next_to_alloc;
  1692. new_buff = &rx_ring->rx_buffer_info[nta];
  1693. /* update, and store next to alloc */
  1694. nta++;
  1695. rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
  1696. /* Transfer page from old buffer to new buffer.
  1697. * Move each member individually to avoid possible store
  1698. * forwarding stalls and unnecessary copy of skb.
  1699. */
  1700. new_buff->dma = old_buff->dma;
  1701. new_buff->page = old_buff->page;
  1702. new_buff->page_offset = old_buff->page_offset;
  1703. new_buff->pagecnt_bias = old_buff->pagecnt_bias;
  1704. }
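/* Pages from a remote NUMA node or from the pfmemalloc reserves are not
 * worth recycling.
 */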
  1705. static inline bool ixgbe_page_is_reserved(struct page *page)
  1706. {
  1707. return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
  1708. }
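/* Decide whether the page-offset flipping scheme can hand this page back to
 * hardware, or whether it must be unmapped and replaced.
 */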
  1709. static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
  1710. {
  1711. unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
  1712. struct page *page = rx_buffer->page;
  1713. /* avoid re-using remote pages */
  1714. if (unlikely(ixgbe_page_is_reserved(page)))
  1715. return false;
  1716. #if (PAGE_SIZE < 8192)
  1717. /* if we are only owner of page we can reuse it */
  1718. if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
  1719. return false;
  1720. #else
  1721. /* The last offset is a bit aggressive in that we assume the
  1722. * worst case of FCoE being enabled and using a 3K buffer.
  1723. * However this should have minimal impact as the 1K extra is
  1724. * still less than one buffer in size.
  1725. */
  1726. #define IXGBE_LAST_OFFSET \
  1727. (SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBE_RXBUFFER_3K)
  1728. if (rx_buffer->page_offset > IXGBE_LAST_OFFSET)
  1729. return false;
  1730. #endif
  1731. /* If we have drained the page fragment pool we need to update
  1732. * the pagecnt_bias and page count so that we fully restock the
  1733. * number of references the driver holds.
  1734. */
  1735. if (unlikely(!pagecnt_bias)) {
  1736. page_ref_add(page, USHRT_MAX);
  1737. rx_buffer->pagecnt_bias = USHRT_MAX;
  1738. }
  1739. return true;
  1740. }
  1741. /**
  1742. * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
  1743. * @rx_ring: rx descriptor ring to transact packets on
  1744. * @rx_buffer: buffer containing page to add
1745. * @size: number of bytes written by hardware into this buffer
  1746. * @skb: sk_buff to place the data into
  1747. *
1748. * This function will add the data contained in rx_buffer->page to the skb
1749. * by attaching the page as a fragment; pulling headers into the linear
1750. * area, when needed, is handled separately (see ixgbe_pull_tail()).
1751. *
1752. * The function then updates the page offset so that the next receive uses
1753. * a different portion of the page; whether the page itself can be reused
1754. * is decided later by ixgbe_can_reuse_rx_page().
  1755. **/
  1756. static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
  1757. struct ixgbe_rx_buffer *rx_buffer,
  1758. struct sk_buff *skb,
  1759. unsigned int size)
  1760. {
  1761. #if (PAGE_SIZE < 8192)
  1762. unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
  1763. #else
  1764. unsigned int truesize = ring_uses_build_skb(rx_ring) ?
  1765. SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) :
  1766. SKB_DATA_ALIGN(size);
  1767. #endif
  1768. skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
  1769. rx_buffer->page_offset, size, truesize);
  1770. #if (PAGE_SIZE < 8192)
  1771. rx_buffer->page_offset ^= truesize;
  1772. #else
  1773. rx_buffer->page_offset += truesize;
  1774. #endif
  1775. }
  1776. static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
  1777. union ixgbe_adv_rx_desc *rx_desc,
  1778. struct sk_buff **skb,
  1779. const unsigned int size)
  1780. {
  1781. struct ixgbe_rx_buffer *rx_buffer;
  1782. rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
  1783. prefetchw(rx_buffer->page);
  1784. *skb = rx_buffer->skb;
  1785. /* Delay unmapping of the first packet. It carries the header
  1786. * information, HW may still access the header after the writeback.
  1787. * Only unmap it when EOP is reached
  1788. */
  1789. if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) {
  1790. if (!*skb)
  1791. goto skip_sync;
  1792. } else {
  1793. if (*skb)
  1794. ixgbe_dma_sync_frag(rx_ring, *skb);
  1795. }
  1796. /* we are reusing so sync this buffer for CPU use */
  1797. dma_sync_single_range_for_cpu(rx_ring->dev,
  1798. rx_buffer->dma,
  1799. rx_buffer->page_offset,
  1800. size,
  1801. DMA_FROM_DEVICE);
  1802. skip_sync:
  1803. rx_buffer->pagecnt_bias--;
  1804. return rx_buffer;
  1805. }
  1806. static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
  1807. struct ixgbe_rx_buffer *rx_buffer,
  1808. struct sk_buff *skb)
  1809. {
  1810. if (ixgbe_can_reuse_rx_page(rx_buffer)) {
  1811. /* hand second half of page back to the ring */
  1812. ixgbe_reuse_rx_page(rx_ring, rx_buffer);
  1813. } else {
  1814. if (!IS_ERR(skb) && IXGBE_CB(skb)->dma == rx_buffer->dma) {
  1815. /* the page has been released from the ring */
  1816. IXGBE_CB(skb)->page_released = true;
  1817. } else {
  1818. /* we are not reusing the buffer so unmap it */
  1819. dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
  1820. ixgbe_rx_pg_size(rx_ring),
  1821. DMA_FROM_DEVICE,
  1822. IXGBE_RX_DMA_ATTR);
  1823. }
  1824. __page_frag_cache_drain(rx_buffer->page,
  1825. rx_buffer->pagecnt_bias);
  1826. }
  1827. /* clear contents of rx_buffer */
  1828. rx_buffer->page = NULL;
  1829. rx_buffer->skb = NULL;
  1830. }
  1831. static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring,
  1832. struct ixgbe_rx_buffer *rx_buffer,
  1833. struct xdp_buff *xdp,
  1834. union ixgbe_adv_rx_desc *rx_desc)
  1835. {
  1836. unsigned int size = xdp->data_end - xdp->data;
  1837. #if (PAGE_SIZE < 8192)
  1838. unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
  1839. #else
  1840. unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
  1841. xdp->data_hard_start);
  1842. #endif
  1843. struct sk_buff *skb;
  1844. /* prefetch first cache line of first page */
  1845. prefetch(xdp->data);
  1846. #if L1_CACHE_BYTES < 128
  1847. prefetch(xdp->data + L1_CACHE_BYTES);
  1848. #endif
  1849. /* Note, we get here by enabling legacy-rx via:
  1850. *
  1851. * ethtool --set-priv-flags <dev> legacy-rx on
  1852. *
  1853. * In this mode, we currently get 0 extra XDP headroom as
  1854. * opposed to having legacy-rx off, where we process XDP
  1855. * packets going to stack via ixgbe_build_skb(). The latter
  1856. * provides us currently with 192 bytes of headroom.
  1857. *
  1858. * For ixgbe_construct_skb() mode it means that the
  1859. * xdp->data_meta will always point to xdp->data, since
  1860. * the helper cannot expand the head. Should this ever
  1861. * change in future for legacy-rx mode on, then lets also
  1862. * add xdp->data_meta handling here.
  1863. */
  1864. /* allocate a skb to store the frags */
  1865. skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBE_RX_HDR_SIZE);
  1866. if (unlikely(!skb))
  1867. return NULL;
  1868. if (size > IXGBE_RX_HDR_SIZE) {
  1869. if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
  1870. IXGBE_CB(skb)->dma = rx_buffer->dma;
  1871. skb_add_rx_frag(skb, 0, rx_buffer->page,
  1872. xdp->data - page_address(rx_buffer->page),
  1873. size, truesize);
  1874. #if (PAGE_SIZE < 8192)
  1875. rx_buffer->page_offset ^= truesize;
  1876. #else
  1877. rx_buffer->page_offset += truesize;
  1878. #endif
  1879. } else {
  1880. memcpy(__skb_put(skb, size),
  1881. xdp->data, ALIGN(size, sizeof(long)));
  1882. rx_buffer->pagecnt_bias++;
  1883. }
  1884. return skb;
  1885. }
  1886. static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
  1887. struct ixgbe_rx_buffer *rx_buffer,
  1888. struct xdp_buff *xdp,
  1889. union ixgbe_adv_rx_desc *rx_desc)
  1890. {
  1891. unsigned int metasize = xdp->data - xdp->data_meta;
  1892. #if (PAGE_SIZE < 8192)
  1893. unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
  1894. #else
  1895. unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
  1896. SKB_DATA_ALIGN(xdp->data_end -
  1897. xdp->data_hard_start);
  1898. #endif
  1899. struct sk_buff *skb;
  1900. /* Prefetch first cache line of first page. If xdp->data_meta
  1901. * is unused, this points exactly at xdp->data, otherwise we
  1902. * likely have a consumer accessing first few bytes of meta
  1903. * data, and then actual data.
  1904. */
  1905. prefetch(xdp->data_meta);
  1906. #if L1_CACHE_BYTES < 128
  1907. prefetch(xdp->data_meta + L1_CACHE_BYTES);
  1908. #endif
  1909. /* build an skb around the page buffer */
  1910. skb = build_skb(xdp->data_hard_start, truesize);
  1911. if (unlikely(!skb))
  1912. return NULL;
  1913. /* update pointers within the skb to store the data */
  1914. skb_reserve(skb, xdp->data - xdp->data_hard_start);
  1915. __skb_put(skb, xdp->data_end - xdp->data);
  1916. if (metasize)
  1917. skb_metadata_set(skb, metasize);
  1918. /* record DMA address if this is the start of a chain of buffers */
  1919. if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
  1920. IXGBE_CB(skb)->dma = rx_buffer->dma;
  1921. /* update buffer offset */
  1922. #if (PAGE_SIZE < 8192)
  1923. rx_buffer->page_offset ^= truesize;
  1924. #else
  1925. rx_buffer->page_offset += truesize;
  1926. #endif
  1927. return skb;
  1928. }
  1929. #define IXGBE_XDP_PASS 0
  1930. #define IXGBE_XDP_CONSUMED 1
  1931. #define IXGBE_XDP_TX 2
  1932. static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
  1933. struct xdp_buff *xdp);
  1934. static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
  1935. struct ixgbe_ring *rx_ring,
  1936. struct xdp_buff *xdp)
  1937. {
  1938. int err, result = IXGBE_XDP_PASS;
  1939. struct bpf_prog *xdp_prog;
  1940. u32 act;
  1941. rcu_read_lock();
  1942. xdp_prog = READ_ONCE(rx_ring->xdp_prog);
  1943. if (!xdp_prog)
  1944. goto xdp_out;
  1945. act = bpf_prog_run_xdp(xdp_prog, xdp);
  1946. switch (act) {
  1947. case XDP_PASS:
  1948. break;
  1949. case XDP_TX:
  1950. result = ixgbe_xmit_xdp_ring(adapter, xdp);
  1951. break;
  1952. case XDP_REDIRECT:
  1953. err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
  1954. if (!err)
  1955. result = IXGBE_XDP_TX;
  1956. else
  1957. result = IXGBE_XDP_CONSUMED;
  1958. break;
  1959. default:
  1960. bpf_warn_invalid_xdp_action(act);
  1961. /* fallthrough */
  1962. case XDP_ABORTED:
  1963. trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
  1964. /* fallthrough -- handle aborts by dropping packet */
  1965. case XDP_DROP:
  1966. result = IXGBE_XDP_CONSUMED;
  1967. break;
  1968. }
  1969. xdp_out:
  1970. rcu_read_unlock();
  1971. return ERR_PTR(-result);
  1972. }
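/* Illustrative sketch, not part of the driver (and not the kernel's err.h):
 * a stand-alone model of how the XDP verdict above is returned through the
 * skb pointer.  ERR_PTR(-result) yields NULL for IXGBE_XDP_PASS and a small
 * negative "pointer" otherwise, which the caller in ixgbe_clean_rx_irq()
 * decodes with IS_ERR()/PTR_ERR().
 */
#if 0
#include <stdio.h>

#define DEMO_XDP_PASS		0
#define DEMO_XDP_CONSUMED	1
#define DEMO_XDP_TX		2

static inline void *demo_err_ptr(long err)	{ return (void *)err; }
static inline long demo_ptr_err(const void *p)	{ return (long)p; }
static inline int demo_is_err(const void *p)
{
	return (unsigned long)p >= (unsigned long)-4095;
}

int main(void)
{
	int verdicts[] = { DEMO_XDP_PASS, DEMO_XDP_CONSUMED, DEMO_XDP_TX };
	int i;

	for (i = 0; i < 3; i++) {
		void *skb = demo_err_ptr(-verdicts[i]);

		if (!demo_is_err(skb))
			printf("verdict %d: pass, build an skb\n", verdicts[i]);
		else if (demo_ptr_err(skb) == -DEMO_XDP_TX)
			printf("verdict %d: queued for XDP TX\n", verdicts[i]);
		else
			printf("verdict %d: dropped/consumed\n", verdicts[i]);
	}
	return 0;
}
#endif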
  1973. static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring,
  1974. struct ixgbe_rx_buffer *rx_buffer,
  1975. unsigned int size)
  1976. {
  1977. #if (PAGE_SIZE < 8192)
  1978. unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
  1979. rx_buffer->page_offset ^= truesize;
  1980. #else
  1981. unsigned int truesize = ring_uses_build_skb(rx_ring) ?
  1982. SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) :
  1983. SKB_DATA_ALIGN(size);
  1984. rx_buffer->page_offset += truesize;
  1985. #endif
  1986. }
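/* Illustrative sketch, not part of the driver: with 4K pages the receive
 * buffer is half a page, and XOR-ing page_offset with truesize is what
 * bounces the buffer between the two halves, as done above and in
 * ixgbe_add_rx_frag().  On PAGE_SIZE >= 8192 builds the offset simply
 * advances by truesize instead.
 */
#if 0
#include <stdio.h>

int main(void)
{
	const unsigned int truesize = 2048;	/* half of a 4K page */
	unsigned int page_offset = 0;
	int i;

	for (i = 0; i < 4; i++) {
		printf("use bytes %u-%u of the page\n",
		       page_offset, page_offset + truesize - 1);
		page_offset ^= truesize;	/* flip to the other half */
	}
	return 0;
}
#endif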
  1987. /**
  1988. * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
  1989. * @q_vector: structure containing interrupt and ring information
  1990. * @rx_ring: rx descriptor ring to transact packets on
  1991. * @budget: Total limit on number of packets to process
  1992. *
  1993. * This function provides a "bounce buffer" approach to Rx interrupt
  1994. * processing. The advantage to this is that on systems that have
  1995. * expensive overhead for IOMMU access this provides a means of avoiding
  1996. * it by maintaining the mapping of the page to the system.
  1997. *
  1998. * Returns amount of work completed
  1999. **/
  2000. static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
  2001. struct ixgbe_ring *rx_ring,
  2002. const int budget)
  2003. {
  2004. unsigned int total_rx_bytes = 0, total_rx_packets = 0;
  2005. struct ixgbe_adapter *adapter = q_vector->adapter;
  2006. #ifdef IXGBE_FCOE
  2007. int ddp_bytes;
  2008. unsigned int mss = 0;
  2009. #endif /* IXGBE_FCOE */
  2010. u16 cleaned_count = ixgbe_desc_unused(rx_ring);
  2011. bool xdp_xmit = false;
  2012. while (likely(total_rx_packets < budget)) {
  2013. union ixgbe_adv_rx_desc *rx_desc;
  2014. struct ixgbe_rx_buffer *rx_buffer;
  2015. struct sk_buff *skb;
  2016. struct xdp_buff xdp;
  2017. unsigned int size;
  2018. /* return some buffers to hardware, one at a time is too slow */
  2019. if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
  2020. ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
  2021. cleaned_count = 0;
  2022. }
  2023. rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
  2024. size = le16_to_cpu(rx_desc->wb.upper.length);
  2025. if (!size)
  2026. break;
  2027. /* This memory barrier is needed to keep us from reading
  2028. * any other fields out of the rx_desc until we know the
  2029. * descriptor has been written back
  2030. */
  2031. dma_rmb();
  2032. rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size);
  2033. /* retrieve a buffer from the ring */
  2034. if (!skb) {
  2035. xdp.data = page_address(rx_buffer->page) +
  2036. rx_buffer->page_offset;
  2037. xdp.data_meta = xdp.data;
  2038. xdp.data_hard_start = xdp.data -
  2039. ixgbe_rx_offset(rx_ring);
  2040. xdp.data_end = xdp.data + size;
  2041. skb = ixgbe_run_xdp(adapter, rx_ring, &xdp);
  2042. }
  2043. if (IS_ERR(skb)) {
  2044. if (PTR_ERR(skb) == -IXGBE_XDP_TX) {
  2045. xdp_xmit = true;
  2046. ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
  2047. } else {
  2048. rx_buffer->pagecnt_bias++;
  2049. }
  2050. total_rx_packets++;
  2051. total_rx_bytes += size;
  2052. } else if (skb) {
  2053. ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, size);
  2054. } else if (ring_uses_build_skb(rx_ring)) {
  2055. skb = ixgbe_build_skb(rx_ring, rx_buffer,
  2056. &xdp, rx_desc);
  2057. } else {
  2058. skb = ixgbe_construct_skb(rx_ring, rx_buffer,
  2059. &xdp, rx_desc);
  2060. }
  2061. /* exit if we failed to retrieve a buffer */
  2062. if (!skb) {
  2063. rx_ring->rx_stats.alloc_rx_buff_failed++;
  2064. rx_buffer->pagecnt_bias++;
  2065. break;
  2066. }
  2067. ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb);
  2068. cleaned_count++;
  2069. /* place incomplete frames back on ring for completion */
  2070. if (ixgbe_is_non_eop(rx_ring, rx_desc, skb))
  2071. continue;
  2072. /* verify the packet layout is correct */
  2073. if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
  2074. continue;
  2075. /* probably a little skewed due to removing CRC */
  2076. total_rx_bytes += skb->len;
  2077. /* populate checksum, timestamp, VLAN, and protocol */
  2078. ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
  2079. #ifdef IXGBE_FCOE
  2080. /* if ddp, not passing to ULD unless for FCP_RSP or error */
  2081. if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
  2082. ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
  2083. /* include DDPed FCoE data */
  2084. if (ddp_bytes > 0) {
  2085. if (!mss) {
  2086. mss = rx_ring->netdev->mtu -
  2087. sizeof(struct fcoe_hdr) -
  2088. sizeof(struct fc_frame_header) -
  2089. sizeof(struct fcoe_crc_eof);
  2090. if (mss > 512)
  2091. mss &= ~511;
  2092. }
  2093. total_rx_bytes += ddp_bytes;
  2094. total_rx_packets += DIV_ROUND_UP(ddp_bytes,
  2095. mss);
  2096. }
  2097. if (!ddp_bytes) {
  2098. dev_kfree_skb_any(skb);
  2099. continue;
  2100. }
  2101. }
  2102. #endif /* IXGBE_FCOE */
  2103. ixgbe_rx_skb(q_vector, skb);
  2104. /* update budget accounting */
  2105. total_rx_packets++;
  2106. }
  2107. if (xdp_xmit) {
  2108. struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
  2109. /* Force memory writes to complete before letting h/w
  2110. * know there are new descriptors to fetch.
  2111. */
  2112. wmb();
  2113. writel(ring->next_to_use, ring->tail);
  2114. xdp_do_flush_map();
  2115. }
  2116. u64_stats_update_begin(&rx_ring->syncp);
  2117. rx_ring->stats.packets += total_rx_packets;
  2118. rx_ring->stats.bytes += total_rx_bytes;
  2119. u64_stats_update_end(&rx_ring->syncp);
  2120. q_vector->rx.total_packets += total_rx_packets;
  2121. q_vector->rx.total_bytes += total_rx_bytes;
  2122. return total_rx_packets;
  2123. }
  2124. /**
  2125. * ixgbe_configure_msix - Configure MSI-X hardware
  2126. * @adapter: board private structure
  2127. *
  2128. * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
  2129. * interrupts.
  2130. **/
  2131. static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
  2132. {
  2133. struct ixgbe_q_vector *q_vector;
  2134. int v_idx;
  2135. u32 mask;
  2136. /* Populate MSIX to EITR Select */
  2137. if (adapter->num_vfs > 32) {
  2138. u32 eitrsel = BIT(adapter->num_vfs - 32) - 1;
  2139. IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
  2140. }
  2141. /*
  2142. * Populate the IVAR table and set the ITR values to the
  2143. * corresponding register.
  2144. */
  2145. for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
  2146. struct ixgbe_ring *ring;
  2147. q_vector = adapter->q_vector[v_idx];
  2148. ixgbe_for_each_ring(ring, q_vector->rx)
  2149. ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);
  2150. ixgbe_for_each_ring(ring, q_vector->tx)
  2151. ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
  2152. ixgbe_write_eitr(q_vector);
  2153. }
  2154. switch (adapter->hw.mac.type) {
  2155. case ixgbe_mac_82598EB:
  2156. ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
  2157. v_idx);
  2158. break;
  2159. case ixgbe_mac_82599EB:
  2160. case ixgbe_mac_X540:
  2161. case ixgbe_mac_X550:
  2162. case ixgbe_mac_X550EM_x:
  2163. case ixgbe_mac_x550em_a:
  2164. ixgbe_set_ivar(adapter, -1, 1, v_idx);
  2165. break;
  2166. default:
  2167. break;
  2168. }
  2169. IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
  2170. /* set up to autoclear timer, and the vectors */
  2171. mask = IXGBE_EIMS_ENABLE_MASK;
  2172. mask &= ~(IXGBE_EIMS_OTHER |
  2173. IXGBE_EIMS_MAILBOX |
  2174. IXGBE_EIMS_LSC);
  2175. IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
  2176. }
  2177. enum latency_range {
  2178. lowest_latency = 0,
  2179. low_latency = 1,
  2180. bulk_latency = 2,
  2181. latency_invalid = 255
  2182. };
  2183. /**
  2184. * ixgbe_update_itr - update the dynamic ITR value based on statistics
  2185. * @q_vector: structure containing interrupt and ring information
  2186. * @ring_container: structure containing ring performance data
  2187. *
  2188. * Stores a new ITR value based on packets and byte
  2189. * counts during the last interrupt. The advantage of per interrupt
  2190. * computation is faster updates and more accurate ITR for the current
  2191. * traffic pattern. Constants in this function were computed
  2192. * based on theoretical maximum wire speed and thresholds were set based
  2193. * on testing data as well as attempting to minimize response time
  2194. * while increasing bulk throughput.
  2195. * This functionality is controlled by the InterruptThrottleRate module
  2196. * parameter (see ixgbe_param.c)
  2197. **/
  2198. static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
  2199. struct ixgbe_ring_container *ring_container)
  2200. {
  2201. unsigned int itr = IXGBE_ITR_ADAPTIVE_MIN_USECS |
  2202. IXGBE_ITR_ADAPTIVE_LATENCY;
  2203. unsigned int avg_wire_size, packets, bytes;
  2204. unsigned long next_update = jiffies;
  2205. /* If we don't have any rings just leave ourselves set for maximum
  2206. * possible latency so we take ourselves out of the equation.
  2207. */
  2208. if (!ring_container->ring)
  2209. return;
  2210. /* If we didn't update within up to 1 - 2 jiffies we can assume
  2211. * that either packets are coming in so slow there hasn't been
  2212. * any work, or that there is so much work that NAPI is dealing
  2213. * with interrupt moderation and we don't need to do anything.
  2214. */
  2215. if (time_after(next_update, ring_container->next_update))
  2216. goto clear_counts;
  2217. packets = ring_container->total_packets;
  2218. /* We have no packets to actually measure against. This means
  2219. * either one of the other queues on this vector is active or
  2220. * we are a Tx queue doing TSO with too high of an interrupt rate.
  2221. *
  2222. * When this occurs just tick up our delay by the minimum value
  2223. * and hope that this extra delay will prevent us from being called
  2224. * without any work on our queue.
  2225. */
  2226. if (!packets) {
  2227. itr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC;
  2228. if (itr > IXGBE_ITR_ADAPTIVE_MAX_USECS)
  2229. itr = IXGBE_ITR_ADAPTIVE_MAX_USECS;
  2230. itr += ring_container->itr & IXGBE_ITR_ADAPTIVE_LATENCY;
  2231. goto clear_counts;
  2232. }
  2233. bytes = ring_container->total_bytes;
  2234. /* If packets are less than 4 or bytes are less than 9000 assume
  2235. * insufficient data to use bulk rate limiting approach. We are
  2236. * likely latency driven.
  2237. */
  2238. if (packets < 4 && bytes < 9000) {
  2239. itr = IXGBE_ITR_ADAPTIVE_LATENCY;
  2240. goto adjust_by_size;
  2241. }
  2242. /* Between 4 and 48 we can assume that our current interrupt delay
  2243. * is only slightly too low. As such we should increase it by a small
  2244. * fixed amount.
  2245. */
  2246. if (packets < 48) {
  2247. itr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC;
  2248. if (itr > IXGBE_ITR_ADAPTIVE_MAX_USECS)
  2249. itr = IXGBE_ITR_ADAPTIVE_MAX_USECS;
  2250. goto clear_counts;
  2251. }
  2252. /* Between 48 and 96 is our "goldilocks" zone where we are working
  2253. * out "just right". Just report that our current ITR is good for us.
  2254. */
  2255. if (packets < 96) {
  2256. itr = q_vector->itr >> 2;
  2257. goto clear_counts;
  2258. }
  2259. /* If packet count is 96 or greater we are likely looking at a slight
  2260. * overrun of the delay we want. Try halving our delay to see if that
  2261. * will cut the number of packets in half per interrupt.
  2262. */
  2263. if (packets < 256) {
  2264. itr = q_vector->itr >> 3;
  2265. if (itr < IXGBE_ITR_ADAPTIVE_MIN_USECS)
  2266. itr = IXGBE_ITR_ADAPTIVE_MIN_USECS;
  2267. goto clear_counts;
  2268. }
  2269. /* The paths below assume we are dealing with a bulk ITR since number
  2270. * of packets is 256 or greater. We are just going to have to compute
  2271. * a value and try to bring the count under control, though for smaller
  2272. * packet sizes there isn't much we can do as NAPI polling will likely
  2273. * be kicking in sooner rather than later.
  2274. */
  2275. itr = IXGBE_ITR_ADAPTIVE_BULK;
  2276. adjust_by_size:
  2277. /* If packet counts are 256 or greater we can assume we have a gross
  2278. * overestimation of what the rate should be. Instead of trying to fine
  2279. * tune it just use the formula below to try and dial in an exact value
  2280. * given the current packet size of the frame.
  2281. */
  2282. avg_wire_size = bytes / packets;
  2283. /* The following is a crude approximation of:
  2284. * wmem_default / (size + overhead) = desired_pkts_per_int
  2285. * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
  2286. * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
  2287. *
  2288. * Assuming wmem_default is 212992 and overhead is 640 bytes per
  2289. * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
  2290. * formula down to
  2291. *
  2292. * (170 * (size + 24)) / (size + 640) = ITR
  2293. *
  2294. * We first do some math on the packet size and then finally bitshift
  2295. * by 8 after rounding up. We also have to account for PCIe link speed
  2296. * difference as ITR scales based on this.
  2297. */
  2298. if (avg_wire_size <= 60) {
  2299. /* Start at 50k ints/sec */
  2300. avg_wire_size = 5120;
  2301. } else if (avg_wire_size <= 316) {
  2302. /* 50K ints/sec to 16K ints/sec */
  2303. avg_wire_size *= 40;
  2304. avg_wire_size += 2720;
  2305. } else if (avg_wire_size <= 1084) {
  2306. /* 16K ints/sec to 9.2K ints/sec */
  2307. avg_wire_size *= 15;
  2308. avg_wire_size += 11452;
  2309. } else if (avg_wire_size <= 1980) {
  2310. /* 9.2K ints/sec to 8K ints/sec */
  2311. avg_wire_size *= 5;
  2312. avg_wire_size += 22420;
  2313. } else {
  2314. /* plateau at a limit of 8K ints/sec */
  2315. avg_wire_size = 32256;
  2316. }
  2317. /* If we are in low latency mode half our delay which doubles the rate
  2318. * to somewhere between 100K and 16K ints/sec
  2319. */
  2320. if (itr & IXGBE_ITR_ADAPTIVE_LATENCY)
  2321. avg_wire_size >>= 1;
  2322. /* Resultant value is 256 times larger than it needs to be. This
  2323. * gives us room to adjust the value as needed to either increase
  2324. * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
  2325. *
  2326. * Use addition as we have already recorded the new latency flag
  2327. * for the ITR value.
  2328. */
  2329. switch (q_vector->adapter->link_speed) {
  2330. case IXGBE_LINK_SPEED_10GB_FULL:
  2331. case IXGBE_LINK_SPEED_100_FULL:
  2332. default:
  2333. itr += DIV_ROUND_UP(avg_wire_size,
  2334. IXGBE_ITR_ADAPTIVE_MIN_INC * 256) *
  2335. IXGBE_ITR_ADAPTIVE_MIN_INC;
  2336. break;
  2337. case IXGBE_LINK_SPEED_2_5GB_FULL:
  2338. case IXGBE_LINK_SPEED_1GB_FULL:
  2339. case IXGBE_LINK_SPEED_10_FULL:
  2340. itr += DIV_ROUND_UP(avg_wire_size,
  2341. IXGBE_ITR_ADAPTIVE_MIN_INC * 64) *
  2342. IXGBE_ITR_ADAPTIVE_MIN_INC;
  2343. break;
  2344. }
  2345. clear_counts:
  2346. /* write back value */
  2347. ring_container->itr = itr;
  2348. /* next update should occur within next jiffy */
  2349. ring_container->next_update = next_update + 1;
  2350. ring_container->total_bytes = 0;
  2351. ring_container->total_packets = 0;
  2352. }
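/* Illustrative sketch, not part of the driver: the piecewise mapping above,
 * reduced to a stand-alone function.  It turns the average wire size of the
 * packets seen during the last interrupt into an approximate delay in usecs
 * for a 10G link; the real code additionally rounds the result up in
 * IXGBE_ITR_ADAPTIVE_MIN_INC steps and keeps a latency/bulk flag in the low
 * bits.
 */
#if 0
#include <stdio.h>

static unsigned int demo_itr_usecs(unsigned int bytes, unsigned int packets,
				   int low_latency)
{
	unsigned int avg_wire_size = bytes / packets;

	if (avg_wire_size <= 60)
		avg_wire_size = 5120;			/* ~50K ints/sec floor */
	else if (avg_wire_size <= 316)
		avg_wire_size = avg_wire_size * 40 + 2720;
	else if (avg_wire_size <= 1084)
		avg_wire_size = avg_wire_size * 15 + 11452;
	else if (avg_wire_size <= 1980)
		avg_wire_size = avg_wire_size * 5 + 22420;
	else
		avg_wire_size = 32256;			/* ~8K ints/sec cap */

	if (low_latency)
		avg_wire_size >>= 1;	/* double the interrupt rate */

	return avg_wire_size / 256;	/* value is 256x too large at 10G */
}

int main(void)
{
	printf("64B bulk   : ~%u usecs\n", demo_itr_usecs(64 * 300, 300, 0));
	printf("1500B bulk : ~%u usecs\n", demo_itr_usecs(1500 * 300, 300, 0));
	printf("64B latency: ~%u usecs\n", demo_itr_usecs(64 * 4, 4, 1));
	return 0;
}
#endif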
  2353. /**
  2354. * ixgbe_write_eitr - write EITR register in hardware specific way
  2355. * @q_vector: structure containing interrupt and ring information
  2356. *
  2357. * This function is made to be called by ethtool and by the driver
  2358. * when it needs to update EITR registers at runtime. Hardware
  2359. * specific quirks/differences are taken care of here.
  2360. */
  2361. void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
  2362. {
  2363. struct ixgbe_adapter *adapter = q_vector->adapter;
  2364. struct ixgbe_hw *hw = &adapter->hw;
  2365. int v_idx = q_vector->v_idx;
  2366. u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
  2367. switch (adapter->hw.mac.type) {
  2368. case ixgbe_mac_82598EB:
  2369. /* must write high and low 16 bits to reset counter */
  2370. itr_reg |= (itr_reg << 16);
  2371. break;
  2372. case ixgbe_mac_82599EB:
  2373. case ixgbe_mac_X540:
  2374. case ixgbe_mac_X550:
  2375. case ixgbe_mac_X550EM_x:
  2376. case ixgbe_mac_x550em_a:
  2377. /*
  2378. * set the WDIS bit to not clear the timer bits and cause an
  2379. * immediate assertion of the interrupt
  2380. */
  2381. itr_reg |= IXGBE_EITR_CNT_WDIS;
  2382. break;
  2383. default:
  2384. break;
  2385. }
  2386. IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
  2387. }
  2388. static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
  2389. {
  2390. u32 new_itr;
  2391. ixgbe_update_itr(q_vector, &q_vector->tx);
  2392. ixgbe_update_itr(q_vector, &q_vector->rx);
  2393. /* use the smallest value of new ITR delay calculations */
  2394. new_itr = min(q_vector->rx.itr, q_vector->tx.itr);
  2395. /* Clear latency flag if set, shift into correct position */
  2396. new_itr &= ~IXGBE_ITR_ADAPTIVE_LATENCY;
  2397. new_itr <<= 2;
  2398. if (new_itr != q_vector->itr) {
  2399. /* save the algorithm value here */
  2400. q_vector->itr = new_itr;
  2401. ixgbe_write_eitr(q_vector);
  2402. }
  2403. }
  2404. /**
  2405. * ixgbe_check_overtemp_subtask - check for over temperature
  2406. * @adapter: pointer to adapter
  2407. **/
  2408. static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
  2409. {
  2410. struct ixgbe_hw *hw = &adapter->hw;
  2411. u32 eicr = adapter->interrupt_event;
  2412. s32 rc;
  2413. if (test_bit(__IXGBE_DOWN, &adapter->state))
  2414. return;
  2415. if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT))
  2416. return;
  2417. adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT;
  2418. switch (hw->device_id) {
  2419. case IXGBE_DEV_ID_82599_T3_LOM:
  2420. /*
  2421. * Since the warning interrupt is for both ports
  2422. * we don't have to check if:
  2423. * - This interrupt wasn't for our port.
  2424. * - We may have missed the interrupt, so we always have to
  2425. * check if we got an LSC
  2426. */
  2427. if (!(eicr & IXGBE_EICR_GPI_SDP0_8259X) &&
  2428. !(eicr & IXGBE_EICR_LSC))
  2429. return;
  2430. if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) {
  2431. u32 speed;
  2432. bool link_up = false;
  2433. hw->mac.ops.check_link(hw, &speed, &link_up, false);
  2434. if (link_up)
  2435. return;
  2436. }
  2437. /* Check if this is not due to overtemp */
  2438. if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP)
  2439. return;
  2440. break;
  2441. case IXGBE_DEV_ID_X550EM_A_1G_T:
  2442. case IXGBE_DEV_ID_X550EM_A_1G_T_L:
  2443. rc = hw->phy.ops.check_overtemp(hw);
  2444. if (rc != IXGBE_ERR_OVERTEMP)
  2445. return;
  2446. break;
  2447. default:
  2448. if (adapter->hw.mac.type >= ixgbe_mac_X540)
  2449. return;
  2450. if (!(eicr & IXGBE_EICR_GPI_SDP0(hw)))
  2451. return;
  2452. break;
  2453. }
  2454. e_crit(drv, "%s\n", ixgbe_overheat_msg);
  2455. adapter->interrupt_event = 0;
  2456. }
  2457. static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
  2458. {
  2459. struct ixgbe_hw *hw = &adapter->hw;
  2460. if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
  2461. (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
  2462. e_crit(probe, "Fan has stopped, replace the adapter\n");
  2463. /* write to clear the interrupt */
  2464. IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
  2465. }
  2466. }
  2467. static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
  2468. {
  2469. struct ixgbe_hw *hw = &adapter->hw;
  2470. if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
  2471. return;
  2472. switch (adapter->hw.mac.type) {
  2473. case ixgbe_mac_82599EB:
  2474. /*
  2475. * Need to check link state so complete overtemp check
  2476. * on service task
  2477. */
  2478. if (((eicr & IXGBE_EICR_GPI_SDP0(hw)) ||
  2479. (eicr & IXGBE_EICR_LSC)) &&
  2480. (!test_bit(__IXGBE_DOWN, &adapter->state))) {
  2481. adapter->interrupt_event = eicr;
  2482. adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
  2483. ixgbe_service_event_schedule(adapter);
  2484. return;
  2485. }
  2486. return;
  2487. case ixgbe_mac_x550em_a:
  2488. if (eicr & IXGBE_EICR_GPI_SDP0_X550EM_a) {
  2489. adapter->interrupt_event = eicr;
  2490. adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
  2491. ixgbe_service_event_schedule(adapter);
  2492. IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
  2493. IXGBE_EICR_GPI_SDP0_X550EM_a);
  2494. IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICR,
  2495. IXGBE_EICR_GPI_SDP0_X550EM_a);
  2496. }
  2497. return;
  2498. case ixgbe_mac_X550:
  2499. case ixgbe_mac_X540:
  2500. if (!(eicr & IXGBE_EICR_TS))
  2501. return;
  2502. break;
  2503. default:
  2504. return;
  2505. }
  2506. e_crit(drv, "%s\n", ixgbe_overheat_msg);
  2507. }
  2508. static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
  2509. {
  2510. switch (hw->mac.type) {
  2511. case ixgbe_mac_82598EB:
  2512. if (hw->phy.type == ixgbe_phy_nl)
  2513. return true;
  2514. return false;
  2515. case ixgbe_mac_82599EB:
  2516. case ixgbe_mac_X550EM_x:
  2517. case ixgbe_mac_x550em_a:
  2518. switch (hw->mac.ops.get_media_type(hw)) {
  2519. case ixgbe_media_type_fiber:
  2520. case ixgbe_media_type_fiber_qsfp:
  2521. return true;
  2522. default:
  2523. return false;
  2524. }
  2525. default:
  2526. return false;
  2527. }
  2528. }
  2529. static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
  2530. {
  2531. struct ixgbe_hw *hw = &adapter->hw;
  2532. u32 eicr_mask = IXGBE_EICR_GPI_SDP2(hw);
  2533. if (!ixgbe_is_sfp(hw))
  2534. return;
  2535. /* Later MACs use a different SDP */
  2536. if (hw->mac.type >= ixgbe_mac_X540)
  2537. eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
  2538. if (eicr & eicr_mask) {
  2539. /* Clear the interrupt */
  2540. IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
  2541. if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
  2542. adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
  2543. adapter->sfp_poll_time = 0;
  2544. ixgbe_service_event_schedule(adapter);
  2545. }
  2546. }
  2547. if (adapter->hw.mac.type == ixgbe_mac_82599EB &&
  2548. (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
  2549. /* Clear the interrupt */
  2550. IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
  2551. if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
  2552. adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
  2553. ixgbe_service_event_schedule(adapter);
  2554. }
  2555. }
  2556. }
  2557. static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
  2558. {
  2559. struct ixgbe_hw *hw = &adapter->hw;
  2560. adapter->lsc_int++;
  2561. adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
  2562. adapter->link_check_timeout = jiffies;
  2563. if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
  2564. IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
  2565. IXGBE_WRITE_FLUSH(hw);
  2566. ixgbe_service_event_schedule(adapter);
  2567. }
  2568. }
  2569. static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
  2570. u64 qmask)
  2571. {
  2572. u32 mask;
  2573. struct ixgbe_hw *hw = &adapter->hw;
  2574. switch (hw->mac.type) {
  2575. case ixgbe_mac_82598EB:
  2576. mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
  2577. IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
  2578. break;
  2579. case ixgbe_mac_82599EB:
  2580. case ixgbe_mac_X540:
  2581. case ixgbe_mac_X550:
  2582. case ixgbe_mac_X550EM_x:
  2583. case ixgbe_mac_x550em_a:
  2584. mask = (qmask & 0xFFFFFFFF);
  2585. if (mask)
  2586. IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
  2587. mask = (qmask >> 32);
  2588. if (mask)
  2589. IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
  2590. break;
  2591. default:
  2592. break;
  2593. }
  2594. /* skip the flush */
  2595. }
  2596. static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
  2597. u64 qmask)
  2598. {
  2599. u32 mask;
  2600. struct ixgbe_hw *hw = &adapter->hw;
  2601. switch (hw->mac.type) {
  2602. case ixgbe_mac_82598EB:
  2603. mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
  2604. IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
  2605. break;
  2606. case ixgbe_mac_82599EB:
  2607. case ixgbe_mac_X540:
  2608. case ixgbe_mac_X550:
  2609. case ixgbe_mac_X550EM_x:
  2610. case ixgbe_mac_x550em_a:
  2611. mask = (qmask & 0xFFFFFFFF);
  2612. if (mask)
  2613. IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
  2614. mask = (qmask >> 32);
  2615. if (mask)
  2616. IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
  2617. break;
  2618. default:
  2619. break;
  2620. }
  2621. /* skip the flush */
  2622. }
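/* Illustrative sketch, not part of the driver: on MACs newer than 82598 the
 * 64-bit queue mask used by the two helpers above is split into a pair of
 * 32-bit register writes (EIMS_EX(0)/(1) to enable, EIMC_EX(0)/(1) to
 * disable), and a half is only written when it has bits set.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static void demo_enable_queues(uint64_t qmask)
{
	uint32_t lo = (uint32_t)(qmask & 0xFFFFFFFF);
	uint32_t hi = (uint32_t)(qmask >> 32);

	if (lo)
		printf("EIMS_EX(0) <- 0x%08x\n", (unsigned int)lo);
	if (hi)
		printf("EIMS_EX(1) <- 0x%08x\n", (unsigned int)hi);
}

int main(void)
{
	demo_enable_queues(1ULL << 3);	/* vector 3  -> low register only  */
	demo_enable_queues(1ULL << 40);	/* vector 40 -> high register only */
	demo_enable_queues(~0ULL);	/* everything -> both registers    */
	return 0;
}
#endif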
  2623. /**
  2624. * ixgbe_irq_enable - Enable default interrupt generation settings
  2625. * @adapter: board private structure
 * @queues: enable irqs for queues
 * @flush: flush register write
  2626. **/
  2627. static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
  2628. bool flush)
  2629. {
  2630. struct ixgbe_hw *hw = &adapter->hw;
  2631. u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
  2632. /* don't reenable LSC while waiting for link */
  2633. if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
  2634. mask &= ~IXGBE_EIMS_LSC;
  2635. if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
  2636. switch (adapter->hw.mac.type) {
  2637. case ixgbe_mac_82599EB:
  2638. mask |= IXGBE_EIMS_GPI_SDP0(hw);
  2639. break;
  2640. case ixgbe_mac_X540:
  2641. case ixgbe_mac_X550:
  2642. case ixgbe_mac_X550EM_x:
  2643. case ixgbe_mac_x550em_a:
  2644. mask |= IXGBE_EIMS_TS;
  2645. break;
  2646. default:
  2647. break;
  2648. }
  2649. if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
  2650. mask |= IXGBE_EIMS_GPI_SDP1(hw);
  2651. switch (adapter->hw.mac.type) {
  2652. case ixgbe_mac_82599EB:
  2653. mask |= IXGBE_EIMS_GPI_SDP1(hw);
  2654. mask |= IXGBE_EIMS_GPI_SDP2(hw);
  2655. /* fall through */
  2656. case ixgbe_mac_X540:
  2657. case ixgbe_mac_X550:
  2658. case ixgbe_mac_X550EM_x:
  2659. case ixgbe_mac_x550em_a:
  2660. if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
  2661. adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
  2662. adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP_N)
  2663. mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw);
  2664. if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t)
  2665. mask |= IXGBE_EICR_GPI_SDP0_X540;
  2666. mask |= IXGBE_EIMS_ECC;
  2667. mask |= IXGBE_EIMS_MAILBOX;
  2668. break;
  2669. default:
  2670. break;
  2671. }
  2672. if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
  2673. !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
  2674. mask |= IXGBE_EIMS_FLOW_DIR;
  2675. IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
  2676. if (queues)
  2677. ixgbe_irq_enable_queues(adapter, ~0);
  2678. if (flush)
  2679. IXGBE_WRITE_FLUSH(&adapter->hw);
  2680. }
  2681. static irqreturn_t ixgbe_msix_other(int irq, void *data)
  2682. {
  2683. struct ixgbe_adapter *adapter = data;
  2684. struct ixgbe_hw *hw = &adapter->hw;
  2685. u32 eicr;
  2686. /*
  2687. * Workaround for Silicon errata. Use clear-by-write instead
  2688. * of clear-by-read. Reading with EICS will return the
  2689. * interrupt causes without clearing, which will later be done
  2690. * with the write to EICR.
  2691. */
  2692. eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
  2693. /* The lower 16bits of the EICR register are for the queue interrupts
  2694. * which should be masked here in order to not accidentally clear them if
  2695. * the bits are high when ixgbe_msix_other is called. There is a race
  2696. * condition otherwise which results in possible performance loss
  2697. * especially if the ixgbe_msix_other interrupt is triggering
  2698. * consistently (as it would when PPS is turned on for the X540 device)
  2699. */
  2700. eicr &= 0xFFFF0000;
  2701. IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
  2702. if (eicr & IXGBE_EICR_LSC)
  2703. ixgbe_check_lsc(adapter);
  2704. if (eicr & IXGBE_EICR_MAILBOX)
  2705. ixgbe_msg_task(adapter);
  2706. switch (hw->mac.type) {
  2707. case ixgbe_mac_82599EB:
  2708. case ixgbe_mac_X540:
  2709. case ixgbe_mac_X550:
  2710. case ixgbe_mac_X550EM_x:
  2711. case ixgbe_mac_x550em_a:
  2712. if (hw->phy.type == ixgbe_phy_x550em_ext_t &&
  2713. (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
  2714. adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT;
  2715. ixgbe_service_event_schedule(adapter);
  2716. IXGBE_WRITE_REG(hw, IXGBE_EICR,
  2717. IXGBE_EICR_GPI_SDP0_X540);
  2718. }
  2719. if (eicr & IXGBE_EICR_ECC) {
  2720. e_info(link, "Received ECC Err, initiating reset\n");
  2721. set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
  2722. ixgbe_service_event_schedule(adapter);
  2723. IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
  2724. }
  2725. /* Handle Flow Director Full threshold interrupt */
  2726. if (eicr & IXGBE_EICR_FLOW_DIR) {
  2727. int reinit_count = 0;
  2728. int i;
  2729. for (i = 0; i < adapter->num_tx_queues; i++) {
  2730. struct ixgbe_ring *ring = adapter->tx_ring[i];
  2731. if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
  2732. &ring->state))
  2733. reinit_count++;
  2734. }
  2735. if (reinit_count) {
  2736. /* no more flow director interrupts until after init */
  2737. IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
  2738. adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
  2739. ixgbe_service_event_schedule(adapter);
  2740. }
  2741. }
  2742. ixgbe_check_sfp_event(adapter, eicr);
  2743. ixgbe_check_overtemp_event(adapter, eicr);
  2744. break;
  2745. default:
  2746. break;
  2747. }
  2748. ixgbe_check_fan_failure(adapter, eicr);
  2749. if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
  2750. ixgbe_ptp_check_pps_event(adapter);
  2751. /* re-enable the original interrupt state, no lsc, no queues */
  2752. if (!test_bit(__IXGBE_DOWN, &adapter->state))
  2753. ixgbe_irq_enable(adapter, false, false);
  2754. return IRQ_HANDLED;
  2755. }
  2756. static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
  2757. {
  2758. struct ixgbe_q_vector *q_vector = data;
  2759. /* EIAM disabled interrupts (on this vector) for us */
  2760. if (q_vector->rx.ring || q_vector->tx.ring)
  2761. napi_schedule_irqoff(&q_vector->napi);
  2762. return IRQ_HANDLED;
  2763. }
  2764. /**
  2765. * ixgbe_poll - NAPI Rx polling callback
  2766. * @napi: structure for representing this polling device
  2767. * @budget: how many packets driver is allowed to clean
  2768. *
  2769. * This function is used for legacy and MSI, NAPI mode
  2770. **/
  2771. int ixgbe_poll(struct napi_struct *napi, int budget)
  2772. {
  2773. struct ixgbe_q_vector *q_vector =
  2774. container_of(napi, struct ixgbe_q_vector, napi);
  2775. struct ixgbe_adapter *adapter = q_vector->adapter;
  2776. struct ixgbe_ring *ring;
  2777. int per_ring_budget, work_done = 0;
  2778. bool clean_complete = true;
  2779. #ifdef CONFIG_IXGBE_DCA
  2780. if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
  2781. ixgbe_update_dca(q_vector);
  2782. #endif
  2783. ixgbe_for_each_ring(ring, q_vector->tx) {
  2784. if (!ixgbe_clean_tx_irq(q_vector, ring, budget))
  2785. clean_complete = false;
  2786. }
  2787. /* Exit if we are called by netpoll */
  2788. if (budget <= 0)
  2789. return budget;
  2790. /* attempt to distribute budget to each queue fairly, but don't allow
  2791. * the budget to go below 1 because we'll exit polling */
  2792. if (q_vector->rx.count > 1)
  2793. per_ring_budget = max(budget/q_vector->rx.count, 1);
  2794. else
  2795. per_ring_budget = budget;
  2796. ixgbe_for_each_ring(ring, q_vector->rx) {
  2797. int cleaned = ixgbe_clean_rx_irq(q_vector, ring,
  2798. per_ring_budget);
  2799. work_done += cleaned;
  2800. if (cleaned >= per_ring_budget)
  2801. clean_complete = false;
  2802. }
  2803. /* If all work not completed, return budget and keep polling */
  2804. if (!clean_complete)
  2805. return budget;
  2806. /* all work done, exit the polling mode */
  2807. napi_complete_done(napi, work_done);
  2808. if (adapter->rx_itr_setting & 1)
  2809. ixgbe_set_itr(q_vector);
  2810. if (!test_bit(__IXGBE_DOWN, &adapter->state))
  2811. ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx));
  2812. return min(work_done, budget - 1);
  2813. }
  2814. /**
  2815. * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
  2816. * @adapter: board private structure
  2817. *
  2818. * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
  2819. * interrupts from the kernel.
  2820. **/
  2821. static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
  2822. {
  2823. struct net_device *netdev = adapter->netdev;
  2824. unsigned int ri = 0, ti = 0;
  2825. int vector, err;
  2826. for (vector = 0; vector < adapter->num_q_vectors; vector++) {
  2827. struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
  2828. struct msix_entry *entry = &adapter->msix_entries[vector];
  2829. if (q_vector->tx.ring && q_vector->rx.ring) {
  2830. snprintf(q_vector->name, sizeof(q_vector->name),
  2831. "%s-TxRx-%u", netdev->name, ri++);
  2832. ti++;
  2833. } else if (q_vector->rx.ring) {
  2834. snprintf(q_vector->name, sizeof(q_vector->name),
  2835. "%s-rx-%u", netdev->name, ri++);
  2836. } else if (q_vector->tx.ring) {
  2837. snprintf(q_vector->name, sizeof(q_vector->name),
  2838. "%s-tx-%u", netdev->name, ti++);
  2839. } else {
  2840. /* skip this unused q_vector */
  2841. continue;
  2842. }
  2843. err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0,
  2844. q_vector->name, q_vector);
  2845. if (err) {
  2846. e_err(probe, "request_irq failed for MSIX interrupt "
  2847. "Error: %d\n", err);
  2848. goto free_queue_irqs;
  2849. }
  2850. /* If Flow Director is enabled, set interrupt affinity */
  2851. if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
  2852. /* assign the mask for this irq */
  2853. irq_set_affinity_hint(entry->vector,
  2854. &q_vector->affinity_mask);
  2855. }
  2856. }
  2857. err = request_irq(adapter->msix_entries[vector].vector,
  2858. ixgbe_msix_other, 0, netdev->name, adapter);
  2859. if (err) {
  2860. e_err(probe, "request_irq for msix_other failed: %d\n", err);
  2861. goto free_queue_irqs;
  2862. }
  2863. return 0;
  2864. free_queue_irqs:
  2865. while (vector) {
  2866. vector--;
  2867. irq_set_affinity_hint(adapter->msix_entries[vector].vector,
  2868. NULL);
  2869. free_irq(adapter->msix_entries[vector].vector,
  2870. adapter->q_vector[vector]);
  2871. }
  2872. adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
  2873. pci_disable_msix(adapter->pdev);
  2874. kfree(adapter->msix_entries);
  2875. adapter->msix_entries = NULL;
  2876. return err;
  2877. }
  2878. /**
  2879. * ixgbe_intr - legacy mode Interrupt Handler
  2880. * @irq: interrupt number
  2881. * @data: pointer to a network interface device structure
  2882. **/
  2883. static irqreturn_t ixgbe_intr(int irq, void *data)
  2884. {
  2885. struct ixgbe_adapter *adapter = data;
  2886. struct ixgbe_hw *hw = &adapter->hw;
  2887. struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
  2888. u32 eicr;
  2889. /*
  2890. * Workaround for silicon errata #26 on 82598. Mask the interrupt
  2891. * before the read of EICR.
  2892. */
  2893. IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
  2894. /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read,
  2895. * therefore no explicit interrupt disable is necessary */
  2896. eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
  2897. if (!eicr) {
  2898. /*
  2899. * shared interrupt alert!
  2900. * make sure interrupts are enabled because the read will
  2901. * have disabled interrupts due to EIAM, then finish the
  2902. * workaround of silicon errata on 82598 by unmasking the
  2903. * interrupt that we masked before the EICR read.
  2904. */
  2905. if (!test_bit(__IXGBE_DOWN, &adapter->state))
  2906. ixgbe_irq_enable(adapter, true, true);
  2907. return IRQ_NONE; /* Not our interrupt */
  2908. }
  2909. if (eicr & IXGBE_EICR_LSC)
  2910. ixgbe_check_lsc(adapter);
  2911. switch (hw->mac.type) {
  2912. case ixgbe_mac_82599EB:
  2913. ixgbe_check_sfp_event(adapter, eicr);
  2914. /* Fall through */
  2915. case ixgbe_mac_X540:
  2916. case ixgbe_mac_X550:
  2917. case ixgbe_mac_X550EM_x:
  2918. case ixgbe_mac_x550em_a:
  2919. if (eicr & IXGBE_EICR_ECC) {
  2920. e_info(link, "Received ECC Err, initiating reset\n");
  2921. set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
  2922. ixgbe_service_event_schedule(adapter);
  2923. IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
  2924. }
  2925. ixgbe_check_overtemp_event(adapter, eicr);
  2926. break;
  2927. default:
  2928. break;
  2929. }
  2930. ixgbe_check_fan_failure(adapter, eicr);
  2931. if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
  2932. ixgbe_ptp_check_pps_event(adapter);
  2933. /* would disable interrupts here but EIAM disabled it */
  2934. napi_schedule_irqoff(&q_vector->napi);
  2935. /*
  2936. * re-enable link (maybe) and non-queue interrupts, no flush.
  2937. * ixgbe_poll will re-enable the queue interrupts
  2938. */
  2939. if (!test_bit(__IXGBE_DOWN, &adapter->state))
  2940. ixgbe_irq_enable(adapter, false, false);
  2941. return IRQ_HANDLED;
  2942. }
  2943. /**
  2944. * ixgbe_request_irq - initialize interrupts
  2945. * @adapter: board private structure
  2946. *
  2947. * Attempts to configure interrupts using the best available
  2948. * capabilities of the hardware and kernel.
  2949. **/
  2950. static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
  2951. {
  2952. struct net_device *netdev = adapter->netdev;
  2953. int err;
  2954. if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
  2955. err = ixgbe_request_msix_irqs(adapter);
  2956. else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED)
  2957. err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
  2958. netdev->name, adapter);
  2959. else
  2960. err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
  2961. netdev->name, adapter);
  2962. if (err)
  2963. e_err(probe, "request_irq failed, Error %d\n", err);
  2964. return err;
  2965. }
  2966. static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
  2967. {
  2968. int vector;
  2969. if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
  2970. free_irq(adapter->pdev->irq, adapter);
  2971. return;
  2972. }
  2973. if (!adapter->msix_entries)
  2974. return;
  2975. for (vector = 0; vector < adapter->num_q_vectors; vector++) {
  2976. struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
  2977. struct msix_entry *entry = &adapter->msix_entries[vector];
  2978. /* free only the irqs that were actually requested */
  2979. if (!q_vector->rx.ring && !q_vector->tx.ring)
  2980. continue;
  2981. /* clear the affinity_mask in the IRQ descriptor */
  2982. irq_set_affinity_hint(entry->vector, NULL);
  2983. free_irq(entry->vector, q_vector);
  2984. }
  2985. free_irq(adapter->msix_entries[vector].vector, adapter);
  2986. }
  2987. /**
  2988. * ixgbe_irq_disable - Mask off interrupt generation on the NIC
  2989. * @adapter: board private structure
  2990. **/
  2991. static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
  2992. {
  2993. switch (adapter->hw.mac.type) {
  2994. case ixgbe_mac_82598EB:
  2995. IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
  2996. break;
  2997. case ixgbe_mac_82599EB:
  2998. case ixgbe_mac_X540:
  2999. case ixgbe_mac_X550:
  3000. case ixgbe_mac_X550EM_x:
  3001. case ixgbe_mac_x550em_a:
  3002. IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
  3003. IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
  3004. IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
  3005. break;
  3006. default:
  3007. break;
  3008. }
  3009. IXGBE_WRITE_FLUSH(&adapter->hw);
  3010. if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
  3011. int vector;
  3012. for (vector = 0; vector < adapter->num_q_vectors; vector++)
  3013. synchronize_irq(adapter->msix_entries[vector].vector);
  3014. synchronize_irq(adapter->msix_entries[vector++].vector);
  3015. } else {
  3016. synchronize_irq(adapter->pdev->irq);
  3017. }
  3018. }
  3019. /**
  3020. * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
  3021. * @adapter: board private structure
  3022. **/
  3023. static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
  3024. {
  3025. struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
  3026. ixgbe_write_eitr(q_vector);
  3027. ixgbe_set_ivar(adapter, 0, 0, 0);
  3028. ixgbe_set_ivar(adapter, 1, 0, 0);
  3029. e_info(hw, "Legacy interrupt IVAR setup done\n");
  3030. }
  3031. /**
  3032. * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset
  3033. * @adapter: board private structure
  3034. * @ring: structure containing ring specific data
  3035. *
  3036. * Configure the Tx descriptor ring after a reset.
  3037. **/
  3038. void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
  3039. struct ixgbe_ring *ring)
  3040. {
  3041. struct ixgbe_hw *hw = &adapter->hw;
  3042. u64 tdba = ring->dma;
  3043. int wait_loop = 10;
  3044. u32 txdctl = IXGBE_TXDCTL_ENABLE;
  3045. u8 reg_idx = ring->reg_idx;
  3046. /* disable queue to avoid issues while updating state */
  3047. IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
  3048. IXGBE_WRITE_FLUSH(hw);
  3049. IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
  3050. (tdba & DMA_BIT_MASK(32)));
  3051. IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
  3052. IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
  3053. ring->count * sizeof(union ixgbe_adv_tx_desc));
  3054. IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
  3055. IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
  3056. ring->tail = adapter->io_addr + IXGBE_TDT(reg_idx);
  3057. /*
  3058. * set WTHRESH to encourage burst writeback, it should not be set
  3059. * higher than 1 when:
  3060. * - ITR is 0 as it could cause false TX hangs
  3061. * - ITR is set to > 100k int/sec and BQL is enabled
  3062. *
  3063. * In order to avoid issues WTHRESH + PTHRESH should always be equal
  3064. * to or less than the number of on chip descriptors, which is
  3065. * currently 40.
  3066. */
  3067. if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
  3068. txdctl |= 1u << 16; /* WTHRESH = 1 */
  3069. else
  3070. txdctl |= 8u << 16; /* WTHRESH = 8 */
  3071. /*
  3072. * Setting PTHRESH to 32 both improves performance
  3073. * and avoids a TX hang with DFP enabled
  3074. */
  3075. txdctl |= (1u << 8) | /* HTHRESH = 1 */
  3076. 32; /* PTHRESH = 32 */
  3077. /* reinitialize flowdirector state */
  3078. if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
  3079. ring->atr_sample_rate = adapter->atr_sample_rate;
  3080. ring->atr_count = 0;
  3081. set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
  3082. } else {
  3083. ring->atr_sample_rate = 0;
  3084. }
  3085. /* initialize XPS */
  3086. if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) {
  3087. struct ixgbe_q_vector *q_vector = ring->q_vector;
  3088. if (q_vector)
  3089. netif_set_xps_queue(ring->netdev,
  3090. &q_vector->affinity_mask,
  3091. ring->queue_index);
  3092. }
  3093. clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
  3094. /* reinitialize tx_buffer_info */
  3095. memset(ring->tx_buffer_info, 0,
  3096. sizeof(struct ixgbe_tx_buffer) * ring->count);
  3097. /* enable queue */
  3098. IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
  3099. /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
  3100. if (hw->mac.type == ixgbe_mac_82598EB &&
  3101. !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
  3102. return;
  3103. /* poll to verify queue is enabled */
  3104. do {
  3105. usleep_range(1000, 2000);
  3106. txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
  3107. } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
  3108. if (!wait_loop)
  3109. hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx);
  3110. }
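/* Illustrative sketch, not part of the driver: how the TXDCTL value built
 * above packs its threshold fields.  PTHRESH sits in the low bits, HTHRESH at
 * bit 8 and WTHRESH at bit 16, matching the shifts used in
 * ixgbe_configure_tx_ring(); the enable-bit position used here is an
 * assumption for the demo, not taken from the hardware header.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define DEMO_TXDCTL_ENABLE	(1u << 25)	/* assumed bit for the demo */

static uint32_t demo_build_txdctl(unsigned int pthresh, unsigned int hthresh,
				  unsigned int wthresh)
{
	return DEMO_TXDCTL_ENABLE | (wthresh << 16) | (hthresh << 8) | pthresh;
}

int main(void)
{
	/* low interrupt rate: WTHRESH = 1, HTHRESH = 1, PTHRESH = 32 */
	printf("txdctl = 0x%08x\n", (unsigned int)demo_build_txdctl(32, 1, 1));
	/* higher interrupt rate: WTHRESH = 8 */
	printf("txdctl = 0x%08x\n", (unsigned int)demo_build_txdctl(32, 1, 8));
	return 0;
}
#endif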
  3111. static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
  3112. {
  3113. struct ixgbe_hw *hw = &adapter->hw;
  3114. u32 rttdcs, mtqc;
  3115. u8 tcs = netdev_get_num_tc(adapter->netdev);
  3116. if (hw->mac.type == ixgbe_mac_82598EB)
  3117. return;
  3118. /* disable the arbiter while setting MTQC */
  3119. rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
  3120. rttdcs |= IXGBE_RTTDCS_ARBDIS;
  3121. IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
  3122. /* set transmit pool layout */
  3123. if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
  3124. mtqc = IXGBE_MTQC_VT_ENA;
  3125. if (tcs > 4)
  3126. mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
  3127. else if (tcs > 1)
  3128. mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
  3129. else if (adapter->ring_feature[RING_F_VMDQ].mask ==
  3130. IXGBE_82599_VMDQ_4Q_MASK)
  3131. mtqc |= IXGBE_MTQC_32VF;
  3132. else
  3133. mtqc |= IXGBE_MTQC_64VF;
  3134. } else {
  3135. if (tcs > 4)
  3136. mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
  3137. else if (tcs > 1)
  3138. mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
  3139. else
  3140. mtqc = IXGBE_MTQC_64Q_1PB;
  3141. }
  3142. IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
  3143. /* Enable Security TX Buffer IFG for multiple pb */
  3144. if (tcs) {
  3145. u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
  3146. sectx |= IXGBE_SECTX_DCB;
  3147. IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx);
  3148. }
  3149. /* re-enable the arbiter */
  3150. rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
  3151. IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
  3152. }
  3153. /**
  3154. * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
  3155. * @adapter: board private structure
  3156. *
  3157. * Configure the Tx unit of the MAC after a reset.
  3158. **/
  3159. static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
  3160. {
  3161. struct ixgbe_hw *hw = &adapter->hw;
  3162. u32 dmatxctl;
  3163. u32 i;
  3164. ixgbe_setup_mtqc(adapter);
  3165. if (hw->mac.type != ixgbe_mac_82598EB) {
  3166. /* DMATXCTL.EN must be before Tx queues are enabled */
  3167. dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
  3168. dmatxctl |= IXGBE_DMATXCTL_TE;
  3169. IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
  3170. }
  3171. /* Setup the HW Tx Head and Tail descriptor pointers */
  3172. for (i = 0; i < adapter->num_tx_queues; i++)
  3173. ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
  3174. for (i = 0; i < adapter->num_xdp_queues; i++)
  3175. ixgbe_configure_tx_ring(adapter, adapter->xdp_ring[i]);
  3176. }
  3177. static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter,
  3178. struct ixgbe_ring *ring)
  3179. {
  3180. struct ixgbe_hw *hw = &adapter->hw;
  3181. u8 reg_idx = ring->reg_idx;
  3182. u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
  3183. srrctl |= IXGBE_SRRCTL_DROP_EN;
  3184. IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
  3185. }
  3186. static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter,
  3187. struct ixgbe_ring *ring)
  3188. {
  3189. struct ixgbe_hw *hw = &adapter->hw;
  3190. u8 reg_idx = ring->reg_idx;
  3191. u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
  3192. srrctl &= ~IXGBE_SRRCTL_DROP_EN;
  3193. IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
  3194. }
  3195. #ifdef CONFIG_IXGBE_DCB
  3196. void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
  3197. #else
  3198. static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
  3199. #endif
  3200. {
  3201. int i;
  3202. bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
  3203. if (adapter->ixgbe_ieee_pfc)
  3204. pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
  3205. /*
  3206. * We should set the drop enable bit if:
  3207. * SR-IOV is enabled
  3208. * or
  3209. * Number of Rx queues > 1 and flow control is disabled
  3210. *
  3211. * This allows us to avoid head of line blocking for security
  3212. * and performance reasons.
  3213. */
  3214. if (adapter->num_vfs || (adapter->num_rx_queues > 1 &&
  3215. !(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) {
  3216. for (i = 0; i < adapter->num_rx_queues; i++)
  3217. ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]);
  3218. } else {
  3219. for (i = 0; i < adapter->num_rx_queues; i++)
  3220. ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]);
  3221. }
  3222. }
  3223. #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
  3224. static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
  3225. struct ixgbe_ring *rx_ring)
  3226. {
  3227. struct ixgbe_hw *hw = &adapter->hw;
  3228. u32 srrctl;
  3229. u8 reg_idx = rx_ring->reg_idx;
  3230. if (hw->mac.type == ixgbe_mac_82598EB) {
  3231. u16 mask = adapter->ring_feature[RING_F_RSS].mask;
  3232. /*
  3233. * if VMDq is not active we must program one srrctl register
  3234. * per RSS queue since we have enabled RDRXCTL.MVMEN
  3235. */
  3236. reg_idx &= mask;
  3237. }
  3238. /* configure header buffer length, needed for RSC */
  3239. srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
  3240. /* configure the packet buffer length */
  3241. if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state))
  3242. srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
  3243. else
  3244. srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
  3245. /* configure descriptor type */
  3246. srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
  3247. IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
  3248. }
  3249. /**
3250. * ixgbe_rss_indir_tbl_entries - Return the number of RSS indirection table entries
  3251. * @adapter: device handle
  3252. *
  3253. * - 82598/82599/X540: 128
  3254. * - X550(non-SRIOV mode): 512
  3255. * - X550(SRIOV mode): 64
  3256. */
  3257. u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter)
  3258. {
  3259. if (adapter->hw.mac.type < ixgbe_mac_X550)
  3260. return 128;
  3261. else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
  3262. return 64;
  3263. else
  3264. return 512;
  3265. }
  3266. /**
  3267. * ixgbe_store_key - Write the RSS key to HW
  3268. * @adapter: device handle
  3269. *
  3270. * Write the RSS key stored in adapter.rss_key to HW.
  3271. */
  3272. void ixgbe_store_key(struct ixgbe_adapter *adapter)
  3273. {
  3274. struct ixgbe_hw *hw = &adapter->hw;
  3275. int i;
  3276. for (i = 0; i < 10; i++)
  3277. IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]);
  3278. }
  3279. /**
  3280. * ixgbe_init_rss_key - Initialize adapter RSS key
  3281. * @adapter: device handle
  3282. *
  3283. * Allocates and initializes the RSS key if it is not allocated.
  3284. **/
  3285. static inline int ixgbe_init_rss_key(struct ixgbe_adapter *adapter)
  3286. {
  3287. u32 *rss_key;
  3288. if (!adapter->rss_key) {
  3289. rss_key = kzalloc(IXGBE_RSS_KEY_SIZE, GFP_KERNEL);
  3290. if (unlikely(!rss_key))
  3291. return -ENOMEM;
  3292. netdev_rss_key_fill(rss_key, IXGBE_RSS_KEY_SIZE);
  3293. adapter->rss_key = rss_key;
  3294. }
  3295. return 0;
  3296. }
  3297. /**
  3298. * ixgbe_store_reta - Write the RETA table to HW
  3299. * @adapter: device handle
  3300. *
  3301. * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW.
  3302. */
  3303. void ixgbe_store_reta(struct ixgbe_adapter *adapter)
  3304. {
  3305. u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
  3306. struct ixgbe_hw *hw = &adapter->hw;
  3307. u32 reta = 0;
  3308. u32 indices_multi;
  3309. u8 *indir_tbl = adapter->rss_indir_tbl;
  3310. /* Fill out the redirection table as follows:
  3311. * - 82598: 8 bit wide entries containing pair of 4 bit RSS
  3312. * indices.
  3313. * - 82599/X540: 8 bit wide entries containing 4 bit RSS index
  3314. * - X550: 8 bit wide entries containing 6 bit RSS index
  3315. */
  3316. if (adapter->hw.mac.type == ixgbe_mac_82598EB)
  3317. indices_multi = 0x11;
  3318. else
  3319. indices_multi = 0x1;
  3320. /* Write redirection table to HW */
  3321. for (i = 0; i < reta_entries; i++) {
  3322. reta |= indices_multi * indir_tbl[i] << (i & 0x3) * 8;
  3323. if ((i & 3) == 3) {
  3324. if (i < 128)
  3325. IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
  3326. else
  3327. IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
  3328. reta);
  3329. reta = 0;
  3330. }
  3331. }
  3332. }
  3333. /**
  3334. * ixgbe_store_vfreta - Write the RETA table to HW (x550 devices in SRIOV mode)
  3335. * @adapter: device handle
  3336. *
  3337. * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW.
  3338. */
  3339. static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter)
  3340. {
  3341. u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
  3342. struct ixgbe_hw *hw = &adapter->hw;
  3343. u32 vfreta = 0;
  3344. unsigned int pf_pool = adapter->num_vfs;
  3345. /* Write redirection table to HW */
  3346. for (i = 0; i < reta_entries; i++) {
  3347. vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8;
  3348. if ((i & 3) == 3) {
  3349. IXGBE_WRITE_REG(hw, IXGBE_PFVFRETA(i >> 2, pf_pool),
  3350. vfreta);
  3351. vfreta = 0;
  3352. }
  3353. }
  3354. }
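/* Program the RSS key and fill the redirection table with a round-robin
 * spread across the active RSS queues (at least 4 when SR-IOV is enabled),
 * then write the table to hardware.
 */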
  3355. static void ixgbe_setup_reta(struct ixgbe_adapter *adapter)
  3356. {
  3357. u32 i, j;
  3358. u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
  3359. u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
  3360. /* Program table for at least 4 queues w/ SR-IOV so that VFs can
  3361. * make full use of any rings they may have. We will use the
  3362. * PSRTYPE register to control how many rings we use within the PF.
  3363. */
  3364. if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 4))
  3365. rss_i = 4;
  3366. /* Fill out hash function seeds */
  3367. ixgbe_store_key(adapter);
  3368. /* Fill out redirection table */
  3369. memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl));
  3370. for (i = 0, j = 0; i < reta_entries; i++, j++) {
  3371. if (j == rss_i)
  3372. j = 0;
  3373. adapter->rss_indir_tbl[i] = j;
  3374. }
  3375. ixgbe_store_reta(adapter);
  3376. }
  3377. static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter)
  3378. {
  3379. struct ixgbe_hw *hw = &adapter->hw;
  3380. u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
  3381. unsigned int pf_pool = adapter->num_vfs;
  3382. int i, j;
  3383. /* Fill out hash function seeds */
  3384. for (i = 0; i < 10; i++)
  3385. IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool),
  3386. *(adapter->rss_key + i));
  3387. /* Fill out the redirection table */
  3388. for (i = 0, j = 0; i < 64; i++, j++) {
  3389. if (j == rss_i)
  3390. j = 0;
  3391. adapter->rss_indir_tbl[i] = j;
  3392. }
  3393. ixgbe_store_vfreta(adapter);
  3394. }
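/* Program MRQC: select the RSS/VMDq/DCB queue assignment mode based on the
 * enabled features and traffic class count, choose which packet types are
 * hashed, and write the RSS key and redirection table either through the PF
 * registers or the per-pool VF registers on X550 with SR-IOV.
 */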
  3395. static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
  3396. {
  3397. struct ixgbe_hw *hw = &adapter->hw;
  3398. u32 mrqc = 0, rss_field = 0, vfmrqc = 0;
  3399. u32 rxcsum;
  3400. /* Disable indicating checksum in descriptor, enables RSS hash */
  3401. rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
  3402. rxcsum |= IXGBE_RXCSUM_PCSD;
  3403. IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
  3404. if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
  3405. if (adapter->ring_feature[RING_F_RSS].mask)
  3406. mrqc = IXGBE_MRQC_RSSEN;
  3407. } else {
  3408. u8 tcs = netdev_get_num_tc(adapter->netdev);
  3409. if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
  3410. if (tcs > 4)
  3411. mrqc = IXGBE_MRQC_VMDQRT8TCEN; /* 8 TCs */
  3412. else if (tcs > 1)
  3413. mrqc = IXGBE_MRQC_VMDQRT4TCEN; /* 4 TCs */
  3414. else if (adapter->ring_feature[RING_F_VMDQ].mask ==
  3415. IXGBE_82599_VMDQ_4Q_MASK)
  3416. mrqc = IXGBE_MRQC_VMDQRSS32EN;
  3417. else
  3418. mrqc = IXGBE_MRQC_VMDQRSS64EN;
  3419. /* Enable L3/L4 for Tx Switched packets */
  3420. mrqc |= IXGBE_MRQC_L3L4TXSWEN;
  3421. } else {
  3422. if (tcs > 4)
  3423. mrqc = IXGBE_MRQC_RTRSS8TCEN;
  3424. else if (tcs > 1)
  3425. mrqc = IXGBE_MRQC_RTRSS4TCEN;
  3426. else
  3427. mrqc = IXGBE_MRQC_RSSEN;
  3428. }
  3429. }
  3430. /* Perform hash on these packet types */
  3431. rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4 |
  3432. IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
  3433. IXGBE_MRQC_RSS_FIELD_IPV6 |
  3434. IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
  3435. if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
  3436. rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
  3437. if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
  3438. rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
  3439. if ((hw->mac.type >= ixgbe_mac_X550) &&
  3440. (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
  3441. unsigned int pf_pool = adapter->num_vfs;
  3442. /* Enable VF RSS mode */
  3443. mrqc |= IXGBE_MRQC_MULTIPLE_RSS;
  3444. IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
  3445. /* Setup RSS through the VF registers */
  3446. ixgbe_setup_vfreta(adapter);
  3447. vfmrqc = IXGBE_MRQC_RSSEN;
  3448. vfmrqc |= rss_field;
  3449. IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), vfmrqc);
  3450. } else {
  3451. ixgbe_setup_reta(adapter);
  3452. mrqc |= rss_field;
  3453. IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
  3454. }
  3455. }
  3456. /**
  3457. * ixgbe_configure_rscctl - enable RSC for the indicated ring
  3458. * @adapter: address of board private structure
3459. * @ring: ring to enable RSC on
  3460. **/
  3461. static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
  3462. struct ixgbe_ring *ring)
  3463. {
  3464. struct ixgbe_hw *hw = &adapter->hw;
  3465. u32 rscctrl;
  3466. u8 reg_idx = ring->reg_idx;
  3467. if (!ring_is_rsc_enabled(ring))
  3468. return;
  3469. rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
  3470. rscctrl |= IXGBE_RSCCTL_RSCEN;
  3471. /*
  3472. * we must limit the number of descriptors so that the
  3473. * total size of max desc * buf_len is not greater
  3474. * than 65536
  3475. */
  3476. rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
  3477. IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
  3478. }
  3479. #define IXGBE_MAX_RX_DESC_POLL 10
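/* Poll RXDCTL until the hardware sets the ENABLE bit for the ring, logging
 * an error if the queue does not come up within the polling period.
 */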
  3480. static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
  3481. struct ixgbe_ring *ring)
  3482. {
  3483. struct ixgbe_hw *hw = &adapter->hw;
  3484. int wait_loop = IXGBE_MAX_RX_DESC_POLL;
  3485. u32 rxdctl;
  3486. u8 reg_idx = ring->reg_idx;
  3487. if (ixgbe_removed(hw->hw_addr))
  3488. return;
  3489. /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
  3490. if (hw->mac.type == ixgbe_mac_82598EB &&
  3491. !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
  3492. return;
  3493. do {
  3494. usleep_range(1000, 2000);
  3495. rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
  3496. } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
  3497. if (!wait_loop) {
  3498. e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
  3499. "the polling period\n", reg_idx);
  3500. }
  3501. }
  3502. void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
  3503. struct ixgbe_ring *ring)
  3504. {
  3505. struct ixgbe_hw *hw = &adapter->hw;
  3506. int wait_loop = IXGBE_MAX_RX_DESC_POLL;
  3507. u32 rxdctl;
  3508. u8 reg_idx = ring->reg_idx;
  3509. if (ixgbe_removed(hw->hw_addr))
  3510. return;
  3511. rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
  3512. rxdctl &= ~IXGBE_RXDCTL_ENABLE;
  3513. /* write value back with RXDCTL.ENABLE bit cleared */
  3514. IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
  3515. if (hw->mac.type == ixgbe_mac_82598EB &&
  3516. !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
  3517. return;
  3518. /* the hardware may take up to 100us to really disable the rx queue */
  3519. do {
  3520. udelay(10);
  3521. rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
  3522. } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
  3523. if (!wait_loop) {
  3524. e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
  3525. "the polling period\n", reg_idx);
  3526. }
  3527. }
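/* Program one Rx ring: disable the queue, write the descriptor base, length
 * and head/tail pointers, configure SRRCTL/RSCCTL, re-enable the queue and
 * refill it with receive buffers.
 */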
  3528. void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
  3529. struct ixgbe_ring *ring)
  3530. {
  3531. struct ixgbe_hw *hw = &adapter->hw;
  3532. union ixgbe_adv_rx_desc *rx_desc;
  3533. u64 rdba = ring->dma;
  3534. u32 rxdctl;
  3535. u8 reg_idx = ring->reg_idx;
  3536. /* disable queue to avoid issues while updating state */
  3537. rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
  3538. ixgbe_disable_rx_queue(adapter, ring);
  3539. IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
  3540. IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
  3541. IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
  3542. ring->count * sizeof(union ixgbe_adv_rx_desc));
  3543. /* Force flushing of IXGBE_RDLEN to prevent MDD */
  3544. IXGBE_WRITE_FLUSH(hw);
  3545. IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
  3546. IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
  3547. ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx);
  3548. ixgbe_configure_srrctl(adapter, ring);
  3549. ixgbe_configure_rscctl(adapter, ring);
  3550. if (hw->mac.type == ixgbe_mac_82598EB) {
  3551. /*
  3552. * enable cache line friendly hardware writes:
  3553. * PTHRESH=32 descriptors (half the internal cache),
  3554. * this also removes ugly rx_no_buffer_count increment
  3555. * HTHRESH=4 descriptors (to minimize latency on fetch)
  3556. * WTHRESH=8 burst writeback up to two cache lines
  3557. */
  3558. rxdctl &= ~0x3FFFFF;
  3559. rxdctl |= 0x080420;
  3560. #if (PAGE_SIZE < 8192)
  3561. } else {
  3562. rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
  3563. IXGBE_RXDCTL_RLPML_EN);
  3564. /* Limit the maximum frame size so we don't overrun the skb */
  3565. if (ring_uses_build_skb(ring) &&
  3566. !test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
  3567. rxdctl |= IXGBE_MAX_2K_FRAME_BUILD_SKB |
  3568. IXGBE_RXDCTL_RLPML_EN;
  3569. #endif
  3570. }
  3571. /* initialize rx_buffer_info */
  3572. memset(ring->rx_buffer_info, 0,
  3573. sizeof(struct ixgbe_rx_buffer) * ring->count);
  3574. /* initialize Rx descriptor 0 */
  3575. rx_desc = IXGBE_RX_DESC(ring, 0);
  3576. rx_desc->wb.upper.length = 0;
  3577. /* enable receive descriptor ring */
  3578. rxdctl |= IXGBE_RXDCTL_ENABLE;
  3579. IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
  3580. ixgbe_rx_desc_queue_enable(adapter, ring);
  3581. ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
  3582. }
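/* Program PSRTYPE for each pool in use, selecting which header types are
 * recognized for packet split and how many RSS queues the pool spreads
 * traffic across.
 */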
  3583. static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
  3584. {
  3585. struct ixgbe_hw *hw = &adapter->hw;
  3586. int rss_i = adapter->ring_feature[RING_F_RSS].indices;
  3587. u16 pool;
  3588. /* PSRTYPE must be initialized in non 82598 adapters */
  3589. u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
  3590. IXGBE_PSRTYPE_UDPHDR |
  3591. IXGBE_PSRTYPE_IPV4HDR |
  3592. IXGBE_PSRTYPE_L2HDR |
  3593. IXGBE_PSRTYPE_IPV6HDR;
  3594. if (hw->mac.type == ixgbe_mac_82598EB)
  3595. return;
  3596. if (rss_i > 3)
  3597. psrtype |= 2u << 29;
  3598. else if (rss_i > 1)
  3599. psrtype |= 1u << 29;
  3600. for_each_set_bit(pool, &adapter->fwd_bitmask, 32)
  3601. IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
  3602. }
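/* Set up SR-IOV/VMDq: enable VMDq with the PF pool as the default, restrict
 * Rx/Tx enables to the PF's pool, select the VT mode (16/32/64 pools) and
 * restore per-VF spoof checking and RSS query settings.
 */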
  3603. static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
  3604. {
  3605. struct ixgbe_hw *hw = &adapter->hw;
  3606. u32 reg_offset, vf_shift;
  3607. u32 gcr_ext, vmdctl;
  3608. int i;
  3609. if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
  3610. return;
  3611. vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
  3612. vmdctl |= IXGBE_VMD_CTL_VMDQ_EN;
  3613. vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
  3614. vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT;
  3615. vmdctl |= IXGBE_VT_CTL_REPLEN;
  3616. IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
  3617. vf_shift = VMDQ_P(0) % 32;
  3618. reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
  3619. /* Enable only the PF's pool for Tx/Rx */
  3620. IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), GENMASK(31, vf_shift));
  3621. IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
  3622. IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), GENMASK(31, vf_shift));
  3623. IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
  3624. if (adapter->bridge_mode == BRIDGE_MODE_VEB)
  3625. IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
  3626. /* Map PF MAC address in RAR Entry 0 to first pool following VFs */
  3627. hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));
  3628. /* clear VLAN promisc flag so VFTA will be updated if necessary */
  3629. adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC;
  3630. /*
  3631. * Set up VF register offsets for selected VT Mode,
  3632. * i.e. 32 or 64 VFs for SR-IOV
  3633. */
  3634. switch (adapter->ring_feature[RING_F_VMDQ].mask) {
  3635. case IXGBE_82599_VMDQ_8Q_MASK:
  3636. gcr_ext = IXGBE_GCR_EXT_VT_MODE_16;
  3637. break;
  3638. case IXGBE_82599_VMDQ_4Q_MASK:
  3639. gcr_ext = IXGBE_GCR_EXT_VT_MODE_32;
  3640. break;
  3641. default:
  3642. gcr_ext = IXGBE_GCR_EXT_VT_MODE_64;
  3643. break;
  3644. }
  3645. IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
  3646. for (i = 0; i < adapter->num_vfs; i++) {
  3647. /* configure spoof checking */
  3648. ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i,
  3649. adapter->vfinfo[i].spoofchk_enabled);
  3650. /* Enable/Disable RSS query feature */
  3651. ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i,
  3652. adapter->vfinfo[i].rss_query_enabled);
  3653. }
  3654. }
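/* Derive the maximum frame size from the MTU (and FCoE when enabled),
 * program MHADD/HLREG0 accordingly, and set the per-ring flags that select
 * RSC, 3K buffers and the build_skb receive path.
 */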
  3655. static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
  3656. {
  3657. struct ixgbe_hw *hw = &adapter->hw;
  3658. struct net_device *netdev = adapter->netdev;
  3659. int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
  3660. struct ixgbe_ring *rx_ring;
  3661. int i;
  3662. u32 mhadd, hlreg0;
  3663. #ifdef IXGBE_FCOE
  3664. /* adjust max frame to be able to do baby jumbo for FCoE */
  3665. if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
  3666. (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
  3667. max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
  3668. #endif /* IXGBE_FCOE */
  3669. /* adjust max frame to be at least the size of a standard frame */
  3670. if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
  3671. max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
  3672. mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
  3673. if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
  3674. mhadd &= ~IXGBE_MHADD_MFS_MASK;
  3675. mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
  3676. IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
  3677. }
  3678. hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
  3679. /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */
  3680. hlreg0 |= IXGBE_HLREG0_JUMBOEN;
  3681. IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
  3682. /*
  3683. * Setup the HW Rx Head and Tail Descriptor Pointers and
  3684. * the Base and Length of the Rx Descriptor Ring
  3685. */
  3686. for (i = 0; i < adapter->num_rx_queues; i++) {
  3687. rx_ring = adapter->rx_ring[i];
  3688. clear_ring_rsc_enabled(rx_ring);
  3689. clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
  3690. clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
  3691. if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
  3692. set_ring_rsc_enabled(rx_ring);
  3693. if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state))
  3694. set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
  3695. clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
  3696. if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
  3697. continue;
  3698. set_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
  3699. #if (PAGE_SIZE < 8192)
  3700. if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
  3701. set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
  3702. if (IXGBE_2K_TOO_SMALL_WITH_PADDING ||
  3703. (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
  3704. set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
  3705. #endif
  3706. }
  3707. }
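/* Tune RDRXCTL for the MAC type: MVMEN on 82598, the RSC and CRC-strip
 * related bits on 82599 and newer, and header padding strip (PSP) when VFs
 * are active on X550 parts.
 */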
  3708. static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
  3709. {
  3710. struct ixgbe_hw *hw = &adapter->hw;
  3711. u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
  3712. switch (hw->mac.type) {
  3713. case ixgbe_mac_82598EB:
  3714. /*
  3715. * For VMDq support of different descriptor types or
  3716. * buffer sizes through the use of multiple SRRCTL
  3717. * registers, RDRXCTL.MVMEN must be set to 1
  3718. *
  3719. * also, the manual doesn't mention it clearly but DCA hints
  3720. * will only use queue 0's tags unless this bit is set. Side
  3721. * effects of setting this bit are only that SRRCTL must be
  3722. * fully programmed [0..15]
  3723. */
  3724. rdrxctl |= IXGBE_RDRXCTL_MVMEN;
  3725. break;
  3726. case ixgbe_mac_X550:
  3727. case ixgbe_mac_X550EM_x:
  3728. case ixgbe_mac_x550em_a:
  3729. if (adapter->num_vfs)
  3730. rdrxctl |= IXGBE_RDRXCTL_PSP;
  3731. /* fall through */
  3732. case ixgbe_mac_82599EB:
  3733. case ixgbe_mac_X540:
  3734. /* Disable RSC for ACK packets */
  3735. IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
  3736. (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
  3737. rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
  3738. /* hardware requires some bits to be set by default */
  3739. rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
  3740. rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
  3741. break;
  3742. default:
  3743. /* We should do nothing since we don't know this hardware */
  3744. return;
  3745. }
  3746. IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
  3747. }
  3748. /**
  3749. * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
  3750. * @adapter: board private structure
  3751. *
  3752. * Configure the Rx unit of the MAC after a reset.
  3753. **/
  3754. static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
  3755. {
  3756. struct ixgbe_hw *hw = &adapter->hw;
  3757. int i;
  3758. u32 rxctrl, rfctl;
  3759. /* disable receives while setting up the descriptors */
  3760. hw->mac.ops.disable_rx(hw);
  3761. ixgbe_setup_psrtype(adapter);
  3762. ixgbe_setup_rdrxctl(adapter);
  3763. /* RSC Setup */
  3764. rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
  3765. rfctl &= ~IXGBE_RFCTL_RSC_DIS;
  3766. if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
  3767. rfctl |= IXGBE_RFCTL_RSC_DIS;
  3768. /* disable NFS filtering */
  3769. rfctl |= (IXGBE_RFCTL_NFSW_DIS | IXGBE_RFCTL_NFSR_DIS);
  3770. IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
  3771. /* Program registers for the distribution of queues */
  3772. ixgbe_setup_mrqc(adapter);
  3773. /* set_rx_buffer_len must be called before ring initialization */
  3774. ixgbe_set_rx_buffer_len(adapter);
  3775. /*
  3776. * Setup the HW Rx Head and Tail Descriptor Pointers and
  3777. * the Base and Length of the Rx Descriptor Ring
  3778. */
  3779. for (i = 0; i < adapter->num_rx_queues; i++)
  3780. ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
  3781. rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
  3782. /* disable drop enable for 82598 parts */
  3783. if (hw->mac.type == ixgbe_mac_82598EB)
  3784. rxctrl |= IXGBE_RXCTRL_DMBYPS;
  3785. /* enable all receives */
  3786. rxctrl |= IXGBE_RXCTRL_RXEN;
  3787. hw->mac.ops.enable_rx_dma(hw, rxctrl);
  3788. }
  3789. static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
  3790. __be16 proto, u16 vid)
  3791. {
  3792. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  3793. struct ixgbe_hw *hw = &adapter->hw;
  3794. /* add VID to filter table */
  3795. if (!vid || !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
  3796. hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, !!vid);
  3797. set_bit(vid, adapter->active_vlans);
  3798. return 0;
  3799. }
  3800. static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan)
  3801. {
  3802. u32 vlvf;
  3803. int idx;
  3804. /* short cut the special case */
  3805. if (vlan == 0)
  3806. return 0;
  3807. /* Search for the vlan id in the VLVF entries */
  3808. for (idx = IXGBE_VLVF_ENTRIES; --idx;) {
  3809. vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(idx));
  3810. if ((vlvf & VLAN_VID_MASK) == vlan)
  3811. break;
  3812. }
  3813. return idx;
  3814. }
  3815. void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid)
  3816. {
  3817. struct ixgbe_hw *hw = &adapter->hw;
  3818. u32 bits, word;
  3819. int idx;
  3820. idx = ixgbe_find_vlvf_entry(hw, vid);
  3821. if (!idx)
  3822. return;
  3823. /* See if any other pools are set for this VLAN filter
  3824. * entry other than the PF.
  3825. */
  3826. word = idx * 2 + (VMDQ_P(0) / 32);
  3827. bits = ~BIT(VMDQ_P(0) % 32);
  3828. bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
  3829. /* Disable the filter so this falls into the default pool. */
  3830. if (!bits && !IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1))) {
  3831. if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
  3832. IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), 0);
  3833. IXGBE_WRITE_REG(hw, IXGBE_VLVF(idx), 0);
  3834. }
  3835. }
  3836. static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
  3837. __be16 proto, u16 vid)
  3838. {
  3839. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  3840. struct ixgbe_hw *hw = &adapter->hw;
  3841. /* remove VID from filter table */
  3842. if (vid && !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
  3843. hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true);
  3844. clear_bit(vid, adapter->active_vlans);
  3845. return 0;
  3846. }
  3847. /**
  3848. * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
  3849. * @adapter: driver data
  3850. */
  3851. static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
  3852. {
  3853. struct ixgbe_hw *hw = &adapter->hw;
  3854. u32 vlnctrl;
  3855. int i, j;
  3856. switch (hw->mac.type) {
  3857. case ixgbe_mac_82598EB:
  3858. vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
  3859. vlnctrl &= ~IXGBE_VLNCTRL_VME;
  3860. IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
  3861. break;
  3862. case ixgbe_mac_82599EB:
  3863. case ixgbe_mac_X540:
  3864. case ixgbe_mac_X550:
  3865. case ixgbe_mac_X550EM_x:
  3866. case ixgbe_mac_x550em_a:
  3867. for (i = 0; i < adapter->num_rx_queues; i++) {
  3868. struct ixgbe_ring *ring = adapter->rx_ring[i];
  3869. if (ring->l2_accel_priv)
  3870. continue;
  3871. j = ring->reg_idx;
  3872. vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
  3873. vlnctrl &= ~IXGBE_RXDCTL_VME;
  3874. IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
  3875. }
  3876. break;
  3877. default:
  3878. break;
  3879. }
  3880. }
  3881. /**
  3882. * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping
  3883. * @adapter: driver data
  3884. */
  3885. static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
  3886. {
  3887. struct ixgbe_hw *hw = &adapter->hw;
  3888. u32 vlnctrl;
  3889. int i, j;
  3890. switch (hw->mac.type) {
  3891. case ixgbe_mac_82598EB:
  3892. vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
  3893. vlnctrl |= IXGBE_VLNCTRL_VME;
  3894. IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
  3895. break;
  3896. case ixgbe_mac_82599EB:
  3897. case ixgbe_mac_X540:
  3898. case ixgbe_mac_X550:
  3899. case ixgbe_mac_X550EM_x:
  3900. case ixgbe_mac_x550em_a:
  3901. for (i = 0; i < adapter->num_rx_queues; i++) {
  3902. struct ixgbe_ring *ring = adapter->rx_ring[i];
  3903. if (ring->l2_accel_priv)
  3904. continue;
  3905. j = ring->reg_idx;
  3906. vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
  3907. vlnctrl |= IXGBE_RXDCTL_VME;
  3908. IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
  3909. }
  3910. break;
  3911. default:
  3912. break;
  3913. }
  3914. }
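/* Enter VLAN promiscuous mode: keep VLAN filtering enabled when VMDq/SR-IOV
 * is active, add the PF to every VLVF pool and set every bit in the VFTA so
 * all VLAN tags are accepted.
 */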
  3915. static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
  3916. {
  3917. struct ixgbe_hw *hw = &adapter->hw;
  3918. u32 vlnctrl, i;
  3919. vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
  3920. if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
  3921. /* For VMDq and SR-IOV we must leave VLAN filtering enabled */
  3922. vlnctrl |= IXGBE_VLNCTRL_VFE;
  3923. IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
  3924. } else {
  3925. vlnctrl &= ~IXGBE_VLNCTRL_VFE;
  3926. IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
  3927. return;
  3928. }
  3929. /* Nothing to do for 82598 */
  3930. if (hw->mac.type == ixgbe_mac_82598EB)
  3931. return;
  3932. /* We are already in VLAN promisc, nothing to do */
  3933. if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)
  3934. return;
  3935. /* Set flag so we don't redo unnecessary work */
  3936. adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC;
  3937. /* Add PF to all active pools */
  3938. for (i = IXGBE_VLVF_ENTRIES; --i;) {
  3939. u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32);
  3940. u32 vlvfb = IXGBE_READ_REG(hw, reg_offset);
  3941. vlvfb |= BIT(VMDQ_P(0) % 32);
  3942. IXGBE_WRITE_REG(hw, reg_offset, vlvfb);
  3943. }
  3944. /* Set all bits in the VLAN filter table array */
  3945. for (i = hw->mac.vft_size; i--;)
  3946. IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), ~0U);
  3947. }
  3948. #define VFTA_BLOCK_SIZE 8
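/* Rebuild one VFTA_BLOCK_SIZE worth of VFTA registers from active_vlans and
 * drop the PF from any VLVF pools whose VLAN it no longer uses.
 */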
  3949. static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset)
  3950. {
  3951. struct ixgbe_hw *hw = &adapter->hw;
  3952. u32 vfta[VFTA_BLOCK_SIZE] = { 0 };
  3953. u32 vid_start = vfta_offset * 32;
  3954. u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32);
  3955. u32 i, vid, word, bits;
  3956. for (i = IXGBE_VLVF_ENTRIES; --i;) {
  3957. u32 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i));
  3958. /* pull VLAN ID from VLVF */
  3959. vid = vlvf & VLAN_VID_MASK;
3960. /* only concern ourselves with a certain range */
  3961. if (vid < vid_start || vid >= vid_end)
  3962. continue;
  3963. if (vlvf) {
  3964. /* record VLAN ID in VFTA */
  3965. vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
  3966. /* if PF is part of this then continue */
  3967. if (test_bit(vid, adapter->active_vlans))
  3968. continue;
  3969. }
  3970. /* remove PF from the pool */
  3971. word = i * 2 + VMDQ_P(0) / 32;
  3972. bits = ~BIT(VMDQ_P(0) % 32);
  3973. bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
  3974. IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits);
  3975. }
  3976. /* extract values from active_vlans and write back to VFTA */
  3977. for (i = VFTA_BLOCK_SIZE; i--;) {
  3978. vid = (vfta_offset + i) * 32;
  3979. word = vid / BITS_PER_LONG;
  3980. bits = vid % BITS_PER_LONG;
  3981. vfta[i] |= adapter->active_vlans[word] >> bits;
  3982. IXGBE_WRITE_REG(hw, IXGBE_VFTA(vfta_offset + i), vfta[i]);
  3983. }
  3984. }
  3985. static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
  3986. {
  3987. struct ixgbe_hw *hw = &adapter->hw;
  3988. u32 vlnctrl, i;
  3989. /* Set VLAN filtering to enabled */
  3990. vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
  3991. vlnctrl |= IXGBE_VLNCTRL_VFE;
  3992. IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
  3993. if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) ||
  3994. hw->mac.type == ixgbe_mac_82598EB)
  3995. return;
  3996. /* We are not in VLAN promisc, nothing to do */
  3997. if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
  3998. return;
  3999. /* Set flag so we don't redo unnecessary work */
  4000. adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC;
  4001. for (i = 0; i < hw->mac.vft_size; i += VFTA_BLOCK_SIZE)
  4002. ixgbe_scrub_vfta(adapter, i);
  4003. }
  4004. static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
  4005. {
  4006. u16 vid = 1;
  4007. ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
  4008. for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
  4009. ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
  4010. }
  4011. /**
  4012. * ixgbe_write_mc_addr_list - write multicast addresses to MTA
  4013. * @netdev: network interface device structure
  4014. *
  4015. * Writes multicast address list to the MTA hash table.
  4016. * Returns: -ENOMEM on failure
  4017. * 0 on no addresses written
  4018. * X on writing X addresses to MTA
  4019. **/
  4020. static int ixgbe_write_mc_addr_list(struct net_device *netdev)
  4021. {
  4022. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  4023. struct ixgbe_hw *hw = &adapter->hw;
  4024. if (!netif_running(netdev))
  4025. return 0;
  4026. if (hw->mac.ops.update_mc_addr_list)
  4027. hw->mac.ops.update_mc_addr_list(hw, netdev);
  4028. else
  4029. return -ENOMEM;
  4030. #ifdef CONFIG_PCI_IOV
  4031. ixgbe_restore_vf_multicasts(adapter);
  4032. #endif
  4033. return netdev_mc_count(netdev);
  4034. }
  4035. #ifdef CONFIG_PCI_IOV
  4036. void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter)
  4037. {
  4038. struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
  4039. struct ixgbe_hw *hw = &adapter->hw;
  4040. int i;
  4041. for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
  4042. mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED;
  4043. if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
  4044. hw->mac.ops.set_rar(hw, i,
  4045. mac_table->addr,
  4046. mac_table->pool,
  4047. IXGBE_RAH_AV);
  4048. else
  4049. hw->mac.ops.clear_rar(hw, i);
  4050. }
  4051. }
  4052. #endif
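/* Push software MAC table entries marked MODIFIED out to the RAR registers,
 * clearing entries that are no longer in use.
 */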
  4053. static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter)
  4054. {
  4055. struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
  4056. struct ixgbe_hw *hw = &adapter->hw;
  4057. int i;
  4058. for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
  4059. if (!(mac_table->state & IXGBE_MAC_STATE_MODIFIED))
  4060. continue;
  4061. mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED;
  4062. if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
  4063. hw->mac.ops.set_rar(hw, i,
  4064. mac_table->addr,
  4065. mac_table->pool,
  4066. IXGBE_RAH_AV);
  4067. else
  4068. hw->mac.ops.clear_rar(hw, i);
  4069. }
  4070. }
  4071. static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter)
  4072. {
  4073. struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
  4074. struct ixgbe_hw *hw = &adapter->hw;
  4075. int i;
  4076. for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
  4077. mac_table->state |= IXGBE_MAC_STATE_MODIFIED;
  4078. mac_table->state &= ~IXGBE_MAC_STATE_IN_USE;
  4079. }
  4080. ixgbe_sync_mac_table(adapter);
  4081. }
  4082. static int ixgbe_available_rars(struct ixgbe_adapter *adapter, u16 pool)
  4083. {
  4084. struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
  4085. struct ixgbe_hw *hw = &adapter->hw;
  4086. int i, count = 0;
  4087. for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
  4088. /* do not count default RAR as available */
  4089. if (mac_table->state & IXGBE_MAC_STATE_DEFAULT)
  4090. continue;
  4091. /* only count unused and addresses that belong to us */
  4092. if (mac_table->state & IXGBE_MAC_STATE_IN_USE) {
  4093. if (mac_table->pool != pool)
  4094. continue;
  4095. }
  4096. count++;
  4097. }
  4098. return count;
  4099. }
  4100. /* this function destroys the first RAR entry */
  4101. static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter)
  4102. {
  4103. struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
  4104. struct ixgbe_hw *hw = &adapter->hw;
  4105. memcpy(&mac_table->addr, hw->mac.addr, ETH_ALEN);
  4106. mac_table->pool = VMDQ_P(0);
  4107. mac_table->state = IXGBE_MAC_STATE_DEFAULT | IXGBE_MAC_STATE_IN_USE;
  4108. hw->mac.ops.set_rar(hw, 0, mac_table->addr, mac_table->pool,
  4109. IXGBE_RAH_AV);
  4110. }
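/* Claim a free RAR entry for @addr in @pool and sync it to hardware.
 * Returns the table index used, -EINVAL for a zero address, or -ENOMEM if
 * the table is full.
 */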
  4111. int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
  4112. const u8 *addr, u16 pool)
  4113. {
  4114. struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
  4115. struct ixgbe_hw *hw = &adapter->hw;
  4116. int i;
  4117. if (is_zero_ether_addr(addr))
  4118. return -EINVAL;
  4119. for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
  4120. if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
  4121. continue;
  4122. ether_addr_copy(mac_table->addr, addr);
  4123. mac_table->pool = pool;
  4124. mac_table->state |= IXGBE_MAC_STATE_MODIFIED |
  4125. IXGBE_MAC_STATE_IN_USE;
  4126. ixgbe_sync_mac_table(adapter);
  4127. return i;
  4128. }
  4129. return -ENOMEM;
  4130. }
  4131. int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
  4132. const u8 *addr, u16 pool)
  4133. {
  4134. struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
  4135. struct ixgbe_hw *hw = &adapter->hw;
  4136. int i;
  4137. if (is_zero_ether_addr(addr))
  4138. return -EINVAL;
  4139. /* search table for addr, if found clear IN_USE flag and sync */
  4140. for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
  4141. /* we can only delete an entry if it is in use */
  4142. if (!(mac_table->state & IXGBE_MAC_STATE_IN_USE))
  4143. continue;
  4144. /* we only care about entries that belong to the given pool */
  4145. if (mac_table->pool != pool)
  4146. continue;
  4147. /* we only care about a specific MAC address */
  4148. if (!ether_addr_equal(addr, mac_table->addr))
  4149. continue;
  4150. mac_table->state |= IXGBE_MAC_STATE_MODIFIED;
  4151. mac_table->state &= ~IXGBE_MAC_STATE_IN_USE;
  4152. ixgbe_sync_mac_table(adapter);
  4153. return 0;
  4154. }
  4155. return -ENOMEM;
  4156. }
  4157. /**
  4158. * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
  4159. * @netdev: network interface device structure
  4160. *
  4161. * Writes unicast address list to the RAR table.
  4162. * Returns: -ENOMEM on failure/insufficient address space
  4163. * 0 on no addresses written
  4164. * X on writing X addresses to the RAR table
  4165. **/
  4166. static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn)
  4167. {
  4168. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  4169. int count = 0;
  4170. /* return ENOMEM indicating insufficient memory for addresses */
  4171. if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter, vfn))
  4172. return -ENOMEM;
  4173. if (!netdev_uc_empty(netdev)) {
  4174. struct netdev_hw_addr *ha;
  4175. netdev_for_each_uc_addr(ha, netdev) {
  4176. ixgbe_del_mac_filter(adapter, ha->addr, vfn);
  4177. ixgbe_add_mac_filter(adapter, ha->addr, vfn);
  4178. count++;
  4179. }
  4180. }
  4181. return count;
  4182. }
  4183. static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr)
  4184. {
  4185. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  4186. int ret;
  4187. ret = ixgbe_add_mac_filter(adapter, addr, VMDQ_P(0));
  4188. return min_t(int, ret, 0);
  4189. }
  4190. static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr)
  4191. {
  4192. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  4193. ixgbe_del_mac_filter(adapter, addr, VMDQ_P(0));
  4194. return 0;
  4195. }
  4196. /**
  4197. * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
  4198. * @netdev: network interface device structure
  4199. *
  4200. * The set_rx_method entry point is called whenever the unicast/multicast
  4201. * address list or the network interface flags are updated. This routine is
  4202. * responsible for configuring the hardware for proper unicast, multicast and
  4203. * promiscuous mode.
  4204. **/
  4205. void ixgbe_set_rx_mode(struct net_device *netdev)
  4206. {
  4207. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  4208. struct ixgbe_hw *hw = &adapter->hw;
  4209. u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
  4210. netdev_features_t features = netdev->features;
  4211. int count;
  4212. /* Check for Promiscuous and All Multicast modes */
  4213. fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
  4214. /* set all bits that we expect to always be set */
  4215. fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
  4216. fctrl |= IXGBE_FCTRL_BAM;
  4217. fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
  4218. fctrl |= IXGBE_FCTRL_PMCF;
  4219. /* clear the bits we are changing the status of */
  4220. fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
  4221. if (netdev->flags & IFF_PROMISC) {
  4222. hw->addr_ctrl.user_set_promisc = true;
  4223. fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
  4224. vmolr |= IXGBE_VMOLR_MPE;
  4225. features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
  4226. } else {
  4227. if (netdev->flags & IFF_ALLMULTI) {
  4228. fctrl |= IXGBE_FCTRL_MPE;
  4229. vmolr |= IXGBE_VMOLR_MPE;
  4230. }
  4231. hw->addr_ctrl.user_set_promisc = false;
  4232. }
  4233. /*
  4234. * Write addresses to available RAR registers, if there is not
  4235. * sufficient space to store all the addresses then enable
  4236. * unicast promiscuous mode
  4237. */
  4238. if (__dev_uc_sync(netdev, ixgbe_uc_sync, ixgbe_uc_unsync)) {
  4239. fctrl |= IXGBE_FCTRL_UPE;
  4240. vmolr |= IXGBE_VMOLR_ROPE;
  4241. }
  4242. /* Write addresses to the MTA, if the attempt fails
  4243. * then we should just turn on promiscuous mode so
  4244. * that we can at least receive multicast traffic
  4245. */
  4246. count = ixgbe_write_mc_addr_list(netdev);
  4247. if (count < 0) {
  4248. fctrl |= IXGBE_FCTRL_MPE;
  4249. vmolr |= IXGBE_VMOLR_MPE;
  4250. } else if (count) {
  4251. vmolr |= IXGBE_VMOLR_ROMPE;
  4252. }
  4253. if (hw->mac.type != ixgbe_mac_82598EB) {
  4254. vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
  4255. ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
  4256. IXGBE_VMOLR_ROPE);
  4257. IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr);
  4258. }
  4259. /* This is useful for sniffing bad packets. */
  4260. if (features & NETIF_F_RXALL) {
  4261. /* UPE and MPE will be handled by normal PROMISC logic
4262. * in ixgbe_set_rx_mode */
  4263. fctrl |= (IXGBE_FCTRL_SBP | /* Receive bad packets */
  4264. IXGBE_FCTRL_BAM | /* RX All Bcast Pkts */
  4265. IXGBE_FCTRL_PMCF); /* RX All MAC Ctrl Pkts */
  4266. fctrl &= ~(IXGBE_FCTRL_DPF);
  4267. /* NOTE: VLAN filtering is disabled by setting PROMISC */
  4268. }
  4269. IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
  4270. if (features & NETIF_F_HW_VLAN_CTAG_RX)
  4271. ixgbe_vlan_strip_enable(adapter);
  4272. else
  4273. ixgbe_vlan_strip_disable(adapter);
  4274. if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
  4275. ixgbe_vlan_promisc_disable(adapter);
  4276. else
  4277. ixgbe_vlan_promisc_enable(adapter);
  4278. }
  4279. static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
  4280. {
  4281. int q_idx;
  4282. for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
  4283. napi_enable(&adapter->q_vector[q_idx]->napi);
  4284. }
  4285. static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
  4286. {
  4287. int q_idx;
  4288. for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
  4289. napi_disable(&adapter->q_vector[q_idx]->napi);
  4290. }
  4291. static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask)
  4292. {
  4293. struct ixgbe_hw *hw = &adapter->hw;
  4294. u32 vxlanctrl;
  4295. if (!(adapter->flags & (IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE |
  4296. IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)))
  4297. return;
  4298. vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) & ~mask;
  4299. IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, vxlanctrl);
  4300. if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK)
  4301. adapter->vxlan_port = 0;
  4302. if (mask & IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK)
  4303. adapter->geneve_port = 0;
  4304. }
  4305. #ifdef CONFIG_IXGBE_DCB
  4306. /**
  4307. * ixgbe_configure_dcb - Configure DCB hardware
  4308. * @adapter: ixgbe adapter struct
  4309. *
  4310. * This is called by the driver on open to configure the DCB hardware.
  4311. * This is also called by the gennetlink interface when reconfiguring
  4312. * the DCB state.
  4313. */
  4314. static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
  4315. {
  4316. struct ixgbe_hw *hw = &adapter->hw;
  4317. int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
  4318. if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
  4319. if (hw->mac.type == ixgbe_mac_82598EB)
  4320. netif_set_gso_max_size(adapter->netdev, 65536);
  4321. return;
  4322. }
  4323. if (hw->mac.type == ixgbe_mac_82598EB)
  4324. netif_set_gso_max_size(adapter->netdev, 32768);
  4325. #ifdef IXGBE_FCOE
  4326. if (adapter->netdev->features & NETIF_F_FCOE_MTU)
  4327. max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
  4328. #endif
  4329. /* reconfigure the hardware */
  4330. if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
  4331. ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
  4332. DCB_TX_CONFIG);
  4333. ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
  4334. DCB_RX_CONFIG);
  4335. ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
  4336. } else if (adapter->ixgbe_ieee_ets && adapter->ixgbe_ieee_pfc) {
  4337. ixgbe_dcb_hw_ets(&adapter->hw,
  4338. adapter->ixgbe_ieee_ets,
  4339. max_frame);
  4340. ixgbe_dcb_hw_pfc_config(&adapter->hw,
  4341. adapter->ixgbe_ieee_pfc->pfc_en,
  4342. adapter->ixgbe_ieee_ets->prio_tc);
  4343. }
  4344. /* Enable RSS Hash per TC */
  4345. if (hw->mac.type != ixgbe_mac_82598EB) {
  4346. u32 msb = 0;
  4347. u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1;
  4348. while (rss_i) {
  4349. msb++;
  4350. rss_i >>= 1;
  4351. }
  4352. /* write msb to all 8 TCs in one write */
  4353. IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111);
  4354. }
  4355. }
  4356. #endif
  4357. /* Additional bittime to account for IXGBE framing */
  4358. #define IXGBE_ETH_FRAMING 20
  4359. /**
  4360. * ixgbe_hpbthresh - calculate high water mark for flow control
  4361. *
  4362. * @adapter: board private structure to calculate for
  4363. * @pb: packet buffer to calculate
  4364. */
  4365. static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
  4366. {
  4367. struct ixgbe_hw *hw = &adapter->hw;
  4368. struct net_device *dev = adapter->netdev;
  4369. int link, tc, kb, marker;
  4370. u32 dv_id, rx_pba;
  4371. /* Calculate max LAN frame size */
  4372. tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING;
  4373. #ifdef IXGBE_FCOE
  4374. /* FCoE traffic class uses FCOE jumbo frames */
  4375. if ((dev->features & NETIF_F_FCOE_MTU) &&
  4376. (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
  4377. (pb == ixgbe_fcoe_get_tc(adapter)))
  4378. tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
  4379. #endif
  4380. /* Calculate delay value for device */
  4381. switch (hw->mac.type) {
  4382. case ixgbe_mac_X540:
  4383. case ixgbe_mac_X550:
  4384. case ixgbe_mac_X550EM_x:
  4385. case ixgbe_mac_x550em_a:
  4386. dv_id = IXGBE_DV_X540(link, tc);
  4387. break;
  4388. default:
  4389. dv_id = IXGBE_DV(link, tc);
  4390. break;
  4391. }
  4392. /* Loopback switch introduces additional latency */
  4393. if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
  4394. dv_id += IXGBE_B2BT(tc);
4395. /* Delay value is calculated in bit times; convert to KB */
  4396. kb = IXGBE_BT2KB(dv_id);
  4397. rx_pba = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(pb)) >> 10;
  4398. marker = rx_pba - kb;
  4399. /* It is possible that the packet buffer is not large enough
4400. * to provide the required headroom. In this case warn the
4401. * user and do the best we can.
  4402. */
  4403. if (marker < 0) {
4404. e_warn(drv, "Packet Buffer(%i) cannot provide enough "
4405. "headroom to support flow control. "
4406. "Decrease MTU or number of traffic classes\n", pb);
  4407. marker = tc + 1;
  4408. }
  4409. return marker;
  4410. }
  4411. /**
4412. * ixgbe_lpbthresh - calculate low water mark for flow control
  4413. *
  4414. * @adapter: board private structure to calculate for
  4415. * @pb: packet buffer to calculate
  4416. */
  4417. static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
  4418. {
  4419. struct ixgbe_hw *hw = &adapter->hw;
  4420. struct net_device *dev = adapter->netdev;
  4421. int tc;
  4422. u32 dv_id;
  4423. /* Calculate max LAN frame size */
  4424. tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
  4425. #ifdef IXGBE_FCOE
  4426. /* FCoE traffic class uses FCOE jumbo frames */
  4427. if ((dev->features & NETIF_F_FCOE_MTU) &&
  4428. (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
  4429. (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up)))
  4430. tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
  4431. #endif
  4432. /* Calculate delay value for device */
  4433. switch (hw->mac.type) {
  4434. case ixgbe_mac_X540:
  4435. case ixgbe_mac_X550:
  4436. case ixgbe_mac_X550EM_x:
  4437. case ixgbe_mac_x550em_a:
  4438. dv_id = IXGBE_LOW_DV_X540(tc);
  4439. break;
  4440. default:
  4441. dv_id = IXGBE_LOW_DV(tc);
  4442. break;
  4443. }
4444. /* Delay value is calculated in bit times; convert to KB */
  4445. return IXGBE_BT2KB(dv_id);
  4446. }
  4447. /*
  4448. * ixgbe_pbthresh_setup - calculate and setup high low water marks
  4449. */
  4450. static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
  4451. {
  4452. struct ixgbe_hw *hw = &adapter->hw;
  4453. int num_tc = netdev_get_num_tc(adapter->netdev);
  4454. int i;
  4455. if (!num_tc)
  4456. num_tc = 1;
  4457. for (i = 0; i < num_tc; i++) {
  4458. hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
  4459. hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i);
  4460. /* Low water marks must not be larger than high water marks */
  4461. if (hw->fc.low_water[i] > hw->fc.high_water[i])
  4462. hw->fc.low_water[i] = 0;
  4463. }
  4464. for (; i < MAX_TRAFFIC_CLASS; i++)
  4465. hw->fc.high_water[i] = 0;
  4466. }
  4467. static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
  4468. {
  4469. struct ixgbe_hw *hw = &adapter->hw;
  4470. int hdrm;
  4471. u8 tc = netdev_get_num_tc(adapter->netdev);
  4472. if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
  4473. adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
  4474. hdrm = 32 << adapter->fdir_pballoc;
  4475. else
  4476. hdrm = 0;
  4477. hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL);
  4478. ixgbe_pbthresh_setup(adapter);
  4479. }
  4480. static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
  4481. {
  4482. struct ixgbe_hw *hw = &adapter->hw;
  4483. struct hlist_node *node2;
  4484. struct ixgbe_fdir_filter *filter;
  4485. spin_lock(&adapter->fdir_perfect_lock);
  4486. if (!hlist_empty(&adapter->fdir_filter_list))
  4487. ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);
  4488. hlist_for_each_entry_safe(filter, node2,
  4489. &adapter->fdir_filter_list, fdir_node) {
  4490. ixgbe_fdir_write_perfect_filter_82599(hw,
  4491. &filter->filter,
  4492. filter->sw_idx,
  4493. (filter->action == IXGBE_FDIR_DROP_QUEUE) ?
  4494. IXGBE_FDIR_DROP_QUEUE :
  4495. adapter->rx_ring[filter->action]->reg_idx);
  4496. }
  4497. spin_unlock(&adapter->fdir_perfect_lock);
  4498. }
  4499. static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
  4500. struct ixgbe_adapter *adapter)
  4501. {
  4502. struct ixgbe_hw *hw = &adapter->hw;
  4503. u32 vmolr;
  4504. /* No unicast promiscuous support for VMDQ devices. */
  4505. vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
  4506. vmolr |= (IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);
  4507. /* clear the affected bit */
  4508. vmolr &= ~IXGBE_VMOLR_MPE;
  4509. if (dev->flags & IFF_ALLMULTI) {
  4510. vmolr |= IXGBE_VMOLR_MPE;
  4511. } else {
  4512. vmolr |= IXGBE_VMOLR_ROMPE;
  4513. hw->mac.ops.update_mc_addr_list(hw, dev);
  4514. }
  4515. ixgbe_write_uc_addr_list(adapter->netdev, pool);
  4516. IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
  4517. }
  4518. static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
  4519. {
  4520. struct ixgbe_adapter *adapter = vadapter->real_adapter;
  4521. int rss_i = adapter->num_rx_queues_per_pool;
  4522. struct ixgbe_hw *hw = &adapter->hw;
  4523. u16 pool = vadapter->pool;
  4524. u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
  4525. IXGBE_PSRTYPE_UDPHDR |
  4526. IXGBE_PSRTYPE_IPV4HDR |
  4527. IXGBE_PSRTYPE_L2HDR |
  4528. IXGBE_PSRTYPE_IPV6HDR;
  4529. if (hw->mac.type == ixgbe_mac_82598EB)
  4530. return;
  4531. if (rss_i > 3)
  4532. psrtype |= 2u << 29;
  4533. else if (rss_i > 1)
  4534. psrtype |= 1u << 29;
  4535. IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
  4536. }
  4537. /**
  4538. * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
  4539. * @rx_ring: ring to free buffers from
  4540. **/
  4541. static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
  4542. {
  4543. u16 i = rx_ring->next_to_clean;
  4544. struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
  4545. /* Free all the Rx ring sk_buffs */
  4546. while (i != rx_ring->next_to_alloc) {
  4547. if (rx_buffer->skb) {
  4548. struct sk_buff *skb = rx_buffer->skb;
  4549. if (IXGBE_CB(skb)->page_released)
  4550. dma_unmap_page_attrs(rx_ring->dev,
  4551. IXGBE_CB(skb)->dma,
  4552. ixgbe_rx_pg_size(rx_ring),
  4553. DMA_FROM_DEVICE,
  4554. IXGBE_RX_DMA_ATTR);
  4555. dev_kfree_skb(skb);
  4556. }
  4557. /* Invalidate cache lines that may have been written to by
  4558. * device so that we avoid corrupting memory.
  4559. */
  4560. dma_sync_single_range_for_cpu(rx_ring->dev,
  4561. rx_buffer->dma,
  4562. rx_buffer->page_offset,
  4563. ixgbe_rx_bufsz(rx_ring),
  4564. DMA_FROM_DEVICE);
  4565. /* free resources associated with mapping */
  4566. dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
  4567. ixgbe_rx_pg_size(rx_ring),
  4568. DMA_FROM_DEVICE,
  4569. IXGBE_RX_DMA_ATTR);
  4570. __page_frag_cache_drain(rx_buffer->page,
  4571. rx_buffer->pagecnt_bias);
  4572. i++;
  4573. rx_buffer++;
  4574. if (i == rx_ring->count) {
  4575. i = 0;
  4576. rx_buffer = rx_ring->rx_buffer_info;
  4577. }
  4578. }
  4579. rx_ring->next_to_alloc = 0;
  4580. rx_ring->next_to_clean = 0;
  4581. rx_ring->next_to_use = 0;
  4582. }
  4583. static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter,
  4584. struct ixgbe_ring *rx_ring)
  4585. {
  4586. struct ixgbe_adapter *adapter = vadapter->real_adapter;
  4587. int index = rx_ring->queue_index + vadapter->rx_base_queue;
  4588. /* shutdown specific queue receive and wait for dma to settle */
  4589. ixgbe_disable_rx_queue(adapter, rx_ring);
  4590. usleep_range(10000, 20000);
  4591. ixgbe_irq_disable_queues(adapter, BIT_ULL(index));
  4592. ixgbe_clean_rx_ring(rx_ring);
  4593. rx_ring->l2_accel_priv = NULL;
  4594. }
  4595. static int ixgbe_fwd_ring_down(struct net_device *vdev,
  4596. struct ixgbe_fwd_adapter *accel)
  4597. {
  4598. struct ixgbe_adapter *adapter = accel->real_adapter;
  4599. unsigned int rxbase = accel->rx_base_queue;
  4600. unsigned int txbase = accel->tx_base_queue;
  4601. int i;
  4602. netif_tx_stop_all_queues(vdev);
  4603. for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
  4604. ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
  4605. adapter->rx_ring[rxbase + i]->netdev = adapter->netdev;
  4606. }
  4607. for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
  4608. adapter->tx_ring[txbase + i]->l2_accel_priv = NULL;
  4609. adapter->tx_ring[txbase + i]->netdev = adapter->netdev;
  4610. }
  4611. return 0;
  4612. }
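/* Bring up the Rx/Tx rings backing a macvlan offload pool: point the rings
 * at the macvlan netdev, reprogram them, set the real queue counts and
 * install the MAC filter and Rx mode for the pool.
 */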
  4613. static int ixgbe_fwd_ring_up(struct net_device *vdev,
  4614. struct ixgbe_fwd_adapter *accel)
  4615. {
  4616. struct ixgbe_adapter *adapter = accel->real_adapter;
  4617. unsigned int rxbase, txbase, queues;
  4618. int i, baseq, err = 0;
  4619. if (!test_bit(accel->pool, &adapter->fwd_bitmask))
  4620. return 0;
  4621. baseq = accel->pool * adapter->num_rx_queues_per_pool;
  4622. netdev_dbg(vdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
  4623. accel->pool, adapter->num_rx_pools,
  4624. baseq, baseq + adapter->num_rx_queues_per_pool,
  4625. adapter->fwd_bitmask);
  4626. accel->netdev = vdev;
  4627. accel->rx_base_queue = rxbase = baseq;
  4628. accel->tx_base_queue = txbase = baseq;
  4629. for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
  4630. ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
  4631. for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
  4632. adapter->rx_ring[rxbase + i]->netdev = vdev;
  4633. adapter->rx_ring[rxbase + i]->l2_accel_priv = accel;
  4634. ixgbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]);
  4635. }
  4636. for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
  4637. adapter->tx_ring[txbase + i]->netdev = vdev;
  4638. adapter->tx_ring[txbase + i]->l2_accel_priv = accel;
  4639. }
  4640. queues = min_t(unsigned int,
  4641. adapter->num_rx_queues_per_pool, vdev->num_tx_queues);
  4642. err = netif_set_real_num_tx_queues(vdev, queues);
  4643. if (err)
  4644. goto fwd_queue_err;
  4645. err = netif_set_real_num_rx_queues(vdev, queues);
  4646. if (err)
  4647. goto fwd_queue_err;
  4648. if (is_valid_ether_addr(vdev->dev_addr))
  4649. ixgbe_add_mac_filter(adapter, vdev->dev_addr, accel->pool);
  4650. ixgbe_fwd_psrtype(accel);
  4651. ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter);
  4652. return err;
  4653. fwd_queue_err:
  4654. ixgbe_fwd_ring_down(vdev, accel);
  4655. return err;
  4656. }
  4657. static int ixgbe_upper_dev_walk(struct net_device *upper, void *data)
  4658. {
  4659. if (netif_is_macvlan(upper)) {
  4660. struct macvlan_dev *dfwd = netdev_priv(upper);
  4661. struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
  4662. if (dfwd->fwd_priv)
  4663. ixgbe_fwd_ring_up(upper, vadapter);
  4664. }
  4665. return 0;
  4666. }
  4667. static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
  4668. {
  4669. netdev_walk_all_upper_dev_rcu(adapter->netdev,
  4670. ixgbe_upper_dev_walk, NULL);
  4671. }
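/* Top-level hardware configuration performed when the interface is brought
 * up: packet buffers, DCB, virtualization, Rx mode/VLANs, flow director,
 * FCoE, DCA and finally the Tx and Rx rings.
 */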
  4672. static void ixgbe_configure(struct ixgbe_adapter *adapter)
  4673. {
  4674. struct ixgbe_hw *hw = &adapter->hw;
  4675. ixgbe_configure_pb(adapter);
  4676. #ifdef CONFIG_IXGBE_DCB
  4677. ixgbe_configure_dcb(adapter);
  4678. #endif
  4679. /*
  4680. * We must restore virtualization before VLANs or else
  4681. * the VLVF registers will not be populated
  4682. */
  4683. ixgbe_configure_virtualization(adapter);
  4684. ixgbe_set_rx_mode(adapter->netdev);
  4685. ixgbe_restore_vlan(adapter);
  4686. switch (hw->mac.type) {
  4687. case ixgbe_mac_82599EB:
  4688. case ixgbe_mac_X540:
  4689. hw->mac.ops.disable_rx_buff(hw);
  4690. break;
  4691. default:
  4692. break;
  4693. }
  4694. if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
  4695. ixgbe_init_fdir_signature_82599(&adapter->hw,
  4696. adapter->fdir_pballoc);
  4697. } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
  4698. ixgbe_init_fdir_perfect_82599(&adapter->hw,
  4699. adapter->fdir_pballoc);
  4700. ixgbe_fdir_filter_restore(adapter);
  4701. }
  4702. switch (hw->mac.type) {
  4703. case ixgbe_mac_82599EB:
  4704. case ixgbe_mac_X540:
  4705. hw->mac.ops.enable_rx_buff(hw);
  4706. break;
  4707. default:
  4708. break;
  4709. }
  4710. #ifdef CONFIG_IXGBE_DCA
  4711. /* configure DCA */
  4712. if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE)
  4713. ixgbe_setup_dca(adapter);
  4714. #endif /* CONFIG_IXGBE_DCA */
  4715. #ifdef IXGBE_FCOE
  4716. /* configure FCoE L2 filters, redirection table, and Rx control */
  4717. ixgbe_configure_fcoe(adapter);
  4718. #endif /* IXGBE_FCOE */
  4719. ixgbe_configure_tx(adapter);
  4720. ixgbe_configure_rx(adapter);
  4721. ixgbe_configure_dfwd(adapter);
  4722. }
  4723. /**
  4724. * ixgbe_sfp_link_config - set up SFP+ link
  4725. * @adapter: pointer to private adapter struct
  4726. **/
  4727. static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
  4728. {
  4729. /*
  4730. * We are assuming the worst case scenario here, and that
  4731. * is that an SFP was inserted/removed after the reset
  4732. * but before SFP detection was enabled. As such the best
4733. solution is to just start searching as soon as we start up.

  4734. */
  4735. if (adapter->hw.mac.type == ixgbe_mac_82598EB)
  4736. adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
  4737. adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
  4738. adapter->sfp_poll_time = 0;
  4739. }
  4740. /**
  4741. * ixgbe_non_sfp_link_config - set up non-SFP+ link
  4742. * @hw: pointer to private hardware struct
  4743. *
  4744. * Returns 0 on success, negative on failure
  4745. **/
  4746. static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
  4747. {
  4748. u32 speed;
  4749. bool autoneg, link_up = false;
  4750. int ret = IXGBE_ERR_LINK_SETUP;
  4751. if (hw->mac.ops.check_link)
  4752. ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
  4753. if (ret)
  4754. return ret;
  4755. speed = hw->phy.autoneg_advertised;
  4756. if ((!speed) && (hw->mac.ops.get_link_capabilities))
  4757. ret = hw->mac.ops.get_link_capabilities(hw, &speed,
  4758. &autoneg);
  4759. if (ret)
  4760. return ret;
  4761. if (hw->mac.ops.setup_link)
  4762. ret = hw->mac.ops.setup_link(hw, speed, link_up);
  4763. return ret;
  4764. }
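/**
* ixgbe_setup_gpie - program the General Purpose Interrupt Enable register
* @adapter: board private structure
*
* Configures GPIE for MSI-X or legacy interrupt mode, sets up EIAM
* auto-masking, selects the VT mode used with SR-IOV, and enables the
* thermal sensor, fan failure and SFP SDP interrupts supported by the MAC.
**/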
  4765. static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
  4766. {
  4767. struct ixgbe_hw *hw = &adapter->hw;
  4768. u32 gpie = 0;
  4769. if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
  4770. gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
  4771. IXGBE_GPIE_OCD;
  4772. gpie |= IXGBE_GPIE_EIAME;
  4773. /*
  4774. * use EIAM to auto-mask when MSI-X interrupt is asserted
  4775. * this saves a register write for every interrupt
  4776. */
  4777. switch (hw->mac.type) {
  4778. case ixgbe_mac_82598EB:
  4779. IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
  4780. break;
  4781. case ixgbe_mac_82599EB:
  4782. case ixgbe_mac_X540:
  4783. case ixgbe_mac_X550:
  4784. case ixgbe_mac_X550EM_x:
  4785. case ixgbe_mac_x550em_a:
  4786. default:
  4787. IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
  4788. IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
  4789. break;
  4790. }
  4791. } else {
  4792. /* legacy interrupts, use EIAM to auto-mask when reading EICR,
  4793. * specifically only auto mask tx and rx interrupts */
  4794. IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
  4795. }
  4796. /* XXX: to interrupt immediately for EICS writes, enable this */
  4797. /* gpie |= IXGBE_GPIE_EIMEN; */
  4798. if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
  4799. gpie &= ~IXGBE_GPIE_VTMODE_MASK;
  4800. switch (adapter->ring_feature[RING_F_VMDQ].mask) {
  4801. case IXGBE_82599_VMDQ_8Q_MASK:
  4802. gpie |= IXGBE_GPIE_VTMODE_16;
  4803. break;
  4804. case IXGBE_82599_VMDQ_4Q_MASK:
  4805. gpie |= IXGBE_GPIE_VTMODE_32;
  4806. break;
  4807. default:
  4808. gpie |= IXGBE_GPIE_VTMODE_64;
  4809. break;
  4810. }
  4811. }
  4812. /* Enable Thermal over heat sensor interrupt */
  4813. if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
  4814. switch (adapter->hw.mac.type) {
  4815. case ixgbe_mac_82599EB:
  4816. gpie |= IXGBE_SDP0_GPIEN_8259X;
  4817. break;
  4818. default:
  4819. break;
  4820. }
  4821. }
  4822. /* Enable fan failure interrupt */
  4823. if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
  4824. gpie |= IXGBE_SDP1_GPIEN(hw);
  4825. switch (hw->mac.type) {
  4826. case ixgbe_mac_82599EB:
  4827. gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X;
  4828. break;
  4829. case ixgbe_mac_X550EM_x:
  4830. case ixgbe_mac_x550em_a:
  4831. gpie |= IXGBE_SDP0_GPIEN_X540;
  4832. break;
  4833. default:
  4834. break;
  4835. }
  4836. IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
  4837. }
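/**
* ixgbe_up_complete - finish bringing the interface up after configuration
* @adapter: board private structure
*
* Takes ownership of the hardware, programs the interrupt vectors, powers
* the optics/PHY, enables NAPI and interrupts, kicks off link configuration
* and the service timer, then sets PFRSTD so PF/VF mailbox ops can proceed.
**/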
  4838. static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
  4839. {
  4840. struct ixgbe_hw *hw = &adapter->hw;
  4841. int err;
  4842. u32 ctrl_ext;
  4843. ixgbe_get_hw_control(adapter);
  4844. ixgbe_setup_gpie(adapter);
  4845. if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
  4846. ixgbe_configure_msix(adapter);
  4847. else
  4848. ixgbe_configure_msi_and_legacy(adapter);
  4849. /* enable the optics for 82599 SFP+ fiber */
  4850. if (hw->mac.ops.enable_tx_laser)
  4851. hw->mac.ops.enable_tx_laser(hw);
  4852. if (hw->phy.ops.set_phy_power)
  4853. hw->phy.ops.set_phy_power(hw, true);
  4854. smp_mb__before_atomic();
  4855. clear_bit(__IXGBE_DOWN, &adapter->state);
  4856. ixgbe_napi_enable_all(adapter);
  4857. if (ixgbe_is_sfp(hw)) {
  4858. ixgbe_sfp_link_config(adapter);
  4859. } else {
  4860. err = ixgbe_non_sfp_link_config(hw);
  4861. if (err)
  4862. e_err(probe, "link_config FAILED %d\n", err);
  4863. }
  4864. /* clear any pending interrupts, may auto mask */
  4865. IXGBE_READ_REG(hw, IXGBE_EICR);
  4866. ixgbe_irq_enable(adapter, true, true);
  4867. /*
  4868. * If this adapter has a fan, check to see if we had a failure
  4869. * before we enabled the interrupt.
  4870. */
  4871. if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
  4872. u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
  4873. if (esdp & IXGBE_ESDP_SDP1)
  4874. e_crit(drv, "Fan has stopped, replace the adapter\n");
  4875. }
  4876. /* bring the link up in the watchdog, this could race with our first
  4877. * link up interrupt but shouldn't be a problem */
  4878. adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
  4879. adapter->link_check_timeout = jiffies;
  4880. mod_timer(&adapter->service_timer, jiffies);
  4881. /* Set PF Reset Done bit so PF/VF Mail Ops can work */
  4882. ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
  4883. ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
  4884. IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
  4885. }
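/**
* ixgbe_reinit_locked - bring the interface down and back up
* @adapter: board private structure
*
* Serializes against other resets via __IXGBE_RESETTING and, when SR-IOV is
* enabled, waits for the VFs to notice the reset before calling ixgbe_up().
**/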
  4886. void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
  4887. {
  4888. WARN_ON(in_interrupt());
  4889. /* put off any impending NetWatchDogTimeout */
  4890. netif_trans_update(adapter->netdev);
  4891. while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
  4892. usleep_range(1000, 2000);
  4893. if (adapter->hw.phy.type == ixgbe_phy_fw)
  4894. ixgbe_watchdog_link_is_down(adapter);
  4895. ixgbe_down(adapter);
  4896. /*
  4897. * If SR-IOV enabled then wait a bit before bringing the adapter
  4898. * back up to give the VFs time to respond to the reset. The
  4899. * two second wait is based upon the watchdog timer cycle in
  4900. * the VF driver.
  4901. */
  4902. if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
  4903. msleep(2000);
  4904. ixgbe_up(adapter);
  4905. clear_bit(__IXGBE_RESETTING, &adapter->state);
  4906. }
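/**
* ixgbe_up - reconfigure and enable the adapter after a hardware reset
* @adapter: board private structure
**/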
  4907. void ixgbe_up(struct ixgbe_adapter *adapter)
  4908. {
  4909. /* hardware has been reset, we need to reload some things */
  4910. ixgbe_configure(adapter);
  4911. ixgbe_up_complete(adapter);
  4912. }
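/**
* ixgbe_reset - reset the hardware and restore driver state
* @adapter: board private structure
*
* Re-runs init_hw while holding __IXGBE_IN_SFP_INIT, reports known error
* codes, flushes and reprograms the MAC filter table, and restores PTP and
* PHY power state as needed.
**/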
  4913. void ixgbe_reset(struct ixgbe_adapter *adapter)
  4914. {
  4915. struct ixgbe_hw *hw = &adapter->hw;
  4916. struct net_device *netdev = adapter->netdev;
  4917. int err;
  4918. if (ixgbe_removed(hw->hw_addr))
  4919. return;
  4920. /* lock SFP init bit to prevent race conditions with the watchdog */
  4921. while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
  4922. usleep_range(1000, 2000);
  4923. /* clear all SFP and link config related flags while holding SFP_INIT */
  4924. adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP |
  4925. IXGBE_FLAG2_SFP_NEEDS_RESET);
  4926. adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
  4927. err = hw->mac.ops.init_hw(hw);
  4928. switch (err) {
  4929. case 0:
  4930. case IXGBE_ERR_SFP_NOT_PRESENT:
  4931. case IXGBE_ERR_SFP_NOT_SUPPORTED:
  4932. break;
  4933. case IXGBE_ERR_MASTER_REQUESTS_PENDING:
  4934. e_dev_err("master disable timed out\n");
  4935. break;
  4936. case IXGBE_ERR_EEPROM_VERSION:
  4937. /* We are running on a pre-production device, log a warning */
  4938. e_dev_warn("This device is a pre-production adapter/LOM. "
  4939. "Please be aware there may be issues associated with "
  4940. "your hardware. If you are experiencing problems "
  4941. "please contact your Intel or hardware "
  4942. "representative who provided you with this "
  4943. "hardware.\n");
  4944. break;
  4945. default:
  4946. e_dev_err("Hardware Error: %d\n", err);
  4947. }
  4948. clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
  4949. /* flush entries out of MAC table */
  4950. ixgbe_flush_sw_mac_table(adapter);
  4951. __dev_uc_unsync(netdev, NULL);
  4952. /* do not flush user set addresses */
  4953. ixgbe_mac_set_default_filter(adapter);
  4954. /* update SAN MAC vmdq pool selection */
  4955. if (hw->mac.san_mac_rar_index)
  4956. hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
  4957. if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
  4958. ixgbe_ptp_reset(adapter);
  4959. if (hw->phy.ops.set_phy_power) {
  4960. if (!netif_running(adapter->netdev) && !adapter->wol)
  4961. hw->phy.ops.set_phy_power(hw, false);
  4962. else
  4963. hw->phy.ops.set_phy_power(hw, true);
  4964. }
  4965. }
  4966. /**
  4967. * ixgbe_clean_tx_ring - Free Tx Buffers
  4968. * @tx_ring: ring to be cleaned
  4969. **/
  4970. static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
  4971. {
  4972. u16 i = tx_ring->next_to_clean;
  4973. struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
  4974. while (i != tx_ring->next_to_use) {
  4975. union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
  4976. /* Free all the Tx ring sk_buffs */
  4977. if (ring_is_xdp(tx_ring))
  4978. page_frag_free(tx_buffer->data);
  4979. else
  4980. dev_kfree_skb_any(tx_buffer->skb);
  4981. /* unmap skb header data */
  4982. dma_unmap_single(tx_ring->dev,
  4983. dma_unmap_addr(tx_buffer, dma),
  4984. dma_unmap_len(tx_buffer, len),
  4985. DMA_TO_DEVICE);
  4986. /* check for eop_desc to determine the end of the packet */
  4987. eop_desc = tx_buffer->next_to_watch;
  4988. tx_desc = IXGBE_TX_DESC(tx_ring, i);
  4989. /* unmap remaining buffers */
  4990. while (tx_desc != eop_desc) {
  4991. tx_buffer++;
  4992. tx_desc++;
  4993. i++;
  4994. if (unlikely(i == tx_ring->count)) {
  4995. i = 0;
  4996. tx_buffer = tx_ring->tx_buffer_info;
  4997. tx_desc = IXGBE_TX_DESC(tx_ring, 0);
  4998. }
  4999. /* unmap any remaining paged data */
  5000. if (dma_unmap_len(tx_buffer, len))
  5001. dma_unmap_page(tx_ring->dev,
  5002. dma_unmap_addr(tx_buffer, dma),
  5003. dma_unmap_len(tx_buffer, len),
  5004. DMA_TO_DEVICE);
  5005. }
  5006. /* move us one more past the eop_desc for start of next pkt */
  5007. tx_buffer++;
  5008. i++;
  5009. if (unlikely(i == tx_ring->count)) {
  5010. i = 0;
  5011. tx_buffer = tx_ring->tx_buffer_info;
  5012. }
  5013. }
  5014. /* reset BQL for queue */
  5015. if (!ring_is_xdp(tx_ring))
  5016. netdev_tx_reset_queue(txring_txq(tx_ring));
  5017. /* reset next_to_use and next_to_clean */
  5018. tx_ring->next_to_use = 0;
  5019. tx_ring->next_to_clean = 0;
  5020. }
  5021. /**
  5022. * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
  5023. * @adapter: board private structure
  5024. **/
  5025. static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
  5026. {
  5027. int i;
  5028. for (i = 0; i < adapter->num_rx_queues; i++)
  5029. ixgbe_clean_rx_ring(adapter->rx_ring[i]);
  5030. }
  5031. /**
  5032. * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
  5033. * @adapter: board private structure
  5034. **/
  5035. static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
  5036. {
  5037. int i;
  5038. for (i = 0; i < adapter->num_tx_queues; i++)
  5039. ixgbe_clean_tx_ring(adapter->tx_ring[i]);
  5040. for (i = 0; i < adapter->num_xdp_queues; i++)
  5041. ixgbe_clean_tx_ring(adapter->xdp_ring[i]);
  5042. }
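/**
* ixgbe_fdir_filter_exit - free all software Flow Director filter entries
* @adapter: board private structure
**/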
  5043. static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
  5044. {
  5045. struct hlist_node *node2;
  5046. struct ixgbe_fdir_filter *filter;
  5047. spin_lock(&adapter->fdir_perfect_lock);
  5048. hlist_for_each_entry_safe(filter, node2,
  5049. &adapter->fdir_filter_list, fdir_node) {
  5050. hlist_del(&filter->fdir_node);
  5051. kfree(filter);
  5052. }
  5053. adapter->fdir_filter_count = 0;
  5054. spin_unlock(&adapter->fdir_perfect_lock);
  5055. }
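/**
* ixgbe_disable_macvlan - callback to quiesce an offloaded macvlan device
* @upper: upper device being walked
* @data: opaque pointer (unused)
*
* Stops the queues and drops the carrier on any macvlan that is using the
* L2 forwarding offload while the lower device is going down.
**/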
  5056. static int ixgbe_disable_macvlan(struct net_device *upper, void *data)
  5057. {
  5058. if (netif_is_macvlan(upper)) {
  5059. struct macvlan_dev *vlan = netdev_priv(upper);
  5060. if (vlan->fwd_priv) {
  5061. netif_tx_stop_all_queues(upper);
  5062. netif_carrier_off(upper);
  5063. netif_tx_disable(upper);
  5064. }
  5065. }
  5066. return 0;
  5067. }
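/**
* ixgbe_down - quiesce the hardware and software of the adapter
* @adapter: board private structure
*
* Disables the Rx and Tx paths, interrupts and NAPI, notifies active VFs,
* resets the hardware unless the PCI channel is offline, and cleans all
* Tx/Rx rings.
**/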
  5068. void ixgbe_down(struct ixgbe_adapter *adapter)
  5069. {
  5070. struct net_device *netdev = adapter->netdev;
  5071. struct ixgbe_hw *hw = &adapter->hw;
  5072. int i;
  5073. /* signal that we are down to the interrupt handler */
  5074. if (test_and_set_bit(__IXGBE_DOWN, &adapter->state))
  5075. return; /* do nothing if already down */
  5076. /* disable receives */
  5077. hw->mac.ops.disable_rx(hw);
  5078. /* disable all enabled rx queues */
  5079. for (i = 0; i < adapter->num_rx_queues; i++)
  5080. /* this call also flushes the previous write */
  5081. ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
  5082. usleep_range(10000, 20000);
  5083. /* synchronize_sched() needed for pending XDP buffers to drain */
  5084. if (adapter->xdp_ring[0])
  5085. synchronize_sched();
  5086. netif_tx_stop_all_queues(netdev);
  5087. /* call carrier off first to avoid false dev_watchdog timeouts */
  5088. netif_carrier_off(netdev);
  5089. netif_tx_disable(netdev);
  5090. /* disable any upper devices */
  5091. netdev_walk_all_upper_dev_rcu(adapter->netdev,
  5092. ixgbe_disable_macvlan, NULL);
  5093. ixgbe_irq_disable(adapter);
  5094. ixgbe_napi_disable_all(adapter);
  5095. clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
  5096. adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
  5097. adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
  5098. del_timer_sync(&adapter->service_timer);
  5099. if (adapter->num_vfs) {
  5100. /* Clear EITR Select mapping */
  5101. IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
  5102. /* Mark all the VFs as inactive */
  5103. for (i = 0 ; i < adapter->num_vfs; i++)
  5104. adapter->vfinfo[i].clear_to_send = false;
  5105. /* ping all the active vfs to let them know we are going down */
  5106. ixgbe_ping_all_vfs(adapter);
  5107. /* Disable all VFTE/VFRE TX/RX */
  5108. ixgbe_disable_tx_rx(adapter);
  5109. }
  5110. /* disable transmits in the hardware now that interrupts are off */
  5111. for (i = 0; i < adapter->num_tx_queues; i++) {
  5112. u8 reg_idx = adapter->tx_ring[i]->reg_idx;
  5113. IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
  5114. }
  5115. for (i = 0; i < adapter->num_xdp_queues; i++) {
  5116. u8 reg_idx = adapter->xdp_ring[i]->reg_idx;
  5117. IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
  5118. }
  5119. /* Disable the Tx DMA engine on 82599 and later MAC */
  5120. switch (hw->mac.type) {
  5121. case ixgbe_mac_82599EB:
  5122. case ixgbe_mac_X540:
  5123. case ixgbe_mac_X550:
  5124. case ixgbe_mac_X550EM_x:
  5125. case ixgbe_mac_x550em_a:
  5126. IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
  5127. (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
  5128. ~IXGBE_DMATXCTL_TE));
  5129. break;
  5130. default:
  5131. break;
  5132. }
  5133. if (!pci_channel_offline(adapter->pdev))
  5134. ixgbe_reset(adapter);
  5135. /* power down the optics for 82599 SFP+ fiber */
  5136. if (hw->mac.ops.disable_tx_laser)
  5137. hw->mac.ops.disable_tx_laser(hw);
  5138. ixgbe_clean_all_tx_rings(adapter);
  5139. ixgbe_clean_all_rx_rings(adapter);
  5140. }
  5141. /**
5142. * ixgbe_set_eee_capable - helper function to determine EEE support on X550
  5143. * @adapter: board private structure
  5144. */
  5145. static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter)
  5146. {
  5147. struct ixgbe_hw *hw = &adapter->hw;
  5148. switch (hw->device_id) {
  5149. case IXGBE_DEV_ID_X550EM_A_1G_T:
  5150. case IXGBE_DEV_ID_X550EM_A_1G_T_L:
  5151. if (!hw->phy.eee_speeds_supported)
  5152. break;
  5153. adapter->flags2 |= IXGBE_FLAG2_EEE_CAPABLE;
  5154. if (!hw->phy.eee_speeds_advertised)
  5155. break;
  5156. adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED;
  5157. break;
  5158. default:
  5159. adapter->flags2 &= ~IXGBE_FLAG2_EEE_CAPABLE;
  5160. adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED;
  5161. break;
  5162. }
  5163. }
  5164. /**
  5165. * ixgbe_tx_timeout - Respond to a Tx Hang
  5166. * @netdev: network interface device structure
  5167. **/
  5168. static void ixgbe_tx_timeout(struct net_device *netdev)
  5169. {
  5170. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  5171. /* Do the reset outside of interrupt context */
  5172. ixgbe_tx_timeout_reset(adapter);
  5173. }
  5174. #ifdef CONFIG_IXGBE_DCB
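/**
* ixgbe_init_dcb - set up the default DCB configuration
* @adapter: board private structure
*
* Sets the number of traffic classes supported by the MAC and initializes
* the default bandwidth groups, user priority mapping and PFC settings.
**/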
  5175. static void ixgbe_init_dcb(struct ixgbe_adapter *adapter)
  5176. {
  5177. struct ixgbe_hw *hw = &adapter->hw;
  5178. struct tc_configuration *tc;
  5179. int j;
  5180. switch (hw->mac.type) {
  5181. case ixgbe_mac_82598EB:
  5182. case ixgbe_mac_82599EB:
  5183. adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
  5184. adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
  5185. break;
  5186. case ixgbe_mac_X540:
  5187. case ixgbe_mac_X550:
  5188. adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
  5189. adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
  5190. break;
  5191. case ixgbe_mac_X550EM_x:
  5192. case ixgbe_mac_x550em_a:
  5193. default:
  5194. adapter->dcb_cfg.num_tcs.pg_tcs = DEF_TRAFFIC_CLASS;
  5195. adapter->dcb_cfg.num_tcs.pfc_tcs = DEF_TRAFFIC_CLASS;
  5196. break;
  5197. }
  5198. /* Configure DCB traffic classes */
  5199. for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
  5200. tc = &adapter->dcb_cfg.tc_config[j];
  5201. tc->path[DCB_TX_CONFIG].bwg_id = 0;
  5202. tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
  5203. tc->path[DCB_RX_CONFIG].bwg_id = 0;
  5204. tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
  5205. tc->dcb_pfc = pfc_disabled;
  5206. }
  5207. /* Initialize default user to priority mapping, UPx->TC0 */
  5208. tc = &adapter->dcb_cfg.tc_config[0];
  5209. tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
  5210. tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
  5211. adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
  5212. adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
  5213. adapter->dcb_cfg.pfc_mode_enable = false;
  5214. adapter->dcb_set_bitmap = 0x00;
  5215. if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
  5216. adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
  5217. memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
  5218. sizeof(adapter->temp_dcb_cfg));
  5219. }
  5220. #endif
  5221. /**
  5222. * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
5223. * @adapter: board private structure to initialize
* @ii: pointer to ixgbe_info struct
  5224. *
  5225. * ixgbe_sw_init initializes the Adapter private data structure.
  5226. * Fields are initialized based on PCI device information and
  5227. * OS network device settings (MTU size).
  5228. **/
  5229. static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
  5230. const struct ixgbe_info *ii)
  5231. {
  5232. struct ixgbe_hw *hw = &adapter->hw;
  5233. struct pci_dev *pdev = adapter->pdev;
  5234. unsigned int rss, fdir;
  5235. u32 fwsm;
  5236. int i;
  5237. /* PCI config space info */
  5238. hw->vendor_id = pdev->vendor;
  5239. hw->device_id = pdev->device;
  5240. hw->revision_id = pdev->revision;
  5241. hw->subsystem_vendor_id = pdev->subsystem_vendor;
  5242. hw->subsystem_device_id = pdev->subsystem_device;
  5243. /* get_invariants needs the device IDs */
  5244. ii->get_invariants(hw);
  5245. /* Set common capability flags and settings */
  5246. rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus());
  5247. adapter->ring_feature[RING_F_RSS].limit = rss;
  5248. adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
  5249. adapter->max_q_vectors = MAX_Q_VECTORS_82599;
  5250. adapter->atr_sample_rate = 20;
  5251. fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
  5252. adapter->ring_feature[RING_F_FDIR].limit = fdir;
  5253. adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
  5254. #ifdef CONFIG_IXGBE_DCA
  5255. adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
  5256. #endif
  5257. #ifdef CONFIG_IXGBE_DCB
  5258. adapter->flags |= IXGBE_FLAG_DCB_CAPABLE;
  5259. adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
  5260. #endif
  5261. #ifdef IXGBE_FCOE
  5262. adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
  5263. adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
  5264. #ifdef CONFIG_IXGBE_DCB
  5265. /* Default traffic class to use for FCoE */
  5266. adapter->fcoe.up = IXGBE_FCOE_DEFTC;
  5267. #endif /* CONFIG_IXGBE_DCB */
  5268. #endif /* IXGBE_FCOE */
  5269. /* initialize static ixgbe jump table entries */
  5270. adapter->jump_tables[0] = kzalloc(sizeof(*adapter->jump_tables[0]),
  5271. GFP_KERNEL);
  5272. if (!adapter->jump_tables[0])
  5273. return -ENOMEM;
  5274. adapter->jump_tables[0]->mat = ixgbe_ipv4_fields;
  5275. for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++)
  5276. adapter->jump_tables[i] = NULL;
  5277. adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) *
  5278. hw->mac.num_rar_entries,
  5279. GFP_ATOMIC);
  5280. if (!adapter->mac_table)
  5281. return -ENOMEM;
  5282. if (ixgbe_init_rss_key(adapter))
  5283. return -ENOMEM;
  5284. /* Set MAC specific capability flags and exceptions */
  5285. switch (hw->mac.type) {
  5286. case ixgbe_mac_82598EB:
  5287. adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE;
  5288. if (hw->device_id == IXGBE_DEV_ID_82598AT)
  5289. adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
  5290. adapter->max_q_vectors = MAX_Q_VECTORS_82598;
  5291. adapter->ring_feature[RING_F_FDIR].limit = 0;
  5292. adapter->atr_sample_rate = 0;
  5293. adapter->fdir_pballoc = 0;
  5294. #ifdef IXGBE_FCOE
  5295. adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
  5296. adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
  5297. #ifdef CONFIG_IXGBE_DCB
  5298. adapter->fcoe.up = 0;
5299. #endif /* CONFIG_IXGBE_DCB */
  5300. #endif /* IXGBE_FCOE */
  5301. break;
  5302. case ixgbe_mac_82599EB:
  5303. if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
  5304. adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
  5305. break;
  5306. case ixgbe_mac_X540:
  5307. fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
  5308. if (fwsm & IXGBE_FWSM_TS_ENABLED)
  5309. adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
  5310. break;
  5311. case ixgbe_mac_x550em_a:
  5312. adapter->flags |= IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE;
  5313. switch (hw->device_id) {
  5314. case IXGBE_DEV_ID_X550EM_A_1G_T:
  5315. case IXGBE_DEV_ID_X550EM_A_1G_T_L:
  5316. adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
  5317. break;
  5318. default:
  5319. break;
  5320. }
  5321. /* fall through */
  5322. case ixgbe_mac_X550EM_x:
  5323. #ifdef CONFIG_IXGBE_DCB
  5324. adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
  5325. #endif
  5326. #ifdef IXGBE_FCOE
  5327. adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
  5328. #ifdef CONFIG_IXGBE_DCB
  5329. adapter->fcoe.up = 0;
5330. #endif /* CONFIG_IXGBE_DCB */
  5331. #endif /* IXGBE_FCOE */
  5332. /* Fall Through */
  5333. case ixgbe_mac_X550:
  5334. if (hw->mac.type == ixgbe_mac_X550)
  5335. adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
  5336. #ifdef CONFIG_IXGBE_DCA
  5337. adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
  5338. #endif
  5339. adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE;
  5340. break;
  5341. default:
  5342. break;
  5343. }
  5344. #ifdef IXGBE_FCOE
  5345. /* FCoE support exists, always init the FCoE lock */
  5346. spin_lock_init(&adapter->fcoe.lock);
  5347. #endif
  5348. /* n-tuple support exists, always init our spinlock */
  5349. spin_lock_init(&adapter->fdir_perfect_lock);
  5350. #ifdef CONFIG_IXGBE_DCB
  5351. ixgbe_init_dcb(adapter);
  5352. #endif
  5353. /* default flow control settings */
  5354. hw->fc.requested_mode = ixgbe_fc_full;
  5355. hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */
  5356. ixgbe_pbthresh_setup(adapter);
  5357. hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
  5358. hw->fc.send_xon = true;
  5359. hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw);
  5360. #ifdef CONFIG_PCI_IOV
  5361. if (max_vfs > 0)
  5362. e_dev_warn("Enabling SR-IOV VFs using the max_vfs module parameter is deprecated - please use the pci sysfs interface instead.\n");
  5363. /* assign number of SR-IOV VFs */
  5364. if (hw->mac.type != ixgbe_mac_82598EB) {
  5365. if (max_vfs > IXGBE_MAX_VFS_DRV_LIMIT) {
  5366. max_vfs = 0;
  5367. e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n");
  5368. }
  5369. }
  5370. #endif /* CONFIG_PCI_IOV */
  5371. /* enable itr by default in dynamic mode */
  5372. adapter->rx_itr_setting = 1;
  5373. adapter->tx_itr_setting = 1;
  5374. /* set default ring sizes */
  5375. adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
  5376. adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
  5377. /* set default work limits */
  5378. adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
  5379. /* initialize eeprom parameters */
  5380. if (ixgbe_init_eeprom_params_generic(hw)) {
  5381. e_dev_err("EEPROM initialization failed\n");
  5382. return -EIO;
  5383. }
  5384. /* PF holds first pool slot */
  5385. set_bit(0, &adapter->fwd_bitmask);
  5386. set_bit(__IXGBE_DOWN, &adapter->state);
  5387. return 0;
  5388. }
  5389. /**
  5390. * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
  5391. * @tx_ring: tx descriptor ring (for a specific queue) to setup
  5392. *
  5393. * Return 0 on success, negative on failure
  5394. **/
  5395. int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
  5396. {
  5397. struct device *dev = tx_ring->dev;
  5398. int orig_node = dev_to_node(dev);
  5399. int ring_node = -1;
  5400. int size;
  5401. size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
  5402. if (tx_ring->q_vector)
  5403. ring_node = tx_ring->q_vector->numa_node;
  5404. tx_ring->tx_buffer_info = vmalloc_node(size, ring_node);
  5405. if (!tx_ring->tx_buffer_info)
  5406. tx_ring->tx_buffer_info = vmalloc(size);
  5407. if (!tx_ring->tx_buffer_info)
  5408. goto err;
  5409. /* round up to nearest 4K */
  5410. tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
  5411. tx_ring->size = ALIGN(tx_ring->size, 4096);
  5412. set_dev_node(dev, ring_node);
  5413. tx_ring->desc = dma_alloc_coherent(dev,
  5414. tx_ring->size,
  5415. &tx_ring->dma,
  5416. GFP_KERNEL);
  5417. set_dev_node(dev, orig_node);
  5418. if (!tx_ring->desc)
  5419. tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
  5420. &tx_ring->dma, GFP_KERNEL);
  5421. if (!tx_ring->desc)
  5422. goto err;
  5423. tx_ring->next_to_use = 0;
  5424. tx_ring->next_to_clean = 0;
  5425. return 0;
  5426. err:
  5427. vfree(tx_ring->tx_buffer_info);
  5428. tx_ring->tx_buffer_info = NULL;
  5429. dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
  5430. return -ENOMEM;
  5431. }
  5432. /**
  5433. * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
  5434. * @adapter: board private structure
  5435. *
  5436. * If this function returns with an error, then it's possible one or
  5437. * more of the rings is populated (while the rest are not). It is the
  5438. * callers duty to clean those orphaned rings.
  5439. *
  5440. * Return 0 on success, negative on failure
  5441. **/
  5442. static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
  5443. {
  5444. int i, j = 0, err = 0;
  5445. for (i = 0; i < adapter->num_tx_queues; i++) {
  5446. err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
  5447. if (!err)
  5448. continue;
  5449. e_err(probe, "Allocation for Tx Queue %u failed\n", i);
  5450. goto err_setup_tx;
  5451. }
  5452. for (j = 0; j < adapter->num_xdp_queues; j++) {
  5453. err = ixgbe_setup_tx_resources(adapter->xdp_ring[j]);
  5454. if (!err)
  5455. continue;
  5456. e_err(probe, "Allocation for Tx Queue %u failed\n", j);
  5457. goto err_setup_tx;
  5458. }
  5459. return 0;
  5460. err_setup_tx:
  5461. /* rewind the index freeing the rings as we go */
  5462. while (j--)
  5463. ixgbe_free_tx_resources(adapter->xdp_ring[j]);
  5464. while (i--)
  5465. ixgbe_free_tx_resources(adapter->tx_ring[i]);
  5466. return err;
  5467. }
  5468. /**
  5469. * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
5470. * @adapter: board private structure
* @rx_ring: rx descriptor ring (for a specific queue) to setup
  5471. *
  5472. * Returns 0 on success, negative on failure
  5473. **/
  5474. int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
  5475. struct ixgbe_ring *rx_ring)
  5476. {
  5477. struct device *dev = rx_ring->dev;
  5478. int orig_node = dev_to_node(dev);
  5479. int ring_node = -1;
  5480. int size;
  5481. size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
  5482. if (rx_ring->q_vector)
  5483. ring_node = rx_ring->q_vector->numa_node;
  5484. rx_ring->rx_buffer_info = vmalloc_node(size, ring_node);
  5485. if (!rx_ring->rx_buffer_info)
  5486. rx_ring->rx_buffer_info = vmalloc(size);
  5487. if (!rx_ring->rx_buffer_info)
  5488. goto err;
  5489. /* Round up to nearest 4K */
  5490. rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
  5491. rx_ring->size = ALIGN(rx_ring->size, 4096);
  5492. set_dev_node(dev, ring_node);
  5493. rx_ring->desc = dma_alloc_coherent(dev,
  5494. rx_ring->size,
  5495. &rx_ring->dma,
  5496. GFP_KERNEL);
  5497. set_dev_node(dev, orig_node);
  5498. if (!rx_ring->desc)
  5499. rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
  5500. &rx_ring->dma, GFP_KERNEL);
  5501. if (!rx_ring->desc)
  5502. goto err;
  5503. rx_ring->next_to_clean = 0;
  5504. rx_ring->next_to_use = 0;
  5505. rx_ring->xdp_prog = adapter->xdp_prog;
  5506. return 0;
  5507. err:
  5508. vfree(rx_ring->rx_buffer_info);
  5509. rx_ring->rx_buffer_info = NULL;
  5510. dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
  5511. return -ENOMEM;
  5512. }
  5513. /**
  5514. * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
  5515. * @adapter: board private structure
  5516. *
  5517. * If this function returns with an error, then it's possible one or
  5518. * more of the rings is populated (while the rest are not). It is the
  5519. * callers duty to clean those orphaned rings.
  5520. *
  5521. * Return 0 on success, negative on failure
  5522. **/
  5523. static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
  5524. {
  5525. int i, err = 0;
  5526. for (i = 0; i < adapter->num_rx_queues; i++) {
  5527. err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
  5528. if (!err)
  5529. continue;
  5530. e_err(probe, "Allocation for Rx Queue %u failed\n", i);
  5531. goto err_setup_rx;
  5532. }
  5533. #ifdef IXGBE_FCOE
  5534. err = ixgbe_setup_fcoe_ddp_resources(adapter);
  5535. if (!err)
  5536. #endif
  5537. return 0;
  5538. err_setup_rx:
  5539. /* rewind the index freeing the rings as we go */
  5540. while (i--)
  5541. ixgbe_free_rx_resources(adapter->rx_ring[i]);
  5542. return err;
  5543. }
  5544. /**
  5545. * ixgbe_free_tx_resources - Free Tx Resources per Queue
  5546. * @tx_ring: Tx descriptor ring for a specific queue
  5547. *
  5548. * Free all transmit software resources
  5549. **/
  5550. void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
  5551. {
  5552. ixgbe_clean_tx_ring(tx_ring);
  5553. vfree(tx_ring->tx_buffer_info);
  5554. tx_ring->tx_buffer_info = NULL;
  5555. /* if not set, then don't free */
  5556. if (!tx_ring->desc)
  5557. return;
  5558. dma_free_coherent(tx_ring->dev, tx_ring->size,
  5559. tx_ring->desc, tx_ring->dma);
  5560. tx_ring->desc = NULL;
  5561. }
  5562. /**
  5563. * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
  5564. * @adapter: board private structure
  5565. *
  5566. * Free all transmit software resources
  5567. **/
  5568. static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
  5569. {
  5570. int i;
  5571. for (i = 0; i < adapter->num_tx_queues; i++)
  5572. if (adapter->tx_ring[i]->desc)
  5573. ixgbe_free_tx_resources(adapter->tx_ring[i]);
  5574. for (i = 0; i < adapter->num_xdp_queues; i++)
  5575. if (adapter->xdp_ring[i]->desc)
  5576. ixgbe_free_tx_resources(adapter->xdp_ring[i]);
  5577. }
  5578. /**
  5579. * ixgbe_free_rx_resources - Free Rx Resources
  5580. * @rx_ring: ring to clean the resources from
  5581. *
  5582. * Free all receive software resources
  5583. **/
  5584. void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
  5585. {
  5586. ixgbe_clean_rx_ring(rx_ring);
  5587. rx_ring->xdp_prog = NULL;
  5588. vfree(rx_ring->rx_buffer_info);
  5589. rx_ring->rx_buffer_info = NULL;
  5590. /* if not set, then don't free */
  5591. if (!rx_ring->desc)
  5592. return;
  5593. dma_free_coherent(rx_ring->dev, rx_ring->size,
  5594. rx_ring->desc, rx_ring->dma);
  5595. rx_ring->desc = NULL;
  5596. }
  5597. /**
  5598. * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
  5599. * @adapter: board private structure
  5600. *
  5601. * Free all receive software resources
  5602. **/
  5603. static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
  5604. {
  5605. int i;
  5606. #ifdef IXGBE_FCOE
  5607. ixgbe_free_fcoe_ddp_resources(adapter);
  5608. #endif
  5609. for (i = 0; i < adapter->num_rx_queues; i++)
  5610. if (adapter->rx_ring[i]->desc)
  5611. ixgbe_free_rx_resources(adapter->rx_ring[i]);
  5612. }
  5613. /**
  5614. * ixgbe_change_mtu - Change the Maximum Transfer Unit
  5615. * @netdev: network interface device structure
  5616. * @new_mtu: new value for maximum frame size
  5617. *
  5618. * Returns 0 on success, negative on failure
  5619. **/
  5620. static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
  5621. {
  5622. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  5623. /*
  5624. * For 82599EB we cannot allow legacy VFs to enable their receive
  5625. * paths when MTU greater than 1500 is configured. So display a
  5626. * warning that legacy VFs will be disabled.
  5627. */
  5628. if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
  5629. (adapter->hw.mac.type == ixgbe_mac_82599EB) &&
  5630. (new_mtu > ETH_DATA_LEN))
  5631. e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
  5632. e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
  5633. /* must set new MTU before calling down or up */
  5634. netdev->mtu = new_mtu;
  5635. if (netif_running(netdev))
  5636. ixgbe_reinit_locked(adapter);
  5637. return 0;
  5638. }
  5639. /**
  5640. * ixgbe_open - Called when a network interface is made active
  5641. * @netdev: network interface device structure
  5642. *
  5643. * Returns 0 on success, negative value on failure
  5644. *
  5645. * The open entry point is called when a network interface is made
  5646. * active by the system (IFF_UP). At this point all resources needed
  5647. * for transmit and receive operations are allocated, the interrupt
  5648. * handler is registered with the OS, the watchdog timer is started,
  5649. * and the stack is notified that the interface is ready.
  5650. **/
  5651. int ixgbe_open(struct net_device *netdev)
  5652. {
  5653. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  5654. struct ixgbe_hw *hw = &adapter->hw;
  5655. int err, queues;
  5656. /* disallow open during test */
  5657. if (test_bit(__IXGBE_TESTING, &adapter->state))
  5658. return -EBUSY;
  5659. netif_carrier_off(netdev);
  5660. /* allocate transmit descriptors */
  5661. err = ixgbe_setup_all_tx_resources(adapter);
  5662. if (err)
  5663. goto err_setup_tx;
  5664. /* allocate receive descriptors */
  5665. err = ixgbe_setup_all_rx_resources(adapter);
  5666. if (err)
  5667. goto err_setup_rx;
  5668. ixgbe_configure(adapter);
  5669. err = ixgbe_request_irq(adapter);
  5670. if (err)
  5671. goto err_req_irq;
  5672. /* Notify the stack of the actual queue counts. */
  5673. if (adapter->num_rx_pools > 1)
  5674. queues = adapter->num_rx_queues_per_pool;
  5675. else
  5676. queues = adapter->num_tx_queues;
  5677. err = netif_set_real_num_tx_queues(netdev, queues);
  5678. if (err)
  5679. goto err_set_queues;
  5680. if (adapter->num_rx_pools > 1 &&
  5681. adapter->num_rx_queues > IXGBE_MAX_L2A_QUEUES)
  5682. queues = IXGBE_MAX_L2A_QUEUES;
  5683. else
  5684. queues = adapter->num_rx_queues;
  5685. err = netif_set_real_num_rx_queues(netdev, queues);
  5686. if (err)
  5687. goto err_set_queues;
  5688. ixgbe_ptp_init(adapter);
  5689. ixgbe_up_complete(adapter);
  5690. ixgbe_clear_udp_tunnel_port(adapter, IXGBE_VXLANCTRL_ALL_UDPPORT_MASK);
  5691. udp_tunnel_get_rx_info(netdev);
  5692. return 0;
  5693. err_set_queues:
  5694. ixgbe_free_irq(adapter);
  5695. err_req_irq:
  5696. ixgbe_free_all_rx_resources(adapter);
  5697. if (hw->phy.ops.set_phy_power && !adapter->wol)
  5698. hw->phy.ops.set_phy_power(&adapter->hw, false);
  5699. err_setup_rx:
  5700. ixgbe_free_all_tx_resources(adapter);
  5701. err_setup_tx:
  5702. ixgbe_reset(adapter);
  5703. return err;
  5704. }
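/**
* ixgbe_close_suspend - shut down the interface for close or suspend
* @adapter: board private structure
*
* Suspends PTP, brings the adapter down (entering low power link up mode
* when the PHY supports it), frees the IRQs and all Tx/Rx resources.
**/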
  5705. static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
  5706. {
  5707. ixgbe_ptp_suspend(adapter);
  5708. if (adapter->hw.phy.ops.enter_lplu) {
  5709. adapter->hw.phy.reset_disable = true;
  5710. ixgbe_down(adapter);
  5711. adapter->hw.phy.ops.enter_lplu(&adapter->hw);
  5712. adapter->hw.phy.reset_disable = false;
  5713. } else {
  5714. ixgbe_down(adapter);
  5715. }
  5716. ixgbe_free_irq(adapter);
  5717. ixgbe_free_all_tx_resources(adapter);
  5718. ixgbe_free_all_rx_resources(adapter);
  5719. }
  5720. /**
  5721. * ixgbe_close - Disables a network interface
  5722. * @netdev: network interface device structure
  5723. *
  5724. * Returns 0, this is not allowed to fail
  5725. *
  5726. * The close entry point is called when an interface is de-activated
  5727. * by the OS. The hardware is still under the drivers control, but
  5728. * needs to be disabled. A global MAC reset is issued to stop the
  5729. * hardware, and all transmit and receive resources are freed.
  5730. **/
  5731. int ixgbe_close(struct net_device *netdev)
  5732. {
  5733. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  5734. ixgbe_ptp_stop(adapter);
  5735. if (netif_device_present(netdev))
  5736. ixgbe_close_suspend(adapter);
  5737. ixgbe_fdir_filter_exit(adapter);
  5738. ixgbe_release_hw_control(adapter);
  5739. return 0;
  5740. }
  5741. #ifdef CONFIG_PM
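/**
* ixgbe_resume - restore the device after a system resume
* @pdev: PCI device being resumed
*
* Re-enables the PCI device, resets the hardware, clears any wake status
* and, if the interface was running, reopens it under RTNL.
**/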
  5742. static int ixgbe_resume(struct pci_dev *pdev)
  5743. {
  5744. struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
  5745. struct net_device *netdev = adapter->netdev;
  5746. u32 err;
  5747. adapter->hw.hw_addr = adapter->io_addr;
  5748. pci_set_power_state(pdev, PCI_D0);
  5749. pci_restore_state(pdev);
  5750. /*
  5751. * pci_restore_state clears dev->state_saved so call
  5752. * pci_save_state to restore it.
  5753. */
  5754. pci_save_state(pdev);
  5755. err = pci_enable_device_mem(pdev);
  5756. if (err) {
  5757. e_dev_err("Cannot enable PCI device from suspend\n");
  5758. return err;
  5759. }
  5760. smp_mb__before_atomic();
  5761. clear_bit(__IXGBE_DISABLED, &adapter->state);
  5762. pci_set_master(pdev);
  5763. pci_wake_from_d3(pdev, false);
  5764. ixgbe_reset(adapter);
  5765. IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
  5766. rtnl_lock();
  5767. err = ixgbe_init_interrupt_scheme(adapter);
  5768. if (!err && netif_running(netdev))
  5769. err = ixgbe_open(netdev);
  5770. if (!err)
  5771. netif_device_attach(netdev);
  5772. rtnl_unlock();
  5773. return err;
  5774. }
  5775. #endif /* CONFIG_PM */
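/**
* __ixgbe_shutdown - common suspend/shutdown path
* @pdev: PCI device being shut down
* @enable_wake: set true when Wake-on-LAN is armed
*
* Detaches the netdev, tears down the interrupt scheme and programs the
* wake-up filters so the device can wake the system when WoL is enabled.
**/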
  5776. static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
  5777. {
  5778. struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
  5779. struct net_device *netdev = adapter->netdev;
  5780. struct ixgbe_hw *hw = &adapter->hw;
  5781. u32 ctrl, fctrl;
  5782. u32 wufc = adapter->wol;
  5783. #ifdef CONFIG_PM
  5784. int retval = 0;
  5785. #endif
  5786. rtnl_lock();
  5787. netif_device_detach(netdev);
  5788. if (netif_running(netdev))
  5789. ixgbe_close_suspend(adapter);
  5790. ixgbe_clear_interrupt_scheme(adapter);
  5791. rtnl_unlock();
  5792. #ifdef CONFIG_PM
  5793. retval = pci_save_state(pdev);
  5794. if (retval)
  5795. return retval;
  5796. #endif
  5797. if (hw->mac.ops.stop_link_on_d3)
  5798. hw->mac.ops.stop_link_on_d3(hw);
  5799. if (wufc) {
  5800. ixgbe_set_rx_mode(netdev);
  5801. /* enable the optics for 82599 SFP+ fiber as we can WoL */
  5802. if (hw->mac.ops.enable_tx_laser)
  5803. hw->mac.ops.enable_tx_laser(hw);
  5804. /* turn on all-multi mode if wake on multicast is enabled */
  5805. if (wufc & IXGBE_WUFC_MC) {
  5806. fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
  5807. fctrl |= IXGBE_FCTRL_MPE;
  5808. IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
  5809. }
  5810. ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
  5811. ctrl |= IXGBE_CTRL_GIO_DIS;
  5812. IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
  5813. IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
  5814. } else {
  5815. IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
  5816. IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
  5817. }
  5818. switch (hw->mac.type) {
  5819. case ixgbe_mac_82598EB:
  5820. pci_wake_from_d3(pdev, false);
  5821. break;
  5822. case ixgbe_mac_82599EB:
  5823. case ixgbe_mac_X540:
  5824. case ixgbe_mac_X550:
  5825. case ixgbe_mac_X550EM_x:
  5826. case ixgbe_mac_x550em_a:
  5827. pci_wake_from_d3(pdev, !!wufc);
  5828. break;
  5829. default:
  5830. break;
  5831. }
  5832. *enable_wake = !!wufc;
  5833. if (hw->phy.ops.set_phy_power && !*enable_wake)
  5834. hw->phy.ops.set_phy_power(hw, false);
  5835. ixgbe_release_hw_control(adapter);
  5836. if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
  5837. pci_disable_device(pdev);
  5838. return 0;
  5839. }
  5840. #ifdef CONFIG_PM
  5841. static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
  5842. {
  5843. int retval;
  5844. bool wake;
  5845. retval = __ixgbe_shutdown(pdev, &wake);
  5846. if (retval)
  5847. return retval;
  5848. if (wake) {
  5849. pci_prepare_to_sleep(pdev);
  5850. } else {
  5851. pci_wake_from_d3(pdev, false);
  5852. pci_set_power_state(pdev, PCI_D3hot);
  5853. }
  5854. return 0;
  5855. }
  5856. #endif /* CONFIG_PM */
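/**
* ixgbe_shutdown - called at system shutdown or reboot
* @pdev: PCI device being shut down
**/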
  5857. static void ixgbe_shutdown(struct pci_dev *pdev)
  5858. {
  5859. bool wake;
  5860. __ixgbe_shutdown(pdev, &wake);
  5861. if (system_state == SYSTEM_POWER_OFF) {
  5862. pci_wake_from_d3(pdev, wake);
  5863. pci_set_power_state(pdev, PCI_D3hot);
  5864. }
  5865. }
  5866. /**
  5867. * ixgbe_update_stats - Update the board statistics counters.
  5868. * @adapter: board private structure
  5869. **/
  5870. void ixgbe_update_stats(struct ixgbe_adapter *adapter)
  5871. {
  5872. struct net_device *netdev = adapter->netdev;
  5873. struct ixgbe_hw *hw = &adapter->hw;
  5874. struct ixgbe_hw_stats *hwstats = &adapter->stats;
  5875. u64 total_mpc = 0;
  5876. u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
  5877. u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
  5878. u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
  5879. u64 alloc_rx_page = 0;
  5880. u64 bytes = 0, packets = 0, hw_csum_rx_error = 0;
  5881. if (test_bit(__IXGBE_DOWN, &adapter->state) ||
  5882. test_bit(__IXGBE_RESETTING, &adapter->state))
  5883. return;
  5884. if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
  5885. u64 rsc_count = 0;
  5886. u64 rsc_flush = 0;
  5887. for (i = 0; i < adapter->num_rx_queues; i++) {
  5888. rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
  5889. rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
  5890. }
  5891. adapter->rsc_total_count = rsc_count;
  5892. adapter->rsc_total_flush = rsc_flush;
  5893. }
  5894. for (i = 0; i < adapter->num_rx_queues; i++) {
  5895. struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
  5896. non_eop_descs += rx_ring->rx_stats.non_eop_descs;
  5897. alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
  5898. alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
  5899. alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
  5900. hw_csum_rx_error += rx_ring->rx_stats.csum_err;
  5901. bytes += rx_ring->stats.bytes;
  5902. packets += rx_ring->stats.packets;
  5903. }
  5904. adapter->non_eop_descs = non_eop_descs;
  5905. adapter->alloc_rx_page = alloc_rx_page;
  5906. adapter->alloc_rx_page_failed = alloc_rx_page_failed;
  5907. adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
  5908. adapter->hw_csum_rx_error = hw_csum_rx_error;
  5909. netdev->stats.rx_bytes = bytes;
  5910. netdev->stats.rx_packets = packets;
  5911. bytes = 0;
  5912. packets = 0;
  5913. /* gather some stats to the adapter struct that are per queue */
  5914. for (i = 0; i < adapter->num_tx_queues; i++) {
  5915. struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
  5916. restart_queue += tx_ring->tx_stats.restart_queue;
  5917. tx_busy += tx_ring->tx_stats.tx_busy;
  5918. bytes += tx_ring->stats.bytes;
  5919. packets += tx_ring->stats.packets;
  5920. }
  5921. for (i = 0; i < adapter->num_xdp_queues; i++) {
  5922. struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i];
  5923. restart_queue += xdp_ring->tx_stats.restart_queue;
  5924. tx_busy += xdp_ring->tx_stats.tx_busy;
  5925. bytes += xdp_ring->stats.bytes;
  5926. packets += xdp_ring->stats.packets;
  5927. }
  5928. adapter->restart_queue = restart_queue;
  5929. adapter->tx_busy = tx_busy;
  5930. netdev->stats.tx_bytes = bytes;
  5931. netdev->stats.tx_packets = packets;
  5932. hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
  5933. /* 8 register reads */
  5934. for (i = 0; i < 8; i++) {
  5935. /* for packet buffers not used, the register should read 0 */
  5936. mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
  5937. missed_rx += mpc;
  5938. hwstats->mpc[i] += mpc;
  5939. total_mpc += hwstats->mpc[i];
  5940. hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
  5941. hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
  5942. switch (hw->mac.type) {
  5943. case ixgbe_mac_82598EB:
  5944. hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
  5945. hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
  5946. hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
  5947. hwstats->pxonrxc[i] +=
  5948. IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
  5949. break;
  5950. case ixgbe_mac_82599EB:
  5951. case ixgbe_mac_X540:
  5952. case ixgbe_mac_X550:
  5953. case ixgbe_mac_X550EM_x:
  5954. case ixgbe_mac_x550em_a:
  5955. hwstats->pxonrxc[i] +=
  5956. IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
  5957. break;
  5958. default:
  5959. break;
  5960. }
  5961. }
  5962. /*16 register reads */
  5963. for (i = 0; i < 16; i++) {
  5964. hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
  5965. hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
  5966. if ((hw->mac.type == ixgbe_mac_82599EB) ||
  5967. (hw->mac.type == ixgbe_mac_X540) ||
  5968. (hw->mac.type == ixgbe_mac_X550) ||
  5969. (hw->mac.type == ixgbe_mac_X550EM_x) ||
  5970. (hw->mac.type == ixgbe_mac_x550em_a)) {
  5971. hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
  5972. IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */
  5973. hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
  5974. IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); /* to clear */
  5975. }
  5976. }
  5977. hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
  5978. /* work around hardware counting issue */
  5979. hwstats->gprc -= missed_rx;
  5980. ixgbe_update_xoff_received(adapter);
  5981. /* 82598 hardware only has a 32 bit counter in the high register */
  5982. switch (hw->mac.type) {
  5983. case ixgbe_mac_82598EB:
  5984. hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
  5985. hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
  5986. hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
  5987. hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
  5988. break;
  5989. case ixgbe_mac_X540:
  5990. case ixgbe_mac_X550:
  5991. case ixgbe_mac_X550EM_x:
  5992. case ixgbe_mac_x550em_a:
  5993. /* OS2BMC stats are X540 and later */
  5994. hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
  5995. hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
  5996. hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
  5997. hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
  5998. /* fall through */
  5999. case ixgbe_mac_82599EB:
  6000. for (i = 0; i < 16; i++)
  6001. adapter->hw_rx_no_dma_resources +=
  6002. IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
  6003. hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
  6004. IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
  6005. hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
  6006. IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
  6007. hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
  6008. IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
  6009. hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
  6010. hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
  6011. hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
  6012. #ifdef IXGBE_FCOE
  6013. hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
  6014. hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
  6015. hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
  6016. hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
  6017. hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
  6018. hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
6019. /* Add up per cpu counters for total ddp alloc fail */
  6020. if (adapter->fcoe.ddp_pool) {
  6021. struct ixgbe_fcoe *fcoe = &adapter->fcoe;
  6022. struct ixgbe_fcoe_ddp_pool *ddp_pool;
  6023. unsigned int cpu;
  6024. u64 noddp = 0, noddp_ext_buff = 0;
  6025. for_each_possible_cpu(cpu) {
  6026. ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
  6027. noddp += ddp_pool->noddp;
  6028. noddp_ext_buff += ddp_pool->noddp_ext_buff;
  6029. }
  6030. hwstats->fcoe_noddp = noddp;
  6031. hwstats->fcoe_noddp_ext_buff = noddp_ext_buff;
  6032. }
  6033. #endif /* IXGBE_FCOE */
  6034. break;
  6035. default:
  6036. break;
  6037. }
  6038. bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
  6039. hwstats->bprc += bprc;
  6040. hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
  6041. if (hw->mac.type == ixgbe_mac_82598EB)
  6042. hwstats->mprc -= bprc;
  6043. hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
  6044. hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
  6045. hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
  6046. hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
  6047. hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
  6048. hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
  6049. hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
  6050. hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
  6051. lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
  6052. hwstats->lxontxc += lxon;
  6053. lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
  6054. hwstats->lxofftxc += lxoff;
  6055. hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
  6056. hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
  6057. /*
  6058. * 82598 errata - tx of flow control packets is included in tx counters
  6059. */
  6060. xon_off_tot = lxon + lxoff;
  6061. hwstats->gptc -= xon_off_tot;
  6062. hwstats->mptc -= xon_off_tot;
  6063. hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
  6064. hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
  6065. hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
  6066. hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
  6067. hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
  6068. hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
  6069. hwstats->ptc64 -= xon_off_tot;
  6070. hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
  6071. hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
  6072. hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
  6073. hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
  6074. hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
  6075. hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
  6076. /* Fill out the OS statistics structure */
  6077. netdev->stats.multicast = hwstats->mprc;
  6078. /* Rx Errors */
  6079. netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec;
  6080. netdev->stats.rx_dropped = 0;
  6081. netdev->stats.rx_length_errors = hwstats->rlec;
  6082. netdev->stats.rx_crc_errors = hwstats->crcerrs;
  6083. netdev->stats.rx_missed_errors = total_mpc;
  6084. }
  6085. /**
  6086. * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table
  6087. * @adapter: pointer to the device adapter structure
  6088. **/
  6089. static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
  6090. {
  6091. struct ixgbe_hw *hw = &adapter->hw;
  6092. int i;
  6093. if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
  6094. return;
  6095. adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
  6096. /* if interface is down do nothing */
  6097. if (test_bit(__IXGBE_DOWN, &adapter->state))
  6098. return;
  6099. /* do nothing if we are not using signature filters */
  6100. if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE))
  6101. return;
  6102. adapter->fdir_overflow++;
  6103. if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
  6104. for (i = 0; i < adapter->num_tx_queues; i++)
  6105. set_bit(__IXGBE_TX_FDIR_INIT_DONE,
  6106. &(adapter->tx_ring[i]->state));
  6107. for (i = 0; i < adapter->num_xdp_queues; i++)
  6108. set_bit(__IXGBE_TX_FDIR_INIT_DONE,
  6109. &adapter->xdp_ring[i]->state);
  6110. /* re-enable flow director interrupts */
  6111. IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
  6112. } else {
  6113. e_err(probe, "failed to finish FDIR re-initialization, "
  6114. "ignored adding FDIR ATR filters\n");
  6115. }
  6116. }
  6117. /**
  6118. * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
  6119. * @adapter: pointer to the device adapter structure
  6120. *
  6121. * This function serves two purposes. First it strobes the interrupt lines
  6122. * in order to make certain interrupts are occurring. Secondly it sets the
  6123. * bits needed to check for TX hangs. As a result we should immediately
  6124. * determine if a hang has occurred.
  6125. */
  6126. static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
  6127. {
  6128. struct ixgbe_hw *hw = &adapter->hw;
  6129. u64 eics = 0;
  6130. int i;
  6131. /* If we're down, removing or resetting, just bail */
  6132. if (test_bit(__IXGBE_DOWN, &adapter->state) ||
  6133. test_bit(__IXGBE_REMOVING, &adapter->state) ||
  6134. test_bit(__IXGBE_RESETTING, &adapter->state))
  6135. return;
  6136. /* Force detection of hung controller */
  6137. if (netif_carrier_ok(adapter->netdev)) {
  6138. for (i = 0; i < adapter->num_tx_queues; i++)
  6139. set_check_for_tx_hang(adapter->tx_ring[i]);
  6140. for (i = 0; i < adapter->num_xdp_queues; i++)
  6141. set_check_for_tx_hang(adapter->xdp_ring[i]);
  6142. }
  6143. if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
  6144. /*
  6145. * for legacy and MSI interrupts don't set any bits
  6146. * that are enabled for EIAM, because this operation
  6147. * would set *both* EIMS and EICS for any bit in EIAM
  6148. */
  6149. IXGBE_WRITE_REG(hw, IXGBE_EICS,
  6150. (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
  6151. } else {
  6152. /* get one bit for every active tx/rx interrupt vector */
  6153. for (i = 0; i < adapter->num_q_vectors; i++) {
  6154. struct ixgbe_q_vector *qv = adapter->q_vector[i];
  6155. if (qv->rx.ring || qv->tx.ring)
  6156. eics |= BIT_ULL(i);
  6157. }
  6158. }
  6159. /* Cause software interrupt to ensure rings are cleaned */
  6160. ixgbe_irq_rearm_queues(adapter, eics);
  6161. }
  6162. /**
  6163. * ixgbe_watchdog_update_link - update the link status
  6164. * @adapter: pointer to the device adapter structure
  6166. **/
  6167. static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
  6168. {
  6169. struct ixgbe_hw *hw = &adapter->hw;
  6170. u32 link_speed = adapter->link_speed;
  6171. bool link_up = adapter->link_up;
  6172. bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
  6173. if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
  6174. return;
  6175. if (hw->mac.ops.check_link) {
  6176. hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
  6177. } else {
6178. /* always assume link is up if there is no check_link function */
  6179. link_speed = IXGBE_LINK_SPEED_10GB_FULL;
  6180. link_up = true;
  6181. }
  6182. if (adapter->ixgbe_ieee_pfc)
  6183. pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
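/* With link up and PFC not managing flow control, (re)enable link-level
* flow control and recompute the per-ring Rx drop enable setting.
*/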
  6184. if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) {
  6185. hw->mac.ops.fc_enable(hw);
  6186. ixgbe_set_rx_drop_en(adapter);
  6187. }
  6188. if (link_up ||
  6189. time_after(jiffies, (adapter->link_check_timeout +
  6190. IXGBE_TRY_LINK_TIMEOUT))) {
  6191. adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
  6192. IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
  6193. IXGBE_WRITE_FLUSH(hw);
  6194. }
  6195. adapter->link_up = link_up;
  6196. adapter->link_speed = link_speed;
  6197. }
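/* Refresh the default user priority advertised to VFs from the IEEE DCB
* APP table entry for the default ethertype (protocol 0).
*/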
  6198. static void ixgbe_update_default_up(struct ixgbe_adapter *adapter)
  6199. {
  6200. #ifdef CONFIG_IXGBE_DCB
  6201. struct net_device *netdev = adapter->netdev;
  6202. struct dcb_app app = {
  6203. .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
  6204. .protocol = 0,
  6205. };
  6206. u8 up = 0;
  6207. if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)
  6208. up = dcb_ieee_getapp_mask(netdev, &app);
  6209. adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0;
  6210. #endif
  6211. }
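/* netdev_walk_all_upper_dev_rcu() callback: wake the Tx queues of any
* macvlan that is offloaded onto this port once link is up again.
*/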
  6212. static int ixgbe_enable_macvlan(struct net_device *upper, void *data)
  6213. {
  6214. if (netif_is_macvlan(upper)) {
  6215. struct macvlan_dev *vlan = netdev_priv(upper);
  6216. if (vlan->fwd_priv)
  6217. netif_tx_wake_all_queues(upper);
  6218. }
  6219. return 0;
  6220. }
  6221. /**
  6222. * ixgbe_watchdog_link_is_up - update netif_carrier status and
  6223. * print link up message
  6224. * @adapter: pointer to the device adapter structure
  6225. **/
  6226. static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
  6227. {
  6228. struct net_device *netdev = adapter->netdev;
  6229. struct ixgbe_hw *hw = &adapter->hw;
  6230. u32 link_speed = adapter->link_speed;
  6231. const char *speed_str;
  6232. bool flow_rx, flow_tx;
  6233. /* only continue if link was previously down */
  6234. if (netif_carrier_ok(netdev))
  6235. return;
  6236. adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
  6237. switch (hw->mac.type) {
  6238. case ixgbe_mac_82598EB: {
  6239. u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
  6240. u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
  6241. flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
  6242. flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
  6243. }
  6244. break;
  6245. case ixgbe_mac_X540:
  6246. case ixgbe_mac_X550:
  6247. case ixgbe_mac_X550EM_x:
  6248. case ixgbe_mac_x550em_a:
  6249. case ixgbe_mac_82599EB: {
  6250. u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
  6251. u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
  6252. flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
  6253. flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
  6254. }
  6255. break;
  6256. default:
  6257. flow_tx = false;
  6258. flow_rx = false;
  6259. break;
  6260. }
  6261. adapter->last_rx_ptp_check = jiffies;
  6262. if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
  6263. ixgbe_ptp_start_cyclecounter(adapter);
  6264. switch (link_speed) {
  6265. case IXGBE_LINK_SPEED_10GB_FULL:
  6266. speed_str = "10 Gbps";
  6267. break;
  6268. case IXGBE_LINK_SPEED_2_5GB_FULL:
  6269. speed_str = "2.5 Gbps";
  6270. break;
  6271. case IXGBE_LINK_SPEED_1GB_FULL:
  6272. speed_str = "1 Gbps";
  6273. break;
  6274. case IXGBE_LINK_SPEED_100_FULL:
  6275. speed_str = "100 Mbps";
  6276. break;
  6277. case IXGBE_LINK_SPEED_10_FULL:
  6278. speed_str = "10 Mbps";
  6279. break;
  6280. default:
  6281. speed_str = "unknown speed";
  6282. break;
  6283. }
  6284. e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", speed_str,
  6285. ((flow_rx && flow_tx) ? "RX/TX" :
  6286. (flow_rx ? "RX" :
  6287. (flow_tx ? "TX" : "None"))));
  6288. netif_carrier_on(netdev);
  6289. ixgbe_check_vf_rate_limit(adapter);
  6290. /* enable transmits */
  6291. netif_tx_wake_all_queues(adapter->netdev);
  6292. /* enable any upper devices */
  6293. rtnl_lock();
  6294. netdev_walk_all_upper_dev_rcu(adapter->netdev,
  6295. ixgbe_enable_macvlan, NULL);
  6296. rtnl_unlock();
  6297. /* update the default user priority for VFs */
  6298. ixgbe_update_default_up(adapter);
  6299. /* ping all the active vfs to let them know link has changed */
  6300. ixgbe_ping_all_vfs(adapter);
  6301. }
  6302. /**
  6303. * ixgbe_watchdog_link_is_down - update netif_carrier status and
  6304. * print link down message
  6305. * @adapter: pointer to the adapter structure
  6306. **/
  6307. static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
  6308. {
  6309. struct net_device *netdev = adapter->netdev;
  6310. struct ixgbe_hw *hw = &adapter->hw;
  6311. adapter->link_up = false;
  6312. adapter->link_speed = 0;
  6313. /* only continue if link was up previously */
  6314. if (!netif_carrier_ok(netdev))
  6315. return;
  6316. /* poll for SFP+ cable when link is down */
  6317. if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
  6318. adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
  6319. if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
  6320. ixgbe_ptp_start_cyclecounter(adapter);
  6321. e_info(drv, "NIC Link is Down\n");
  6322. netif_carrier_off(netdev);
  6323. /* ping all the active vfs to let them know link has changed */
  6324. ixgbe_ping_all_vfs(adapter);
  6325. }
  6326. static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter)
  6327. {
  6328. int i;
  6329. for (i = 0; i < adapter->num_tx_queues; i++) {
  6330. struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
  6331. if (tx_ring->next_to_use != tx_ring->next_to_clean)
  6332. return true;
  6333. }
  6334. for (i = 0; i < adapter->num_xdp_queues; i++) {
  6335. struct ixgbe_ring *ring = adapter->xdp_ring[i];
  6336. if (ring->next_to_use != ring->next_to_clean)
  6337. return true;
  6338. }
  6339. return false;
  6340. }
  6341. static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter)
  6342. {
  6343. struct ixgbe_hw *hw = &adapter->hw;
  6344. struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
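/* __ALIGN_MASK(1, ~mask) rounds 1 up to the pool stride encoded in the
* VMDq mask, i.e. the number of Tx queues assigned to each pool.
*/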
  6345. u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
  6346. int i, j;
  6347. if (!adapter->num_vfs)
  6348. return false;
  6349. /* resetting the PF is only needed for MAC before X550 */
  6350. if (hw->mac.type >= ixgbe_mac_X550)
  6351. return false;
  6352. for (i = 0; i < adapter->num_vfs; i++) {
  6353. for (j = 0; j < q_per_pool; j++) {
  6354. u32 h, t;
  6355. h = IXGBE_READ_REG(hw, IXGBE_PVFTDHN(q_per_pool, i, j));
  6356. t = IXGBE_READ_REG(hw, IXGBE_PVFTDTN(q_per_pool, i, j));
  6357. if (h != t)
  6358. return true;
  6359. }
  6360. }
  6361. return false;
  6362. }
  6363. /**
  6364. * ixgbe_watchdog_flush_tx - flush queues on link down
  6365. * @adapter: pointer to the device adapter structure
  6366. **/
  6367. static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
  6368. {
  6369. if (!netif_carrier_ok(adapter->netdev)) {
  6370. if (ixgbe_ring_tx_pending(adapter) ||
  6371. ixgbe_vf_tx_pending(adapter)) {
  6372. /* We've lost link, so the controller stops DMA,
  6373. * but we've got queued Tx work that's never going
  6374. * to get done, so reset controller to flush Tx.
  6375. * (Do the reset outside of interrupt context).
  6376. */
  6377. e_warn(drv, "initiating reset to clear Tx work after link loss\n");
  6378. set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
  6379. }
  6380. }
  6381. }
  6382. #ifdef CONFIG_PCI_IOV
  6383. static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
  6384. {
  6385. struct ixgbe_hw *hw = &adapter->hw;
  6386. struct pci_dev *pdev = adapter->pdev;
  6387. unsigned int vf;
  6388. u32 gpc;
  6389. if (!(netif_carrier_ok(adapter->netdev)))
  6390. return;
  6391. gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC);
  6392. if (gpc) /* If incrementing then no need for the check below */
  6393. return;
  6394. /* Check to see if a bad DMA write target from an errant or
  6395. * malicious VF has caused a PCIe error. If so then we can
  6396. * issue a VFLR to the offending VF(s) and then resume without
  6397. * requesting a full slot reset.
  6398. */
  6399. if (!pdev)
  6400. return;
  6401. /* check status reg for all VFs owned by this PF */
  6402. for (vf = 0; vf < adapter->num_vfs; ++vf) {
  6403. struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev;
  6404. u16 status_reg;
  6405. if (!vfdev)
  6406. continue;
  6407. pci_read_config_word(vfdev, PCI_STATUS, &status_reg);
  6408. if (status_reg != IXGBE_FAILED_READ_CFG_WORD &&
  6409. status_reg & PCI_STATUS_REC_MASTER_ABORT)
  6410. pcie_flr(vfdev);
  6411. }
  6412. }
  6413. static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
  6414. {
  6415. u32 ssvpc;
  6416. /* Do not perform spoof check for 82598 or if not in IOV mode */
  6417. if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
  6418. adapter->num_vfs == 0)
  6419. return;
  6420. ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC);
  6421. /*
  6422. * ssvpc register is cleared on read, if zero then no
  6423. * spoofed packets in the last interval.
  6424. */
  6425. if (!ssvpc)
  6426. return;
  6427. e_warn(drv, "%u Spoofed packets detected\n", ssvpc);
  6428. }
  6429. #else
  6430. static void ixgbe_spoof_check(struct ixgbe_adapter __always_unused *adapter)
  6431. {
  6432. }
  6433. static void
  6434. ixgbe_check_for_bad_vf(struct ixgbe_adapter __always_unused *adapter)
  6435. {
  6436. }
  6437. #endif /* CONFIG_PCI_IOV */
  6438. /**
  6439. * ixgbe_watchdog_subtask - check and bring link up
  6440. * @adapter: pointer to the device adapter structure
  6441. **/
  6442. static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
  6443. {
  6444. /* if interface is down, removing or resetting, do nothing */
  6445. if (test_bit(__IXGBE_DOWN, &adapter->state) ||
  6446. test_bit(__IXGBE_REMOVING, &adapter->state) ||
  6447. test_bit(__IXGBE_RESETTING, &adapter->state))
  6448. return;
  6449. ixgbe_watchdog_update_link(adapter);
  6450. if (adapter->link_up)
  6451. ixgbe_watchdog_link_is_up(adapter);
  6452. else
  6453. ixgbe_watchdog_link_is_down(adapter);
  6454. ixgbe_check_for_bad_vf(adapter);
  6455. ixgbe_spoof_check(adapter);
  6456. ixgbe_update_stats(adapter);
  6457. ixgbe_watchdog_flush_tx(adapter);
  6458. }
  6459. /**
  6460. * ixgbe_sfp_detection_subtask - poll for SFP+ cable
  6461. * @adapter: the ixgbe adapter structure
  6462. **/
  6463. static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
  6464. {
  6465. struct ixgbe_hw *hw = &adapter->hw;
  6466. s32 err;
  6467. /* not searching for SFP so there is nothing to do here */
  6468. if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
  6469. !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
  6470. return;
  6471. if (adapter->sfp_poll_time &&
  6472. time_after(adapter->sfp_poll_time, jiffies))
  6473. return; /* If not yet time to poll for SFP */
  6474. /* someone else is in init, wait until next service event */
  6475. if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
  6476. return;
  6477. adapter->sfp_poll_time = jiffies + IXGBE_SFP_POLL_JIFFIES - 1;
  6478. err = hw->phy.ops.identify_sfp(hw);
  6479. if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
  6480. goto sfp_out;
  6481. if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
  6482. /* If no cable is present, then we need to reset
  6483. * the next time we find a good cable. */
  6484. adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
  6485. }
  6486. /* exit on error */
  6487. if (err)
  6488. goto sfp_out;
  6489. /* exit if reset not needed */
  6490. if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
  6491. goto sfp_out;
  6492. adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET;
  6493. /*
  6494. * A module may be identified correctly, but the EEPROM may not have
  6495. * support for that module. setup_sfp() will fail in that case, so
  6496. * we should not allow that module to load.
  6497. */
  6498. if (hw->mac.type == ixgbe_mac_82598EB)
  6499. err = hw->phy.ops.reset(hw);
  6500. else
  6501. err = hw->mac.ops.setup_sfp(hw);
  6502. if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
  6503. goto sfp_out;
  6504. adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
  6505. e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);
  6506. sfp_out:
  6507. clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
  6508. if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) &&
  6509. (adapter->netdev->reg_state == NETREG_REGISTERED)) {
  6510. e_dev_err("failed to initialize because an unsupported "
  6511. "SFP+ module type was detected.\n");
  6512. e_dev_err("Reload the driver after installing a "
  6513. "supported module.\n");
  6514. unregister_netdev(adapter->netdev);
  6515. }
  6516. }
  6517. /**
  6518. * ixgbe_sfp_link_config_subtask - set up link SFP after module install
  6519. * @adapter: the ixgbe adapter structure
  6520. **/
  6521. static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
  6522. {
  6523. struct ixgbe_hw *hw = &adapter->hw;
  6524. u32 speed;
  6525. bool autoneg = false;
  6526. if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG))
  6527. return;
  6528. /* someone else is in init, wait until next service event */
  6529. if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
  6530. return;
  6531. adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
  6532. speed = hw->phy.autoneg_advertised;
  6533. if ((!speed) && (hw->mac.ops.get_link_capabilities)) {
  6534. hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg);
6535. /* set up the highest link speed when autoneg is off */
  6536. if (!autoneg) {
  6537. if (speed & IXGBE_LINK_SPEED_10GB_FULL)
  6538. speed = IXGBE_LINK_SPEED_10GB_FULL;
  6539. }
  6540. }
  6541. if (hw->mac.ops.setup_link)
  6542. hw->mac.ops.setup_link(hw, speed, true);
  6543. adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
  6544. adapter->link_check_timeout = jiffies;
  6545. clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
  6546. }
  6547. /**
  6548. * ixgbe_service_timer - Timer Call-back
6549. * @t: pointer to the timer_list structure
  6550. **/
  6551. static void ixgbe_service_timer(struct timer_list *t)
  6552. {
  6553. struct ixgbe_adapter *adapter = from_timer(adapter, t, service_timer);
  6554. unsigned long next_event_offset;
  6555. /* poll faster when waiting for link */
  6556. if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
  6557. next_event_offset = HZ / 10;
  6558. else
  6559. next_event_offset = HZ * 2;
  6560. /* Reset the timer */
  6561. mod_timer(&adapter->service_timer, next_event_offset + jiffies);
  6562. ixgbe_service_event_schedule(adapter);
  6563. }
  6564. static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter)
  6565. {
  6566. struct ixgbe_hw *hw = &adapter->hw;
  6567. u32 status;
  6568. if (!(adapter->flags2 & IXGBE_FLAG2_PHY_INTERRUPT))
  6569. return;
  6570. adapter->flags2 &= ~IXGBE_FLAG2_PHY_INTERRUPT;
  6571. if (!hw->phy.ops.handle_lasi)
  6572. return;
  6573. status = hw->phy.ops.handle_lasi(&adapter->hw);
  6574. if (status != IXGBE_ERR_OVERTEMP)
  6575. return;
  6576. e_crit(drv, "%s\n", ixgbe_overheat_msg);
  6577. }
  6578. static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
  6579. {
  6580. if (!test_and_clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state))
  6581. return;
  6582. /* If we're already down, removing or resetting, just bail */
  6583. if (test_bit(__IXGBE_DOWN, &adapter->state) ||
  6584. test_bit(__IXGBE_REMOVING, &adapter->state) ||
  6585. test_bit(__IXGBE_RESETTING, &adapter->state))
  6586. return;
  6587. ixgbe_dump(adapter);
  6588. netdev_err(adapter->netdev, "Reset adapter\n");
  6589. adapter->tx_timeout_count++;
  6590. rtnl_lock();
  6591. ixgbe_reinit_locked(adapter);
  6592. rtnl_unlock();
  6593. }
  6594. /**
  6595. * ixgbe_service_task - manages and runs subtasks
  6596. * @work: pointer to work_struct containing our data
  6597. **/
  6598. static void ixgbe_service_task(struct work_struct *work)
  6599. {
  6600. struct ixgbe_adapter *adapter = container_of(work,
  6601. struct ixgbe_adapter,
  6602. service_task);
  6603. if (ixgbe_removed(adapter->hw.hw_addr)) {
  6604. if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
  6605. rtnl_lock();
  6606. ixgbe_down(adapter);
  6607. rtnl_unlock();
  6608. }
  6609. ixgbe_service_event_complete(adapter);
  6610. return;
  6611. }
  6612. if (adapter->flags2 & IXGBE_FLAG2_UDP_TUN_REREG_NEEDED) {
  6613. rtnl_lock();
  6614. adapter->flags2 &= ~IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
  6615. udp_tunnel_get_rx_info(adapter->netdev);
  6616. rtnl_unlock();
  6617. }
  6618. ixgbe_reset_subtask(adapter);
  6619. ixgbe_phy_interrupt_subtask(adapter);
  6620. ixgbe_sfp_detection_subtask(adapter);
  6621. ixgbe_sfp_link_config_subtask(adapter);
  6622. ixgbe_check_overtemp_subtask(adapter);
  6623. ixgbe_watchdog_subtask(adapter);
  6624. ixgbe_fdir_reinit_subtask(adapter);
  6625. ixgbe_check_hang_subtask(adapter);
  6626. if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) {
  6627. ixgbe_ptp_overflow_check(adapter);
  6628. ixgbe_ptp_rx_hang(adapter);
  6629. ixgbe_ptp_tx_hang(adapter);
  6630. }
  6631. ixgbe_service_event_complete(adapter);
  6632. }
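/* Build the advanced context descriptor for a TSO frame: zero the IP
* length field so the hardware can fill in per-segment values, remove the
* payload length from the TCP pseudo-header checksum, and pack the MAC/IP
* header lengths, L4 header length and MSS into the descriptor. Returns 1
* if a context descriptor was written, 0 if no TSO is needed and a
* negative errno on failure.
*/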
  6633. static int ixgbe_tso(struct ixgbe_ring *tx_ring,
  6634. struct ixgbe_tx_buffer *first,
  6635. u8 *hdr_len)
  6636. {
  6637. u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
  6638. struct sk_buff *skb = first->skb;
  6639. union {
  6640. struct iphdr *v4;
  6641. struct ipv6hdr *v6;
  6642. unsigned char *hdr;
  6643. } ip;
  6644. union {
  6645. struct tcphdr *tcp;
  6646. unsigned char *hdr;
  6647. } l4;
  6648. u32 paylen, l4_offset;
  6649. int err;
  6650. if (skb->ip_summed != CHECKSUM_PARTIAL)
  6651. return 0;
  6652. if (!skb_is_gso(skb))
  6653. return 0;
  6654. err = skb_cow_head(skb, 0);
  6655. if (err < 0)
  6656. return err;
  6657. if (eth_p_mpls(first->protocol))
  6658. ip.hdr = skb_inner_network_header(skb);
  6659. else
  6660. ip.hdr = skb_network_header(skb);
  6661. l4.hdr = skb_checksum_start(skb);
  6662. /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
  6663. type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
  6664. /* initialize outer IP header fields */
  6665. if (ip.v4->version == 4) {
  6666. unsigned char *csum_start = skb_checksum_start(skb);
  6667. unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
  6668. /* IP header will have to cancel out any data that
  6669. * is not a part of the outer IP header
  6670. */
  6671. ip.v4->check = csum_fold(csum_partial(trans_start,
  6672. csum_start - trans_start,
  6673. 0));
  6674. type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
  6675. ip.v4->tot_len = 0;
  6676. first->tx_flags |= IXGBE_TX_FLAGS_TSO |
  6677. IXGBE_TX_FLAGS_CSUM |
  6678. IXGBE_TX_FLAGS_IPV4;
  6679. } else {
  6680. ip.v6->payload_len = 0;
  6681. first->tx_flags |= IXGBE_TX_FLAGS_TSO |
  6682. IXGBE_TX_FLAGS_CSUM;
  6683. }
  6684. /* determine offset of inner transport header */
  6685. l4_offset = l4.hdr - skb->data;
  6686. /* compute length of segmentation header */
  6687. *hdr_len = (l4.tcp->doff * 4) + l4_offset;
  6688. /* remove payload length from inner checksum */
  6689. paylen = skb->len - l4_offset;
  6690. csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
  6691. /* update gso size and bytecount with header size */
  6692. first->gso_segs = skb_shinfo(skb)->gso_segs;
  6693. first->bytecount += (first->gso_segs - 1) * *hdr_len;
6694. /* mss_l4len_idx: use 0 as index for TSO */
  6695. mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
  6696. mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
  6697. /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
  6698. vlan_macip_lens = l4.hdr - ip.hdr;
  6699. vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
  6700. vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
  6701. ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
  6702. mss_l4len_idx);
  6703. return 1;
  6704. }
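/* Returns true when the checksum start offset supplied by the stack lines
* up with an SCTP header found by walking the IPv6 extension header chain.
*/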
  6705. static inline bool ixgbe_ipv6_csum_is_sctp(struct sk_buff *skb)
  6706. {
  6707. unsigned int offset = 0;
  6708. ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
  6709. return offset == skb_checksum_start_offset(skb);
  6710. }
  6711. static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
  6712. struct ixgbe_tx_buffer *first)
  6713. {
  6714. struct sk_buff *skb = first->skb;
  6715. u32 vlan_macip_lens = 0;
  6716. u32 type_tucmd = 0;
  6717. if (skb->ip_summed != CHECKSUM_PARTIAL) {
  6718. csum_failed:
  6719. if (!(first->tx_flags & (IXGBE_TX_FLAGS_HW_VLAN |
  6720. IXGBE_TX_FLAGS_CC)))
  6721. return;
  6722. goto no_csum;
  6723. }
  6724. switch (skb->csum_offset) {
  6725. case offsetof(struct tcphdr, check):
  6726. type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
  6727. /* fall through */
  6728. case offsetof(struct udphdr, check):
  6729. break;
  6730. case offsetof(struct sctphdr, checksum):
  6731. /* validate that this is actually an SCTP request */
  6732. if (((first->protocol == htons(ETH_P_IP)) &&
  6733. (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
  6734. ((first->protocol == htons(ETH_P_IPV6)) &&
  6735. ixgbe_ipv6_csum_is_sctp(skb))) {
  6736. type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
  6737. break;
  6738. }
  6739. /* fall through */
  6740. default:
  6741. skb_checksum_help(skb);
  6742. goto csum_failed;
  6743. }
  6744. /* update TX checksum flag */
  6745. first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
  6746. vlan_macip_lens = skb_checksum_start_offset(skb) -
  6747. skb_network_offset(skb);
  6748. no_csum:
  6749. /* vlan_macip_lens: MACLEN, VLAN tag */
  6750. vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
  6751. vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
  6752. ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, 0);
  6753. }
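/* IXGBE_SET_FLAG translates the single-bit _flag in _input into the
* single-bit _result without branching: the masked value is scaled up by
* multiplication or down by division depending on which bit is higher.
* Both _flag and _result are expected to be power-of-two values.
*/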
  6754. #define IXGBE_SET_FLAG(_input, _flag, _result) \
  6755. ((_flag <= _result) ? \
  6756. ((u32)(_input & _flag) * (_result / _flag)) : \
  6757. ((u32)(_input & _flag) / (_flag / _result)))
  6758. static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
  6759. {
  6760. /* set type for advanced descriptor with frame checksum insertion */
  6761. u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
  6762. IXGBE_ADVTXD_DCMD_DEXT |
  6763. IXGBE_ADVTXD_DCMD_IFCS;
  6764. /* set HW vlan bit if vlan is present */
  6765. cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_HW_VLAN,
  6766. IXGBE_ADVTXD_DCMD_VLE);
  6767. /* set segmentation enable bits for TSO/FSO */
  6768. cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSO,
  6769. IXGBE_ADVTXD_DCMD_TSE);
  6770. /* set timestamp bit if present */
  6771. cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP,
  6772. IXGBE_ADVTXD_MAC_TSTAMP);
  6773. /* insert frame checksum */
  6774. cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS);
  6775. return cmd_type;
  6776. }
  6777. static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
  6778. u32 tx_flags, unsigned int paylen)
  6779. {
  6780. u32 olinfo_status = paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
  6781. /* enable L4 checksum for TSO and TX checksum offload */
  6782. olinfo_status |= IXGBE_SET_FLAG(tx_flags,
  6783. IXGBE_TX_FLAGS_CSUM,
  6784. IXGBE_ADVTXD_POPTS_TXSM);
6785. /* enable IPv4 checksum for TSO */
  6786. olinfo_status |= IXGBE_SET_FLAG(tx_flags,
  6787. IXGBE_TX_FLAGS_IPV4,
  6788. IXGBE_ADVTXD_POPTS_IXSM);
  6789. /*
  6790. * Check Context must be set if Tx switch is enabled, which it
6791. * always is for the case where virtual functions are running
  6792. */
  6793. olinfo_status |= IXGBE_SET_FLAG(tx_flags,
  6794. IXGBE_TX_FLAGS_CC,
  6795. IXGBE_ADVTXD_CC);
  6796. tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
  6797. }
  6798. static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
  6799. {
  6800. netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
  6801. /* Herbert's original patch had:
  6802. * smp_mb__after_netif_stop_queue();
  6803. * but since that doesn't exist yet, just open code it.
  6804. */
  6805. smp_mb();
6806. /* We need to check again in case another CPU has just
  6807. * made room available.
  6808. */
  6809. if (likely(ixgbe_desc_unused(tx_ring) < size))
  6810. return -EBUSY;
  6811. /* A reprieve! - use start_queue because it doesn't call schedule */
  6812. netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
  6813. ++tx_ring->tx_stats.restart_queue;
  6814. return 0;
  6815. }
  6816. static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
  6817. {
  6818. if (likely(ixgbe_desc_unused(tx_ring) >= size))
  6819. return 0;
  6820. return __ixgbe_maybe_stop_tx(tx_ring, size);
  6821. }
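/* EOP marks the final descriptor of a frame; RS requests a status
* write-back so the Tx clean-up path can tell when the hardware is done.
*/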
  6822. #define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
  6823. IXGBE_TXD_CMD_RS)
  6824. static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
  6825. struct ixgbe_tx_buffer *first,
  6826. const u8 hdr_len)
  6827. {
  6828. struct sk_buff *skb = first->skb;
  6829. struct ixgbe_tx_buffer *tx_buffer;
  6830. union ixgbe_adv_tx_desc *tx_desc;
  6831. struct skb_frag_struct *frag;
  6832. dma_addr_t dma;
  6833. unsigned int data_len, size;
  6834. u32 tx_flags = first->tx_flags;
  6835. u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags);
  6836. u16 i = tx_ring->next_to_use;
  6837. tx_desc = IXGBE_TX_DESC(tx_ring, i);
  6838. ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
  6839. size = skb_headlen(skb);
  6840. data_len = skb->data_len;
  6841. #ifdef IXGBE_FCOE
  6842. if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
  6843. if (data_len < sizeof(struct fcoe_crc_eof)) {
  6844. size -= sizeof(struct fcoe_crc_eof) - data_len;
  6845. data_len = 0;
  6846. } else {
  6847. data_len -= sizeof(struct fcoe_crc_eof);
  6848. }
  6849. }
  6850. #endif
  6851. dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
  6852. tx_buffer = first;
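/* Walk the linear data and then every fragment; buffers larger than
* IXGBE_MAX_DATA_PER_TXD are split across several descriptors. The length
* occupies the low bits of cmd_type_len, which are clear in cmd_type, so
* XOR-ing the size in below is equivalent to OR-ing it.
*/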
  6853. for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
  6854. if (dma_mapping_error(tx_ring->dev, dma))
  6855. goto dma_error;
  6856. /* record length, and DMA address */
  6857. dma_unmap_len_set(tx_buffer, len, size);
  6858. dma_unmap_addr_set(tx_buffer, dma, dma);
  6859. tx_desc->read.buffer_addr = cpu_to_le64(dma);
  6860. while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
  6861. tx_desc->read.cmd_type_len =
  6862. cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD);
  6863. i++;
  6864. tx_desc++;
  6865. if (i == tx_ring->count) {
  6866. tx_desc = IXGBE_TX_DESC(tx_ring, 0);
  6867. i = 0;
  6868. }
  6869. tx_desc->read.olinfo_status = 0;
  6870. dma += IXGBE_MAX_DATA_PER_TXD;
  6871. size -= IXGBE_MAX_DATA_PER_TXD;
  6872. tx_desc->read.buffer_addr = cpu_to_le64(dma);
  6873. }
  6874. if (likely(!data_len))
  6875. break;
  6876. tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
  6877. i++;
  6878. tx_desc++;
  6879. if (i == tx_ring->count) {
  6880. tx_desc = IXGBE_TX_DESC(tx_ring, 0);
  6881. i = 0;
  6882. }
  6883. tx_desc->read.olinfo_status = 0;
  6884. #ifdef IXGBE_FCOE
  6885. size = min_t(unsigned int, data_len, skb_frag_size(frag));
  6886. #else
  6887. size = skb_frag_size(frag);
  6888. #endif
  6889. data_len -= size;
  6890. dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
  6891. DMA_TO_DEVICE);
  6892. tx_buffer = &tx_ring->tx_buffer_info[i];
  6893. }
  6894. /* write last descriptor with RS and EOP bits */
  6895. cmd_type |= size | IXGBE_TXD_CMD;
  6896. tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
  6897. netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
  6898. /* set the timestamp */
  6899. first->time_stamp = jiffies;
  6900. /*
  6901. * Force memory writes to complete before letting h/w know there
  6902. * are new descriptors to fetch. (Only applicable for weak-ordered
  6903. * memory model archs, such as IA-64).
  6904. *
  6905. * We also need this memory barrier to make certain all of the
  6906. * status bits have been updated before next_to_watch is written.
  6907. */
  6908. wmb();
  6909. /* set next_to_watch value indicating a packet is present */
  6910. first->next_to_watch = tx_desc;
  6911. i++;
  6912. if (i == tx_ring->count)
  6913. i = 0;
  6914. tx_ring->next_to_use = i;
  6915. ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
  6916. if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
  6917. writel(i, tx_ring->tail);
  6918. /* we need this if more than one processor can write to our tail
  6919. * at a time, it synchronizes IO on IA64/Altix systems
  6920. */
  6921. mmiowb();
  6922. }
  6923. return 0;
  6924. dma_error:
  6925. dev_err(tx_ring->dev, "TX DMA map failed\n");
  6926. /* clear dma mappings for failed tx_buffer_info map */
  6927. for (;;) {
  6928. tx_buffer = &tx_ring->tx_buffer_info[i];
  6929. if (dma_unmap_len(tx_buffer, len))
  6930. dma_unmap_page(tx_ring->dev,
  6931. dma_unmap_addr(tx_buffer, dma),
  6932. dma_unmap_len(tx_buffer, len),
  6933. DMA_TO_DEVICE);
  6934. dma_unmap_len_set(tx_buffer, len, 0);
  6935. if (tx_buffer == first)
  6936. break;
  6937. if (i == 0)
  6938. i += tx_ring->count;
  6939. i--;
  6940. }
  6941. dev_kfree_skb_any(first->skb);
  6942. first->skb = NULL;
  6943. tx_ring->next_to_use = i;
  6944. return -1;
  6945. }
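/* Flow Director ATR sampling: look at outgoing TCP packets (every SYN,
* otherwise one in atr_sample_rate) and add a signature filter so that
* receive traffic for the same flow is steered back to the queue, and
* hence the CPU, that transmitted it.
*/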
  6946. static void ixgbe_atr(struct ixgbe_ring *ring,
  6947. struct ixgbe_tx_buffer *first)
  6948. {
  6949. struct ixgbe_q_vector *q_vector = ring->q_vector;
  6950. union ixgbe_atr_hash_dword input = { .dword = 0 };
  6951. union ixgbe_atr_hash_dword common = { .dword = 0 };
  6952. union {
  6953. unsigned char *network;
  6954. struct iphdr *ipv4;
  6955. struct ipv6hdr *ipv6;
  6956. } hdr;
  6957. struct tcphdr *th;
  6958. unsigned int hlen;
  6959. struct sk_buff *skb;
  6960. __be16 vlan_id;
  6961. int l4_proto;
6962. /* if ring doesn't have an interrupt vector, cannot perform ATR */
  6963. if (!q_vector)
  6964. return;
  6965. /* do nothing if sampling is disabled */
  6966. if (!ring->atr_sample_rate)
  6967. return;
  6968. ring->atr_count++;
  6969. /* currently only IPv4/IPv6 with TCP is supported */
  6970. if ((first->protocol != htons(ETH_P_IP)) &&
  6971. (first->protocol != htons(ETH_P_IPV6)))
  6972. return;
  6973. /* snag network header to get L4 type and address */
  6974. skb = first->skb;
  6975. hdr.network = skb_network_header(skb);
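/* if the network header does not sit past the start of the frame it was
* never set for this skb, so there is nothing we can safely parse
*/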
  6976. if (unlikely(hdr.network <= skb->data))
  6977. return;
  6978. if (skb->encapsulation &&
  6979. first->protocol == htons(ETH_P_IP) &&
  6980. hdr.ipv4->protocol == IPPROTO_UDP) {
  6981. struct ixgbe_adapter *adapter = q_vector->adapter;
  6982. if (unlikely(skb_tail_pointer(skb) < hdr.network +
  6983. VXLAN_HEADROOM))
  6984. return;
  6985. /* verify the port is recognized as VXLAN */
  6986. if (adapter->vxlan_port &&
  6987. udp_hdr(skb)->dest == adapter->vxlan_port)
  6988. hdr.network = skb_inner_network_header(skb);
  6989. if (adapter->geneve_port &&
  6990. udp_hdr(skb)->dest == adapter->geneve_port)
  6991. hdr.network = skb_inner_network_header(skb);
  6992. }
  6993. /* Make sure we have at least [minimum IPv4 header + TCP]
  6994. * or [IPv6 header] bytes
  6995. */
  6996. if (unlikely(skb_tail_pointer(skb) < hdr.network + 40))
  6997. return;
  6998. /* Currently only IPv4/IPv6 with TCP is supported */
  6999. switch (hdr.ipv4->version) {
  7000. case IPVERSION:
  7001. /* access ihl as u8 to avoid unaligned access on ia64 */
  7002. hlen = (hdr.network[0] & 0x0F) << 2;
  7003. l4_proto = hdr.ipv4->protocol;
  7004. break;
  7005. case 6:
  7006. hlen = hdr.network - skb->data;
  7007. l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL);
  7008. hlen -= hdr.network - skb->data;
  7009. break;
  7010. default:
  7011. return;
  7012. }
  7013. if (l4_proto != IPPROTO_TCP)
  7014. return;
  7015. if (unlikely(skb_tail_pointer(skb) < hdr.network +
  7016. hlen + sizeof(struct tcphdr)))
  7017. return;
  7018. th = (struct tcphdr *)(hdr.network + hlen);
  7019. /* skip this packet since the socket is closing */
  7020. if (th->fin)
  7021. return;
  7022. /* sample on all syn packets or once every atr sample count */
  7023. if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
  7024. return;
  7025. /* reset sample count */
  7026. ring->atr_count = 0;
  7027. vlan_id = htons(first->tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);
  7028. /*
  7029. * src and dst are inverted, think how the receiver sees them
  7030. *
  7031. * The input is broken into two sections, a non-compressed section
  7032. * containing vm_pool, vlan_id, and flow_type. The rest of the data
  7033. * is XORed together and stored in the compressed dword.
  7034. */
  7035. input.formatted.vlan_id = vlan_id;
  7036. /*
  7037. * since src port and flex bytes occupy the same word XOR them together
  7038. * and write the value to source port portion of compressed dword
  7039. */
  7040. if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
  7041. common.port.src ^= th->dest ^ htons(ETH_P_8021Q);
  7042. else
  7043. common.port.src ^= th->dest ^ first->protocol;
  7044. common.port.dst ^= th->source;
  7045. switch (hdr.ipv4->version) {
  7046. case IPVERSION:
  7047. input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
  7048. common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
  7049. break;
  7050. case 6:
  7051. input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
  7052. common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
  7053. hdr.ipv6->saddr.s6_addr32[1] ^
  7054. hdr.ipv6->saddr.s6_addr32[2] ^
  7055. hdr.ipv6->saddr.s6_addr32[3] ^
  7056. hdr.ipv6->daddr.s6_addr32[0] ^
  7057. hdr.ipv6->daddr.s6_addr32[1] ^
  7058. hdr.ipv6->daddr.s6_addr32[2] ^
  7059. hdr.ipv6->daddr.s6_addr32[3];
  7060. break;
  7061. default:
  7062. break;
  7063. }
  7064. if (hdr.network != skb_network_header(skb))
  7065. input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK;
  7066. /* This assumes the Rx queue and Tx queue are bound to the same CPU */
  7067. ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
  7068. input, common, ring->queue_index);
  7069. }
  7070. static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
  7071. void *accel_priv, select_queue_fallback_t fallback)
  7072. {
  7073. struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
  7074. #ifdef IXGBE_FCOE
  7075. struct ixgbe_adapter *adapter;
  7076. struct ixgbe_ring_feature *f;
  7077. int txq;
  7078. #endif
  7079. if (fwd_adapter)
  7080. return skb->queue_mapping + fwd_adapter->tx_base_queue;
  7081. #ifdef IXGBE_FCOE
  7082. /*
  7083. * only execute the code below if protocol is FCoE
  7084. * or FIP and we have FCoE enabled on the adapter
  7085. */
  7086. switch (vlan_get_protocol(skb)) {
  7087. case htons(ETH_P_FCOE):
  7088. case htons(ETH_P_FIP):
  7089. adapter = netdev_priv(dev);
  7090. if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
  7091. break;
  7092. /* fall through */
  7093. default:
  7094. return fallback(dev, skb);
  7095. }
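/* spread FCoE frames across the FCoE queue range: reuse the recorded Rx
* queue when there is one, otherwise the current CPU, folded modulo the
* number of FCoE queues
*/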
  7096. f = &adapter->ring_feature[RING_F_FCOE];
  7097. txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
  7098. smp_processor_id();
  7099. while (txq >= f->indices)
  7100. txq -= f->indices;
  7101. return txq + f->offset;
  7102. #else
  7103. return fallback(dev, skb);
  7104. #endif
  7105. }
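/* Transmit one XDP frame on the current CPU's dedicated XDP Tx ring using
* a single data descriptor. Returns IXGBE_XDP_TX on success, or
* IXGBE_XDP_CONSUMED when no descriptor is free or the DMA mapping fails;
* the tail bump is left to the caller so frames can be batched.
*/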
  7106. static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
  7107. struct xdp_buff *xdp)
  7108. {
  7109. struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
  7110. struct ixgbe_tx_buffer *tx_buffer;
  7111. union ixgbe_adv_tx_desc *tx_desc;
  7112. u32 len, cmd_type;
  7113. dma_addr_t dma;
  7114. u16 i;
  7115. len = xdp->data_end - xdp->data;
  7116. if (unlikely(!ixgbe_desc_unused(ring)))
  7117. return IXGBE_XDP_CONSUMED;
  7118. dma = dma_map_single(ring->dev, xdp->data, len, DMA_TO_DEVICE);
  7119. if (dma_mapping_error(ring->dev, dma))
  7120. return IXGBE_XDP_CONSUMED;
  7121. /* record the location of the first descriptor for this packet */
  7122. tx_buffer = &ring->tx_buffer_info[ring->next_to_use];
  7123. tx_buffer->bytecount = len;
  7124. tx_buffer->gso_segs = 1;
  7125. tx_buffer->protocol = 0;
  7126. i = ring->next_to_use;
  7127. tx_desc = IXGBE_TX_DESC(ring, i);
  7128. dma_unmap_len_set(tx_buffer, len, len);
  7129. dma_unmap_addr_set(tx_buffer, dma, dma);
  7130. tx_buffer->data = xdp->data;
  7131. tx_desc->read.buffer_addr = cpu_to_le64(dma);
  7132. /* put descriptor type bits */
  7133. cmd_type = IXGBE_ADVTXD_DTYP_DATA |
  7134. IXGBE_ADVTXD_DCMD_DEXT |
  7135. IXGBE_ADVTXD_DCMD_IFCS;
  7136. cmd_type |= len | IXGBE_TXD_CMD;
  7137. tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
  7138. tx_desc->read.olinfo_status =
  7139. cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT);
  7140. /* Avoid any potential race with xdp_xmit and cleanup */
  7141. smp_wmb();
  7142. /* set next_to_watch value indicating a packet is present */
  7143. i++;
  7144. if (i == ring->count)
  7145. i = 0;
  7146. tx_buffer->next_to_watch = tx_desc;
  7147. ring->next_to_use = i;
  7148. return IXGBE_XDP_TX;
  7149. }
  7150. netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
  7151. struct ixgbe_adapter *adapter,
  7152. struct ixgbe_ring *tx_ring)
  7153. {
  7154. struct ixgbe_tx_buffer *first;
  7155. int tso;
  7156. u32 tx_flags = 0;
  7157. unsigned short f;
  7158. u16 count = TXD_USE_COUNT(skb_headlen(skb));
  7159. __be16 protocol = skb->protocol;
  7160. u8 hdr_len = 0;
  7161. /*
  7162. * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
  7163. * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
  7164. * + 2 desc gap to keep tail from touching head,
  7165. * + 1 desc for context descriptor,
  7166. * otherwise try next time
  7167. */
  7168. for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
  7169. count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
  7170. if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
  7171. tx_ring->tx_stats.tx_busy++;
  7172. return NETDEV_TX_BUSY;
  7173. }
  7174. /* record the location of the first descriptor for this packet */
  7175. first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
  7176. first->skb = skb;
  7177. first->bytecount = skb->len;
  7178. first->gso_segs = 1;
  7179. /* if we have a HW VLAN tag being added default to the HW one */
  7180. if (skb_vlan_tag_present(skb)) {
  7181. tx_flags |= skb_vlan_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
  7182. tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
  7183. /* else if it is a SW VLAN check the next protocol and store the tag */
  7184. } else if (protocol == htons(ETH_P_8021Q)) {
  7185. struct vlan_hdr *vhdr, _vhdr;
  7186. vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
  7187. if (!vhdr)
  7188. goto out_drop;
  7189. tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
  7190. IXGBE_TX_FLAGS_VLAN_SHIFT;
  7191. tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
  7192. }
  7193. protocol = vlan_get_protocol(skb);
  7194. if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
  7195. adapter->ptp_clock) {
  7196. if (!test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
  7197. &adapter->state)) {
  7198. skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
  7199. tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
  7200. /* schedule check for Tx timestamp */
  7201. adapter->ptp_tx_skb = skb_get(skb);
  7202. adapter->ptp_tx_start = jiffies;
  7203. schedule_work(&adapter->ptp_tx_work);
  7204. } else {
  7205. adapter->tx_hwtstamp_skipped++;
  7206. }
  7207. }
  7208. skb_tx_timestamp(skb);
  7209. #ifdef CONFIG_PCI_IOV
  7210. /*
  7211. * Use the l2switch_enable flag - would be false if the DMA
  7212. * Tx switch had been disabled.
  7213. */
  7214. if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
  7215. tx_flags |= IXGBE_TX_FLAGS_CC;
  7216. #endif
  7217. /* DCB maps skb priorities 0-7 onto 3 bit PCP of VLAN tag. */
  7218. if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
  7219. ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) ||
  7220. (skb->priority != TC_PRIO_CONTROL))) {
  7221. tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
  7222. tx_flags |= (skb->priority & 0x7) <<
  7223. IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
  7224. if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) {
  7225. struct vlan_ethhdr *vhdr;
  7226. if (skb_cow_head(skb, 0))
  7227. goto out_drop;
  7228. vhdr = (struct vlan_ethhdr *)skb->data;
  7229. vhdr->h_vlan_TCI = htons(tx_flags >>
  7230. IXGBE_TX_FLAGS_VLAN_SHIFT);
  7231. } else {
  7232. tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
  7233. }
  7234. }
  7235. /* record initial flags and protocol */
  7236. first->tx_flags = tx_flags;
  7237. first->protocol = protocol;
  7238. #ifdef IXGBE_FCOE
  7239. /* setup tx offload for FCoE */
  7240. if ((protocol == htons(ETH_P_FCOE)) &&
  7241. (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) {
  7242. tso = ixgbe_fso(tx_ring, first, &hdr_len);
  7243. if (tso < 0)
  7244. goto out_drop;
  7245. goto xmit_fcoe;
  7246. }
  7247. #endif /* IXGBE_FCOE */
  7248. tso = ixgbe_tso(tx_ring, first, &hdr_len);
  7249. if (tso < 0)
  7250. goto out_drop;
  7251. else if (!tso)
  7252. ixgbe_tx_csum(tx_ring, first);
  7253. /* add the ATR filter if ATR is on */
  7254. if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
  7255. ixgbe_atr(tx_ring, first);
  7256. #ifdef IXGBE_FCOE
  7257. xmit_fcoe:
  7258. #endif /* IXGBE_FCOE */
  7259. if (ixgbe_tx_map(tx_ring, first, hdr_len))
  7260. goto cleanup_tx_timestamp;
  7261. return NETDEV_TX_OK;
  7262. out_drop:
  7263. dev_kfree_skb_any(first->skb);
  7264. first->skb = NULL;
  7265. cleanup_tx_timestamp:
  7266. if (unlikely(tx_flags & IXGBE_TX_FLAGS_TSTAMP)) {
  7267. dev_kfree_skb_any(adapter->ptp_tx_skb);
  7268. adapter->ptp_tx_skb = NULL;
  7269. cancel_work_sync(&adapter->ptp_tx_work);
  7270. clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
  7271. }
  7272. return NETDEV_TX_OK;
  7273. }
  7274. static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
  7275. struct net_device *netdev,
  7276. struct ixgbe_ring *ring)
  7277. {
  7278. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  7279. struct ixgbe_ring *tx_ring;
  7280. /*
  7281. * The minimum packet size for olinfo paylen is 17 so pad the skb
  7282. * in order to meet this minimum size requirement.
  7283. */
  7284. if (skb_put_padto(skb, 17))
  7285. return NETDEV_TX_OK;
  7286. tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping];
  7287. return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
  7288. }
  7289. static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
  7290. struct net_device *netdev)
  7291. {
  7292. return __ixgbe_xmit_frame(skb, netdev, NULL);
  7293. }
  7294. /**
  7295. * ixgbe_set_mac - Change the Ethernet Address of the NIC
  7296. * @netdev: network interface device structure
  7297. * @p: pointer to an address structure
  7298. *
  7299. * Returns 0 on success, negative on failure
  7300. **/
  7301. static int ixgbe_set_mac(struct net_device *netdev, void *p)
  7302. {
  7303. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  7304. struct ixgbe_hw *hw = &adapter->hw;
  7305. struct sockaddr *addr = p;
  7306. if (!is_valid_ether_addr(addr->sa_data))
  7307. return -EADDRNOTAVAIL;
  7308. memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
  7309. memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
  7310. ixgbe_mac_set_default_filter(adapter);
  7311. return 0;
  7312. }
  7313. static int
  7314. ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
  7315. {
  7316. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  7317. struct ixgbe_hw *hw = &adapter->hw;
  7318. u16 value;
  7319. int rc;
  7320. if (prtad != hw->phy.mdio.prtad)
  7321. return -EINVAL;
  7322. rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
  7323. if (!rc)
  7324. rc = value;
  7325. return rc;
  7326. }
  7327. static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
  7328. u16 addr, u16 value)
  7329. {
  7330. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  7331. struct ixgbe_hw *hw = &adapter->hw;
  7332. if (prtad != hw->phy.mdio.prtad)
  7333. return -EINVAL;
  7334. return hw->phy.ops.write_reg(hw, addr, devad, value);
  7335. }
  7336. static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
  7337. {
  7338. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  7339. switch (cmd) {
  7340. case SIOCSHWTSTAMP:
  7341. return ixgbe_ptp_set_ts_config(adapter, req);
  7342. case SIOCGHWTSTAMP:
  7343. return ixgbe_ptp_get_ts_config(adapter, req);
  7344. case SIOCGMIIPHY:
  7345. if (!adapter->hw.phy.ops.read_reg)
  7346. return -EOPNOTSUPP;
  7347. /* fall through */
  7348. default:
  7349. return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
  7350. }
  7351. }
  7352. /**
  7353. * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
  7354. * netdev->dev_addrs
7355. * @dev: network interface device structure
  7356. *
  7357. * Returns non-zero on failure
  7358. **/
  7359. static int ixgbe_add_sanmac_netdev(struct net_device *dev)
  7360. {
  7361. int err = 0;
  7362. struct ixgbe_adapter *adapter = netdev_priv(dev);
  7363. struct ixgbe_hw *hw = &adapter->hw;
  7364. if (is_valid_ether_addr(hw->mac.san_addr)) {
  7365. rtnl_lock();
  7366. err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN);
  7367. rtnl_unlock();
  7368. /* update SAN MAC vmdq pool selection */
  7369. hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
  7370. }
  7371. return err;
  7372. }
  7373. /**
7374. * ixgbe_del_sanmac_netdev - Removes the SAN MAC address from the corresponding
  7375. * netdev->dev_addrs
7376. * @dev: network interface device structure
  7377. *
  7378. * Returns non-zero on failure
  7379. **/
  7380. static int ixgbe_del_sanmac_netdev(struct net_device *dev)
  7381. {
  7382. int err = 0;
  7383. struct ixgbe_adapter *adapter = netdev_priv(dev);
  7384. struct ixgbe_mac_info *mac = &adapter->hw.mac;
  7385. if (is_valid_ether_addr(mac->san_addr)) {
  7386. rtnl_lock();
  7387. err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
  7388. rtnl_unlock();
  7389. }
  7390. return err;
  7391. }
  7392. #ifdef CONFIG_NET_POLL_CONTROLLER
  7393. /*
  7394. * Polling 'interrupt' - used by things like netconsole to send skbs
  7395. * without having to re-enable interrupts. It's not called while
  7396. * the interrupt routine is executing.
  7397. */
  7398. static void ixgbe_netpoll(struct net_device *netdev)
  7399. {
  7400. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  7401. int i;
  7402. /* if interface is down do nothing */
  7403. if (test_bit(__IXGBE_DOWN, &adapter->state))
  7404. return;
  7405. /* loop through and schedule all active queues */
  7406. for (i = 0; i < adapter->num_q_vectors; i++)
  7407. ixgbe_msix_clean_rings(0, adapter->q_vector[i]);
  7408. }
  7409. #endif
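/* Fold one ring's packet/byte counters into the rtnl_link_stats64 totals;
* the u64_stats seqcount retry loop yields a consistent 64-bit snapshot
* even on 32-bit hosts.
*/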
  7410. static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats,
  7411. struct ixgbe_ring *ring)
  7412. {
  7413. u64 bytes, packets;
  7414. unsigned int start;
  7415. if (ring) {
  7416. do {
  7417. start = u64_stats_fetch_begin_irq(&ring->syncp);
  7418. packets = ring->stats.packets;
  7419. bytes = ring->stats.bytes;
  7420. } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
  7421. stats->tx_packets += packets;
  7422. stats->tx_bytes += bytes;
  7423. }
  7424. }
  7425. static void ixgbe_get_stats64(struct net_device *netdev,
  7426. struct rtnl_link_stats64 *stats)
  7427. {
  7428. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  7429. int i;
  7430. rcu_read_lock();
  7431. for (i = 0; i < adapter->num_rx_queues; i++) {
  7432. struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
  7433. u64 bytes, packets;
  7434. unsigned int start;
  7435. if (ring) {
  7436. do {
  7437. start = u64_stats_fetch_begin_irq(&ring->syncp);
  7438. packets = ring->stats.packets;
  7439. bytes = ring->stats.bytes;
  7440. } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
  7441. stats->rx_packets += packets;
  7442. stats->rx_bytes += bytes;
  7443. }
  7444. }
  7445. for (i = 0; i < adapter->num_tx_queues; i++) {
  7446. struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
  7447. ixgbe_get_ring_stats64(stats, ring);
  7448. }
  7449. for (i = 0; i < adapter->num_xdp_queues; i++) {
  7450. struct ixgbe_ring *ring = ACCESS_ONCE(adapter->xdp_ring[i]);
  7451. ixgbe_get_ring_stats64(stats, ring);
  7452. }
  7453. rcu_read_unlock();
  7454. /* following stats updated by ixgbe_watchdog_task() */
  7455. stats->multicast = netdev->stats.multicast;
  7456. stats->rx_errors = netdev->stats.rx_errors;
  7457. stats->rx_length_errors = netdev->stats.rx_length_errors;
  7458. stats->rx_crc_errors = netdev->stats.rx_crc_errors;
  7459. stats->rx_missed_errors = netdev->stats.rx_missed_errors;
  7460. }
  7461. #ifdef CONFIG_IXGBE_DCB
  7462. /**
  7463. * ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
  7464. * @adapter: pointer to ixgbe_adapter
  7465. * @tc: number of traffic classes currently enabled
  7466. *
7467. * Configure a valid 802.1Qp to Rx packet buffer mapping, i.e. confirm
  7468. * 802.1Q priority maps to a packet buffer that exists.
  7469. */
  7470. static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
  7471. {
  7472. struct ixgbe_hw *hw = &adapter->hw;
  7473. u32 reg, rsave;
  7474. int i;
7475. /* The 82598 has a static priority to TC mapping that cannot
7476. * be changed, so no validation is needed.
  7477. */
  7478. if (hw->mac.type == ixgbe_mac_82598EB)
  7479. return;
  7480. reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
  7481. rsave = reg;
  7482. for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
  7483. u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT);
  7484. /* If up2tc is out of bounds default to zero */
  7485. if (up2tc > tc)
  7486. reg &= ~(0x7 << IXGBE_RTRUP2TC_UP_SHIFT);
  7487. }
  7488. if (reg != rsave)
  7489. IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
  7490. return;
  7491. }
  7492. /**
  7493. * ixgbe_set_prio_tc_map - Configure netdev prio tc map
  7494. * @adapter: Pointer to adapter struct
  7495. *
  7496. * Populate the netdev user priority to tc map
  7497. */
  7498. static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
  7499. {
  7500. struct net_device *dev = adapter->netdev;
  7501. struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg;
  7502. struct ieee_ets *ets = adapter->ixgbe_ieee_ets;
  7503. u8 prio;
  7504. for (prio = 0; prio < MAX_USER_PRIORITY; prio++) {
  7505. u8 tc = 0;
  7506. if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)
  7507. tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio);
  7508. else if (ets)
  7509. tc = ets->prio_tc[prio];
  7510. netdev_set_prio_tc_map(dev, prio, tc);
  7511. }
  7512. }
  7513. #endif /* CONFIG_IXGBE_DCB */
  7514. /**
  7515. * ixgbe_setup_tc - configure net_device for multiple traffic classes
  7516. *
7517. * @dev: net device to configure
  7518. * @tc: number of traffic classes to enable
  7519. */
  7520. int ixgbe_setup_tc(struct net_device *dev, u8 tc)
  7521. {
  7522. struct ixgbe_adapter *adapter = netdev_priv(dev);
  7523. struct ixgbe_hw *hw = &adapter->hw;
  7524. bool pools;
  7525. /* Hardware supports up to 8 traffic classes */
  7526. if (tc > adapter->dcb_cfg.num_tcs.pg_tcs)
  7527. return -EINVAL;
  7528. if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS)
  7529. return -EINVAL;
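/* bit 0 of fwd_bitmask is the PF's default pool, so a first zero bit
* above 1 means additional l2-fwd-offload (macvlan) pools are in use,
* which limits how many can coexist with DCB
*/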
  7530. pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
  7531. if (tc && pools && adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS)
  7532. return -EBUSY;
  7533. /* Hardware has to reinitialize queues and interrupts to
  7534. * match packet buffer alignment. Unfortunately, the
  7535. * hardware is not flexible enough to do this dynamically.
  7536. */
  7537. if (netif_running(dev))
  7538. ixgbe_close(dev);
  7539. else
  7540. ixgbe_reset(adapter);
  7541. ixgbe_clear_interrupt_scheme(adapter);
  7542. #ifdef CONFIG_IXGBE_DCB
  7543. if (tc) {
  7544. netdev_set_num_tc(dev, tc);
  7545. ixgbe_set_prio_tc_map(adapter);
  7546. adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
  7547. if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
  7548. adapter->last_lfc_mode = adapter->hw.fc.requested_mode;
  7549. adapter->hw.fc.requested_mode = ixgbe_fc_none;
  7550. }
  7551. } else {
  7552. netdev_reset_tc(dev);
  7553. if (adapter->hw.mac.type == ixgbe_mac_82598EB)
  7554. adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
  7555. adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
  7556. adapter->temp_dcb_cfg.pfc_mode_enable = false;
  7557. adapter->dcb_cfg.pfc_mode_enable = false;
  7558. }
  7559. ixgbe_validate_rtr(adapter, tc);
  7560. #endif /* CONFIG_IXGBE_DCB */
  7561. ixgbe_init_interrupt_scheme(adapter);
  7562. if (netif_running(dev))
  7563. return ixgbe_open(dev);
  7564. return 0;
  7565. }
  7566. static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter,
  7567. struct tc_cls_u32_offload *cls)
  7568. {
  7569. u32 hdl = cls->knode.handle;
  7570. u32 uhtid = TC_U32_USERHTID(cls->knode.handle);
  7571. u32 loc = cls->knode.handle & 0xfffff;
  7572. int err = 0, i, j;
  7573. struct ixgbe_jump_table *jump = NULL;
  7574. if (loc > IXGBE_MAX_HW_ENTRIES)
  7575. return -EINVAL;
  7576. if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE))
  7577. return -EINVAL;
  7578. /* Clear this filter in the link data it is associated with */
  7579. if (uhtid != 0x800) {
  7580. jump = adapter->jump_tables[uhtid];
  7581. if (!jump)
  7582. return -EINVAL;
  7583. if (!test_bit(loc - 1, jump->child_loc_map))
  7584. return -EINVAL;
  7585. clear_bit(loc - 1, jump->child_loc_map);
  7586. }
  7587. /* Check if the filter being deleted is a link */
  7588. for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) {
  7589. jump = adapter->jump_tables[i];
  7590. if (jump && jump->link_hdl == hdl) {
  7591. /* Delete filters in the hardware in the child hash
  7592. * table associated with this link
  7593. */
  7594. for (j = 0; j < IXGBE_MAX_HW_ENTRIES; j++) {
  7595. if (!test_bit(j, jump->child_loc_map))
  7596. continue;
  7597. spin_lock(&adapter->fdir_perfect_lock);
  7598. err = ixgbe_update_ethtool_fdir_entry(adapter,
  7599. NULL,
  7600. j + 1);
  7601. spin_unlock(&adapter->fdir_perfect_lock);
  7602. clear_bit(j, jump->child_loc_map);
  7603. }
  7604. /* Remove resources for this link */
  7605. kfree(jump->input);
  7606. kfree(jump->mask);
  7607. kfree(jump);
  7608. adapter->jump_tables[i] = NULL;
  7609. return err;
  7610. }
  7611. }
  7612. spin_lock(&adapter->fdir_perfect_lock);
  7613. err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc);
  7614. spin_unlock(&adapter->fdir_perfect_lock);
  7615. return err;
  7616. }
  7617. static int ixgbe_configure_clsu32_add_hnode(struct ixgbe_adapter *adapter,
  7618. struct tc_cls_u32_offload *cls)
  7619. {
  7620. u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
  7621. if (uhtid >= IXGBE_MAX_LINK_HANDLE)
  7622. return -EINVAL;
7623. /* These ixgbe devices do not support hash tables at the moment
  7624. * so abort when given hash tables.
  7625. */
  7626. if (cls->hnode.divisor > 0)
  7627. return -EINVAL;
  7628. set_bit(uhtid - 1, &adapter->tables);
  7629. return 0;
  7630. }
  7631. static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter,
  7632. struct tc_cls_u32_offload *cls)
  7633. {
  7634. u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
  7635. if (uhtid >= IXGBE_MAX_LINK_HANDLE)
  7636. return -EINVAL;
  7637. clear_bit(uhtid - 1, &adapter->tables);
  7638. return 0;
  7639. }
  7640. #ifdef CONFIG_NET_CLS_ACT
  7641. struct upper_walk_data {
  7642. struct ixgbe_adapter *adapter;
  7643. u64 action;
  7644. int ifindex;
  7645. u8 queue;
  7646. };
  7647. static int get_macvlan_queue(struct net_device *upper, void *_data)
  7648. {
  7649. if (netif_is_macvlan(upper)) {
  7650. struct macvlan_dev *dfwd = netdev_priv(upper);
  7651. struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
  7652. struct upper_walk_data *data = _data;
  7653. struct ixgbe_adapter *adapter = data->adapter;
  7654. int ifindex = data->ifindex;
  7655. if (vadapter && vadapter->netdev->ifindex == ifindex) {
  7656. data->queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx;
  7657. data->action = data->queue;
  7658. return 1;
  7659. }
  7660. }
  7661. return 0;
  7662. }
  7663. static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
  7664. u8 *queue, u64 *action)
  7665. {
  7666. unsigned int num_vfs = adapter->num_vfs, vf;
  7667. struct upper_walk_data data;
  7668. struct net_device *upper;
  7669. /* redirect to a SRIOV VF */
  7670. for (vf = 0; vf < num_vfs; ++vf) {
  7671. upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev);
  7672. if (upper->ifindex == ifindex) {
  7673. if (adapter->num_rx_pools > 1)
  7674. *queue = vf * 2;
  7675. else
  7676. *queue = vf * adapter->num_rx_queues_per_pool;
  7677. *action = vf + 1;
  7678. *action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
  7679. return 0;
  7680. }
  7681. }
7682. /* redirect to an offloaded macvlan netdev */
  7683. data.adapter = adapter;
  7684. data.ifindex = ifindex;
  7685. data.action = 0;
  7686. data.queue = 0;
  7687. if (netdev_walk_all_upper_dev_rcu(adapter->netdev,
  7688. get_macvlan_queue, &data)) {
  7689. *action = data.action;
  7690. *queue = data.queue;
  7691. return 0;
  7692. }
  7693. return -EINVAL;
  7694. }
  7695. static int parse_tc_actions(struct ixgbe_adapter *adapter,
  7696. struct tcf_exts *exts, u64 *action, u8 *queue)
  7697. {
  7698. const struct tc_action *a;
  7699. LIST_HEAD(actions);
  7700. int err;
  7701. if (!tcf_exts_has_actions(exts))
  7702. return -EINVAL;
  7703. tcf_exts_to_list(exts, &actions);
  7704. list_for_each_entry(a, &actions, list) {
  7705. /* Drop action */
  7706. if (is_tcf_gact_shot(a)) {
  7707. *action = IXGBE_FDIR_DROP_QUEUE;
  7708. *queue = IXGBE_FDIR_DROP_QUEUE;
  7709. return 0;
  7710. }
7711. /* Redirect to a VF or an offloaded macvlan */
  7712. if (is_tcf_mirred_egress_redirect(a)) {
  7713. int ifindex = tcf_mirred_ifindex(a);
  7714. err = handle_redirect_action(adapter, ifindex, queue,
  7715. action);
  7716. if (err == 0)
  7717. return err;
  7718. }
  7719. }
  7720. return -EINVAL;
  7721. }
  7722. #else
  7723. static int parse_tc_actions(struct ixgbe_adapter *adapter,
  7724. struct tcf_exts *exts, u64 *action, u8 *queue)
  7725. {
  7726. return -EINVAL;
  7727. }
  7728. #endif /* CONFIG_NET_CLS_ACT */
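/* Translate the u32 match keys in @cls into the driver's flow director
 * input/mask representation using the @field_ptr translation table from
 * ixgbe_model.h. When @nexthdr is non-NULL the keys must also contain the
 * expected jump fields (offset/shift/mask), otherwise -EINVAL is returned.
 */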
  7729. static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input,
  7730. union ixgbe_atr_input *mask,
  7731. struct tc_cls_u32_offload *cls,
  7732. struct ixgbe_mat_field *field_ptr,
  7733. struct ixgbe_nexthdr *nexthdr)
  7734. {
  7735. int i, j, off;
  7736. __be32 val, m;
  7737. bool found_entry = false, found_jump_field = false;
  7738. for (i = 0; i < cls->knode.sel->nkeys; i++) {
  7739. off = cls->knode.sel->keys[i].off;
  7740. val = cls->knode.sel->keys[i].val;
  7741. m = cls->knode.sel->keys[i].mask;
  7742. for (j = 0; field_ptr[j].val; j++) {
  7743. if (field_ptr[j].off == off) {
  7744. field_ptr[j].val(input, mask, val, m);
  7745. input->filter.formatted.flow_type |=
  7746. field_ptr[j].type;
  7747. found_entry = true;
  7748. break;
  7749. }
  7750. }
  7751. if (nexthdr) {
  7752. if (nexthdr->off == cls->knode.sel->keys[i].off &&
  7753. nexthdr->val == cls->knode.sel->keys[i].val &&
  7754. nexthdr->mask == cls->knode.sel->keys[i].mask)
  7755. found_jump_field = true;
  7756. else
  7757. continue;
  7758. }
  7759. }
  7760. if (nexthdr && !found_jump_field)
  7761. return -EINVAL;
  7762. if (!found_entry)
  7763. return 0;
  7764. mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
  7765. IXGBE_ATR_L4TYPE_MASK;
  7766. if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
  7767. mask->formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
  7768. return 0;
  7769. }
  7770. static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
  7771. struct tc_cls_u32_offload *cls)
  7772. {
  7773. __be16 protocol = cls->common.protocol;
  7774. u32 loc = cls->knode.handle & 0xfffff;
  7775. struct ixgbe_hw *hw = &adapter->hw;
  7776. struct ixgbe_mat_field *field_ptr;
  7777. struct ixgbe_fdir_filter *input = NULL;
  7778. union ixgbe_atr_input *mask = NULL;
  7779. struct ixgbe_jump_table *jump = NULL;
  7780. int i, err = -EINVAL;
  7781. u8 queue;
  7782. u32 uhtid, link_uhtid;
  7783. uhtid = TC_U32_USERHTID(cls->knode.handle);
  7784. link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
7785. /* At the moment cls_u32 jumps to the network layer and skips past
7786. * L2 headers. The canonical method to match L2 frames is to use
7787. * negative offsets. However, this is error prone at best and really
7788. * just broken, because there is no way to "know" what sort of hdr
  7789. * is in front of the network layer. Fix cls_u32 to support L2
  7790. * headers when needed.
  7791. */
  7792. if (protocol != htons(ETH_P_IP))
  7793. return err;
  7794. if (loc >= ((1024 << adapter->fdir_pballoc) - 2)) {
  7795. e_err(drv, "Location out of range\n");
  7796. return err;
  7797. }
  7798. /* cls u32 is a graph starting at root node 0x800. The driver tracks
  7799. * links and also the fields used to advance the parser across each
  7800. * link (e.g. nexthdr/eat parameters from 'tc'). This way we can map
  7801. * the u32 graph onto the hardware parse graph denoted in ixgbe_model.h
7802. * To add support for new nodes, update the ixgbe_model.h parse structures;
7803. * this function _should_ be generic, so try not to hardcode values here.
  7804. */
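/* Illustrative example only (a sketch, not taken from this driver's
 * documentation; exact tc syntax may differ): a two-level u32 graph of the
 * kind handled below might be built from userspace along these lines:
 *
 *   tc qdisc add dev eth0 ingress
 *   tc filter add dev eth0 parent ffff: protocol ip prio 99 \
 *       handle 1: u32 divisor 1
 *   tc filter add dev eth0 parent ffff: protocol ip prio 99 u32 \
 *       ht 800: link 1: offset at 0 mask 0f00 shift 6 eat \
 *       match ip protocol 6 ff
 *   tc filter add dev eth0 parent ffff: protocol ip prio 99 u32 \
 *       ht 1: match tcp dst 80 ffff action drop
 *
 * The root table is 800:, the link advances the parser past the IPv4 header
 * (nexthdr/eat), and the child table 1: holds the actual match plus a
 * supported action (drop or mirred redirect).
 */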
  7805. if (uhtid == 0x800) {
  7806. field_ptr = (adapter->jump_tables[0])->mat;
  7807. } else {
  7808. if (uhtid >= IXGBE_MAX_LINK_HANDLE)
  7809. return err;
  7810. if (!adapter->jump_tables[uhtid])
  7811. return err;
  7812. field_ptr = (adapter->jump_tables[uhtid])->mat;
  7813. }
  7814. if (!field_ptr)
  7815. return err;
7816. /* At this point we know the field_ptr is valid and need to either
7817. * build a cls_u32 link or attach a filter. Adding a link to a handle
7818. * that does not exist is invalid, and the same goes for adding
7819. * rules to handles that don't exist.
  7820. */
  7821. if (link_uhtid) {
  7822. struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps;
  7823. if (link_uhtid >= IXGBE_MAX_LINK_HANDLE)
  7824. return err;
  7825. if (!test_bit(link_uhtid - 1, &adapter->tables))
  7826. return err;
  7827. /* Multiple filters as links to the same hash table are not
  7828. * supported. To add a new filter with the same next header
  7829. * but different match/jump conditions, create a new hash table
  7830. * and link to it.
  7831. */
  7832. if (adapter->jump_tables[link_uhtid] &&
  7833. (adapter->jump_tables[link_uhtid])->link_hdl) {
  7834. e_err(drv, "Link filter exists for link: %x\n",
  7835. link_uhtid);
  7836. return err;
  7837. }
  7838. for (i = 0; nexthdr[i].jump; i++) {
  7839. if (nexthdr[i].o != cls->knode.sel->offoff ||
  7840. nexthdr[i].s != cls->knode.sel->offshift ||
  7841. nexthdr[i].m != cls->knode.sel->offmask)
  7842. return err;
  7843. jump = kzalloc(sizeof(*jump), GFP_KERNEL);
  7844. if (!jump)
  7845. return -ENOMEM;
  7846. input = kzalloc(sizeof(*input), GFP_KERNEL);
  7847. if (!input) {
  7848. err = -ENOMEM;
  7849. goto free_jump;
  7850. }
  7851. mask = kzalloc(sizeof(*mask), GFP_KERNEL);
  7852. if (!mask) {
  7853. err = -ENOMEM;
  7854. goto free_input;
  7855. }
  7856. jump->input = input;
  7857. jump->mask = mask;
  7858. jump->link_hdl = cls->knode.handle;
  7859. err = ixgbe_clsu32_build_input(input, mask, cls,
  7860. field_ptr, &nexthdr[i]);
  7861. if (!err) {
  7862. jump->mat = nexthdr[i].jump;
  7863. adapter->jump_tables[link_uhtid] = jump;
  7864. break;
  7865. }
  7866. }
  7867. return 0;
  7868. }
  7869. input = kzalloc(sizeof(*input), GFP_KERNEL);
  7870. if (!input)
  7871. return -ENOMEM;
  7872. mask = kzalloc(sizeof(*mask), GFP_KERNEL);
  7873. if (!mask) {
  7874. err = -ENOMEM;
  7875. goto free_input;
  7876. }
  7877. if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) {
  7878. if ((adapter->jump_tables[uhtid])->input)
  7879. memcpy(input, (adapter->jump_tables[uhtid])->input,
  7880. sizeof(*input));
  7881. if ((adapter->jump_tables[uhtid])->mask)
  7882. memcpy(mask, (adapter->jump_tables[uhtid])->mask,
  7883. sizeof(*mask));
7884. /* Look up all child hash tables to see if this location is already
7885. * filled with a filter
  7886. */
  7887. for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) {
  7888. struct ixgbe_jump_table *link = adapter->jump_tables[i];
  7889. if (link && (test_bit(loc - 1, link->child_loc_map))) {
  7890. e_err(drv, "Filter exists in location: %x\n",
  7891. loc);
  7892. err = -EINVAL;
  7893. goto err_out;
  7894. }
  7895. }
  7896. }
  7897. err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, NULL);
  7898. if (err)
  7899. goto err_out;
  7900. err = parse_tc_actions(adapter, cls->knode.exts, &input->action,
  7901. &queue);
  7902. if (err < 0)
  7903. goto err_out;
  7904. input->sw_idx = loc;
  7905. spin_lock(&adapter->fdir_perfect_lock);
  7906. if (hlist_empty(&adapter->fdir_filter_list)) {
  7907. memcpy(&adapter->fdir_mask, mask, sizeof(*mask));
  7908. err = ixgbe_fdir_set_input_mask_82599(hw, mask);
  7909. if (err)
  7910. goto err_out_w_lock;
  7911. } else if (memcmp(&adapter->fdir_mask, mask, sizeof(*mask))) {
  7912. err = -EINVAL;
  7913. goto err_out_w_lock;
  7914. }
  7915. ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask);
  7916. err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter,
  7917. input->sw_idx, queue);
  7918. if (!err)
  7919. ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
  7920. spin_unlock(&adapter->fdir_perfect_lock);
  7921. if ((uhtid != 0x800) && (adapter->jump_tables[uhtid]))
  7922. set_bit(loc - 1, (adapter->jump_tables[uhtid])->child_loc_map);
  7923. kfree(mask);
  7924. return err;
  7925. err_out_w_lock:
  7926. spin_unlock(&adapter->fdir_perfect_lock);
  7927. err_out:
  7928. kfree(mask);
  7929. free_input:
  7930. kfree(input);
  7931. free_jump:
  7932. kfree(jump);
  7933. return err;
  7934. }
  7935. static int ixgbe_setup_tc_cls_u32(struct ixgbe_adapter *adapter,
  7936. struct tc_cls_u32_offload *cls_u32)
  7937. {
  7938. if (cls_u32->common.chain_index)
  7939. return -EOPNOTSUPP;
  7940. switch (cls_u32->command) {
  7941. case TC_CLSU32_NEW_KNODE:
  7942. case TC_CLSU32_REPLACE_KNODE:
  7943. return ixgbe_configure_clsu32(adapter, cls_u32);
  7944. case TC_CLSU32_DELETE_KNODE:
  7945. return ixgbe_delete_clsu32(adapter, cls_u32);
  7946. case TC_CLSU32_NEW_HNODE:
  7947. case TC_CLSU32_REPLACE_HNODE:
  7948. return ixgbe_configure_clsu32_add_hnode(adapter, cls_u32);
  7949. case TC_CLSU32_DELETE_HNODE:
  7950. return ixgbe_configure_clsu32_del_hnode(adapter, cls_u32);
  7951. default:
  7952. return -EOPNOTSUPP;
  7953. }
  7954. }
  7955. static int ixgbe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
  7956. void *cb_priv)
  7957. {
  7958. struct ixgbe_adapter *adapter = cb_priv;
  7959. switch (type) {
  7960. case TC_SETUP_CLSU32:
  7961. return ixgbe_setup_tc_cls_u32(adapter, type_data);
  7962. default:
  7963. return -EOPNOTSUPP;
  7964. }
  7965. }
  7966. static int ixgbe_setup_tc_block(struct net_device *dev,
  7967. struct tc_block_offload *f)
  7968. {
  7969. struct ixgbe_adapter *adapter = netdev_priv(dev);
  7970. if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
  7971. return -EOPNOTSUPP;
  7972. switch (f->command) {
  7973. case TC_BLOCK_BIND:
  7974. return tcf_block_cb_register(f->block, ixgbe_setup_tc_block_cb,
  7975. adapter, adapter);
  7976. case TC_BLOCK_UNBIND:
  7977. tcf_block_cb_unregister(f->block, ixgbe_setup_tc_block_cb,
  7978. adapter);
  7979. return 0;
  7980. default:
  7981. return -EOPNOTSUPP;
  7982. }
  7983. }
  7984. static int ixgbe_setup_tc_mqprio(struct net_device *dev,
  7985. struct tc_mqprio_qopt *mqprio)
  7986. {
  7987. mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
  7988. return ixgbe_setup_tc(dev, mqprio->num_tc);
  7989. }
  7990. static int __ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type,
  7991. void *type_data)
  7992. {
  7993. switch (type) {
  7994. case TC_SETUP_BLOCK:
  7995. return ixgbe_setup_tc_block(dev, type_data);
  7996. case TC_SETUP_MQPRIO:
  7997. return ixgbe_setup_tc_mqprio(dev, type_data);
  7998. default:
  7999. return -EOPNOTSUPP;
  8000. }
  8001. }
  8002. #ifdef CONFIG_PCI_IOV
  8003. void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
  8004. {
  8005. struct net_device *netdev = adapter->netdev;
  8006. rtnl_lock();
  8007. ixgbe_setup_tc(netdev, netdev_get_num_tc(netdev));
  8008. rtnl_unlock();
  8009. }
  8010. #endif
  8011. void ixgbe_do_reset(struct net_device *netdev)
  8012. {
  8013. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  8014. if (netif_running(netdev))
  8015. ixgbe_reinit_locked(adapter);
  8016. else
  8017. ixgbe_reset(adapter);
  8018. }
  8019. static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
  8020. netdev_features_t features)
  8021. {
  8022. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  8023. /* If Rx checksum is disabled, then RSC/LRO should also be disabled */
  8024. if (!(features & NETIF_F_RXCSUM))
  8025. features &= ~NETIF_F_LRO;
  8026. /* Turn off LRO if not RSC capable */
  8027. if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
  8028. features &= ~NETIF_F_LRO;
  8029. return features;
  8030. }
  8031. static int ixgbe_set_features(struct net_device *netdev,
  8032. netdev_features_t features)
  8033. {
  8034. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  8035. netdev_features_t changed = netdev->features ^ features;
  8036. bool need_reset = false;
  8037. /* Make sure RSC matches LRO, reset if change */
  8038. if (!(features & NETIF_F_LRO)) {
  8039. if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
  8040. need_reset = true;
  8041. adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
  8042. } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
  8043. !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
  8044. if (adapter->rx_itr_setting == 1 ||
  8045. adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
  8046. adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
  8047. need_reset = true;
  8048. } else if ((changed ^ features) & NETIF_F_LRO) {
  8049. e_info(probe, "rx-usecs set too low, "
  8050. "disabling RSC\n");
  8051. }
  8052. }
  8053. /*
  8054. * Check if Flow Director n-tuple support or hw_tc support was
  8055. * enabled or disabled. If the state changed, we need to reset.
  8056. */
  8057. if ((features & NETIF_F_NTUPLE) || (features & NETIF_F_HW_TC)) {
  8058. /* turn off ATR, enable perfect filters and reset */
  8059. if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
  8060. need_reset = true;
  8061. adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
  8062. adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
  8063. } else {
  8064. /* turn off perfect filters, enable ATR and reset */
  8065. if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
  8066. need_reset = true;
  8067. adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
  8068. /* We cannot enable ATR if SR-IOV is enabled */
  8069. if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED ||
  8070. /* We cannot enable ATR if we have 2 or more tcs */
  8071. (netdev_get_num_tc(netdev) > 1) ||
  8072. /* We cannot enable ATR if RSS is disabled */
  8073. (adapter->ring_feature[RING_F_RSS].limit <= 1) ||
  8074. /* A sample rate of 0 indicates ATR disabled */
  8075. (!adapter->atr_sample_rate))
  8076. ; /* do nothing not supported */
  8077. else /* otherwise supported and set the flag */
  8078. adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
  8079. }
  8080. if (changed & NETIF_F_RXALL)
  8081. need_reset = true;
  8082. netdev->features = features;
  8083. if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) {
  8084. if (features & NETIF_F_RXCSUM) {
  8085. adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
  8086. } else {
  8087. u32 port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK;
  8088. ixgbe_clear_udp_tunnel_port(adapter, port_mask);
  8089. }
  8090. }
  8091. if ((adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) {
  8092. if (features & NETIF_F_RXCSUM) {
  8093. adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
  8094. } else {
  8095. u32 port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK;
  8096. ixgbe_clear_udp_tunnel_port(adapter, port_mask);
  8097. }
  8098. }
  8099. if (need_reset)
  8100. ixgbe_do_reset(netdev);
  8101. else if (changed & (NETIF_F_HW_VLAN_CTAG_RX |
  8102. NETIF_F_HW_VLAN_CTAG_FILTER))
  8103. ixgbe_set_rx_mode(netdev);
  8104. return 0;
  8105. }
  8106. /**
  8107. * ixgbe_add_udp_tunnel_port - Get notifications about adding UDP tunnel ports
  8108. * @dev: The port's netdev
  8109. * @ti: Tunnel endpoint information
  8110. **/
  8111. static void ixgbe_add_udp_tunnel_port(struct net_device *dev,
  8112. struct udp_tunnel_info *ti)
  8113. {
  8114. struct ixgbe_adapter *adapter = netdev_priv(dev);
  8115. struct ixgbe_hw *hw = &adapter->hw;
  8116. __be16 port = ti->port;
  8117. u32 port_shift = 0;
  8118. u32 reg;
  8119. if (ti->sa_family != AF_INET)
  8120. return;
  8121. switch (ti->type) {
  8122. case UDP_TUNNEL_TYPE_VXLAN:
  8123. if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
  8124. return;
  8125. if (adapter->vxlan_port == port)
  8126. return;
  8127. if (adapter->vxlan_port) {
  8128. netdev_info(dev,
  8129. "VXLAN port %d set, not adding port %d\n",
  8130. ntohs(adapter->vxlan_port),
  8131. ntohs(port));
  8132. return;
  8133. }
  8134. adapter->vxlan_port = port;
  8135. break;
  8136. case UDP_TUNNEL_TYPE_GENEVE:
  8137. if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))
  8138. return;
  8139. if (adapter->geneve_port == port)
  8140. return;
  8141. if (adapter->geneve_port) {
  8142. netdev_info(dev,
  8143. "GENEVE port %d set, not adding port %d\n",
  8144. ntohs(adapter->geneve_port),
  8145. ntohs(port));
  8146. return;
  8147. }
  8148. port_shift = IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT;
  8149. adapter->geneve_port = port;
  8150. break;
  8151. default:
  8152. return;
  8153. }
  8154. reg = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) | ntohs(port) << port_shift;
  8155. IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, reg);
  8156. }
  8157. /**
  8158. * ixgbe_del_udp_tunnel_port - Get notifications about removing UDP tunnel ports
  8159. * @dev: The port's netdev
  8160. * @ti: Tunnel endpoint information
  8161. **/
  8162. static void ixgbe_del_udp_tunnel_port(struct net_device *dev,
  8163. struct udp_tunnel_info *ti)
  8164. {
  8165. struct ixgbe_adapter *adapter = netdev_priv(dev);
  8166. u32 port_mask;
  8167. if (ti->type != UDP_TUNNEL_TYPE_VXLAN &&
  8168. ti->type != UDP_TUNNEL_TYPE_GENEVE)
  8169. return;
  8170. if (ti->sa_family != AF_INET)
  8171. return;
  8172. switch (ti->type) {
  8173. case UDP_TUNNEL_TYPE_VXLAN:
  8174. if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
  8175. return;
  8176. if (adapter->vxlan_port != ti->port) {
  8177. netdev_info(dev, "VXLAN port %d not found\n",
  8178. ntohs(ti->port));
  8179. return;
  8180. }
  8181. port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK;
  8182. break;
  8183. case UDP_TUNNEL_TYPE_GENEVE:
  8184. if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))
  8185. return;
  8186. if (adapter->geneve_port != ti->port) {
  8187. netdev_info(dev, "GENEVE port %d not found\n",
  8188. ntohs(ti->port));
  8189. return;
  8190. }
  8191. port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK;
  8192. break;
  8193. default:
  8194. return;
  8195. }
  8196. ixgbe_clear_udp_tunnel_port(adapter, port_mask);
  8197. adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
  8198. }
  8199. static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
  8200. struct net_device *dev,
  8201. const unsigned char *addr, u16 vid,
  8202. u16 flags)
  8203. {
  8204. /* guarantee we can provide a unique filter for the unicast address */
  8205. if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
  8206. struct ixgbe_adapter *adapter = netdev_priv(dev);
  8207. u16 pool = VMDQ_P(0);
  8208. if (netdev_uc_count(dev) >= ixgbe_available_rars(adapter, pool))
  8209. return -ENOMEM;
  8210. }
  8211. return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
  8212. }
  8213. /**
  8214. * ixgbe_configure_bridge_mode - set various bridge modes
8215. * @adapter: the private structure
8216. * @mode: requested bridge mode
8217. *
8218. * Configure the settings required for various bridge modes.
  8219. **/
  8220. static int ixgbe_configure_bridge_mode(struct ixgbe_adapter *adapter,
  8221. __u16 mode)
  8222. {
  8223. struct ixgbe_hw *hw = &adapter->hw;
  8224. unsigned int p, num_pools;
  8225. u32 vmdctl;
  8226. switch (mode) {
  8227. case BRIDGE_MODE_VEPA:
  8228. /* disable Tx loopback, rely on switch hairpin mode */
  8229. IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, 0);
  8230. /* must enable Rx switching replication to allow multicast
  8231. * packet reception on all VFs, and to enable source address
  8232. * pruning.
  8233. */
  8234. vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
  8235. vmdctl |= IXGBE_VT_CTL_REPLEN;
  8236. IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
  8237. /* enable Rx source address pruning. Note, this requires
  8238. * replication to be enabled or else it does nothing.
  8239. */
  8240. num_pools = adapter->num_vfs + adapter->num_rx_pools;
  8241. for (p = 0; p < num_pools; p++) {
  8242. if (hw->mac.ops.set_source_address_pruning)
  8243. hw->mac.ops.set_source_address_pruning(hw,
  8244. true,
  8245. p);
  8246. }
  8247. break;
  8248. case BRIDGE_MODE_VEB:
  8249. /* enable Tx loopback for internal VF/PF communication */
  8250. IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC,
  8251. IXGBE_PFDTXGSWC_VT_LBEN);
  8252. /* disable Rx switching replication unless we have SR-IOV
  8253. * virtual functions
  8254. */
  8255. vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
  8256. if (!adapter->num_vfs)
  8257. vmdctl &= ~IXGBE_VT_CTL_REPLEN;
  8258. IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
  8259. /* disable Rx source address pruning, since we don't expect to
  8260. * be receiving external loopback of our transmitted frames.
  8261. */
  8262. num_pools = adapter->num_vfs + adapter->num_rx_pools;
  8263. for (p = 0; p < num_pools; p++) {
  8264. if (hw->mac.ops.set_source_address_pruning)
  8265. hw->mac.ops.set_source_address_pruning(hw,
  8266. false,
  8267. p);
  8268. }
  8269. break;
  8270. default:
  8271. return -EINVAL;
  8272. }
  8273. adapter->bridge_mode = mode;
  8274. e_info(drv, "enabling bridge mode: %s\n",
  8275. mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
  8276. return 0;
  8277. }
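/* The bridge mode above is normally selected from userspace with something
 * like "bridge link set dev <pf> hwmode {vepa|veb}" (illustrative iproute2
 * invocation), which reaches the driver via ndo_bridge_setlink below.
 */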
  8278. static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
  8279. struct nlmsghdr *nlh, u16 flags)
  8280. {
  8281. struct ixgbe_adapter *adapter = netdev_priv(dev);
  8282. struct nlattr *attr, *br_spec;
  8283. int rem;
  8284. if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
  8285. return -EOPNOTSUPP;
  8286. br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
  8287. if (!br_spec)
  8288. return -EINVAL;
  8289. nla_for_each_nested(attr, br_spec, rem) {
  8290. int status;
  8291. __u16 mode;
  8292. if (nla_type(attr) != IFLA_BRIDGE_MODE)
  8293. continue;
  8294. if (nla_len(attr) < sizeof(mode))
  8295. return -EINVAL;
  8296. mode = nla_get_u16(attr);
  8297. status = ixgbe_configure_bridge_mode(adapter, mode);
  8298. if (status)
  8299. return status;
  8300. break;
  8301. }
  8302. return 0;
  8303. }
  8304. static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
  8305. struct net_device *dev,
  8306. u32 filter_mask, int nlflags)
  8307. {
  8308. struct ixgbe_adapter *adapter = netdev_priv(dev);
  8309. if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
  8310. return 0;
  8311. return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
  8312. adapter->bridge_mode, 0, 0, nlflags,
  8313. filter_mask, NULL);
  8314. }
  8315. static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
  8316. {
  8317. struct ixgbe_fwd_adapter *fwd_adapter = NULL;
  8318. struct ixgbe_adapter *adapter = netdev_priv(pdev);
  8319. int used_pools = adapter->num_vfs + adapter->num_rx_pools;
  8320. unsigned int limit;
  8321. int pool, err;
8322. /* Hardware has a limited number of available pools. Each VF, as well
8323. * as the PF, requires a pool. Check to ensure we don't attempt to use
8324. * more than the available number of pools.
  8325. */
  8326. if (used_pools >= IXGBE_MAX_VF_FUNCTIONS)
  8327. return ERR_PTR(-EINVAL);
  8328. #ifdef CONFIG_RPS
  8329. if (vdev->num_rx_queues != vdev->num_tx_queues) {
  8330. netdev_info(pdev, "%s: Only supports a single queue count for TX and RX\n",
  8331. vdev->name);
  8332. return ERR_PTR(-EINVAL);
  8333. }
  8334. #endif
  8335. /* Check for hardware restriction on number of rx/tx queues */
  8336. if (vdev->num_tx_queues > IXGBE_MAX_L2A_QUEUES ||
  8337. vdev->num_tx_queues == IXGBE_BAD_L2A_QUEUE) {
  8338. netdev_info(pdev,
  8339. "%s: Supports RX/TX Queue counts 1,2, and 4\n",
  8340. pdev->name);
  8341. return ERR_PTR(-EINVAL);
  8342. }
  8343. if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
  8344. adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS - 1) ||
  8345. (adapter->num_rx_pools > IXGBE_MAX_MACVLANS))
  8346. return ERR_PTR(-EBUSY);
  8347. fwd_adapter = kzalloc(sizeof(*fwd_adapter), GFP_KERNEL);
  8348. if (!fwd_adapter)
  8349. return ERR_PTR(-ENOMEM);
  8350. pool = find_first_zero_bit(&adapter->fwd_bitmask, 32);
  8351. adapter->num_rx_pools++;
  8352. set_bit(pool, &adapter->fwd_bitmask);
  8353. limit = find_last_bit(&adapter->fwd_bitmask, 32);
  8354. /* Enable VMDq flag so device will be set in VM mode */
  8355. adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED;
  8356. adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
  8357. adapter->ring_feature[RING_F_RSS].limit = vdev->num_tx_queues;
  8358. /* Force reinit of ring allocation with VMDQ enabled */
  8359. err = ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
  8360. if (err)
  8361. goto fwd_add_err;
  8362. fwd_adapter->pool = pool;
  8363. fwd_adapter->real_adapter = adapter;
  8364. if (netif_running(pdev)) {
  8365. err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
  8366. if (err)
  8367. goto fwd_add_err;
  8368. netif_tx_start_all_queues(vdev);
  8369. }
  8370. return fwd_adapter;
  8371. fwd_add_err:
  8372. /* unwind counter and free adapter struct */
  8373. netdev_info(pdev,
  8374. "%s: dfwd hardware acceleration failed\n", vdev->name);
  8375. clear_bit(pool, &adapter->fwd_bitmask);
  8376. adapter->num_rx_pools--;
  8377. kfree(fwd_adapter);
  8378. return ERR_PTR(err);
  8379. }
  8380. static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
  8381. {
  8382. struct ixgbe_fwd_adapter *fwd_adapter = priv;
  8383. struct ixgbe_adapter *adapter = fwd_adapter->real_adapter;
  8384. unsigned int limit;
  8385. clear_bit(fwd_adapter->pool, &adapter->fwd_bitmask);
  8386. adapter->num_rx_pools--;
  8387. limit = find_last_bit(&adapter->fwd_bitmask, 32);
  8388. adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
  8389. ixgbe_fwd_ring_down(fwd_adapter->netdev, fwd_adapter);
  8390. /* go back to full RSS if we're done with our VMQs */
  8391. if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
  8392. int rss = min_t(int, ixgbe_max_rss_indices(adapter),
  8393. num_online_cpus());
  8394. adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
  8395. adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
  8396. adapter->ring_feature[RING_F_RSS].limit = rss;
  8397. }
  8398. ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
  8399. netdev_dbg(pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
  8400. fwd_adapter->pool, adapter->num_rx_pools,
  8401. fwd_adapter->rx_base_queue,
  8402. fwd_adapter->rx_base_queue + adapter->num_rx_queues_per_pool,
  8403. adapter->fwd_bitmask);
  8404. kfree(fwd_adapter);
  8405. }
  8406. #define IXGBE_MAX_MAC_HDR_LEN 127
  8407. #define IXGBE_MAX_NETWORK_HDR_LEN 511
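/* These bounds reflect the header-length fields of the advanced Tx context
 * descriptor (MACLEN and IPLEN), which cannot describe longer headers;
 * ixgbe_features_check() strips the offloads that would need a context
 * descriptor in that case.
 */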
  8408. static netdev_features_t
  8409. ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
  8410. netdev_features_t features)
  8411. {
  8412. unsigned int network_hdr_len, mac_hdr_len;
  8413. /* Make certain the headers can be described by a context descriptor */
  8414. mac_hdr_len = skb_network_header(skb) - skb->data;
  8415. if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN))
  8416. return features & ~(NETIF_F_HW_CSUM |
  8417. NETIF_F_SCTP_CRC |
  8418. NETIF_F_HW_VLAN_CTAG_TX |
  8419. NETIF_F_TSO |
  8420. NETIF_F_TSO6);
  8421. network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
  8422. if (unlikely(network_hdr_len > IXGBE_MAX_NETWORK_HDR_LEN))
  8423. return features & ~(NETIF_F_HW_CSUM |
  8424. NETIF_F_SCTP_CRC |
  8425. NETIF_F_TSO |
  8426. NETIF_F_TSO6);
  8427. /* We can only support IPV4 TSO in tunnels if we can mangle the
  8428. * inner IP ID field, so strip TSO if MANGLEID is not supported.
  8429. */
  8430. if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
  8431. features &= ~NETIF_F_TSO;
  8432. return features;
  8433. }
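/* Attach or detach an XDP program. XDP is refused when SR-IOV or DCB is
 * enabled, when any Rx ring has RSC enabled, or when the MTU-derived frame
 * size does not fit a single Rx buffer, since XDP operates on single,
 * linear buffers.
 */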
  8434. static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
  8435. {
  8436. int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
  8437. struct ixgbe_adapter *adapter = netdev_priv(dev);
  8438. struct bpf_prog *old_prog;
  8439. if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
  8440. return -EINVAL;
  8441. if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
  8442. return -EINVAL;
  8443. /* verify ixgbe ring attributes are sufficient for XDP */
  8444. for (i = 0; i < adapter->num_rx_queues; i++) {
  8445. struct ixgbe_ring *ring = adapter->rx_ring[i];
  8446. if (ring_is_rsc_enabled(ring))
  8447. return -EINVAL;
  8448. if (frame_size > ixgbe_rx_bufsz(ring))
  8449. return -EINVAL;
  8450. }
  8451. if (nr_cpu_ids > MAX_XDP_QUEUES)
  8452. return -ENOMEM;
  8453. old_prog = xchg(&adapter->xdp_prog, prog);
  8454. /* If transitioning XDP modes reconfigure rings */
  8455. if (!!prog != !!old_prog) {
  8456. int err = ixgbe_setup_tc(dev, netdev_get_num_tc(dev));
  8457. if (err) {
  8458. rcu_assign_pointer(adapter->xdp_prog, old_prog);
  8459. return -EINVAL;
  8460. }
  8461. } else {
  8462. for (i = 0; i < adapter->num_rx_queues; i++)
  8463. xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog);
  8464. }
  8465. if (old_prog)
  8466. bpf_prog_put(old_prog);
  8467. return 0;
  8468. }
  8469. static int ixgbe_xdp(struct net_device *dev, struct netdev_xdp *xdp)
  8470. {
  8471. struct ixgbe_adapter *adapter = netdev_priv(dev);
  8472. switch (xdp->command) {
  8473. case XDP_SETUP_PROG:
  8474. return ixgbe_xdp_setup(dev, xdp->prog);
  8475. case XDP_QUERY_PROG:
  8476. xdp->prog_attached = !!(adapter->xdp_prog);
  8477. xdp->prog_id = adapter->xdp_prog ?
  8478. adapter->xdp_prog->aux->id : 0;
  8479. return 0;
  8480. default:
  8481. return -EINVAL;
  8482. }
  8483. }
  8484. static int ixgbe_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
  8485. {
  8486. struct ixgbe_adapter *adapter = netdev_priv(dev);
  8487. struct ixgbe_ring *ring;
  8488. int err;
  8489. if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state)))
  8490. return -ENETDOWN;
8491. /* During program transitions it's possible adapter->xdp_prog is assigned
8492. * but the ring has not been configured yet. In this case simply abort xmit.
  8493. */
  8494. ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL;
  8495. if (unlikely(!ring))
  8496. return -ENXIO;
  8497. err = ixgbe_xmit_xdp_ring(adapter, xdp);
  8498. if (err != IXGBE_XDP_TX)
  8499. return -ENOSPC;
  8500. return 0;
  8501. }
  8502. static void ixgbe_xdp_flush(struct net_device *dev)
  8503. {
  8504. struct ixgbe_adapter *adapter = netdev_priv(dev);
  8505. struct ixgbe_ring *ring;
8506. /* It's possible the device went down between xdp xmit and flush, so
8507. * we need to ensure the device is still up.
  8508. */
  8509. if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state)))
  8510. return;
  8511. ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL;
  8512. if (unlikely(!ring))
  8513. return;
  8514. /* Force memory writes to complete before letting h/w know there
  8515. * are new descriptors to fetch.
  8516. */
  8517. wmb();
  8518. writel(ring->next_to_use, ring->tail);
  8520. }
  8521. static const struct net_device_ops ixgbe_netdev_ops = {
  8522. .ndo_open = ixgbe_open,
  8523. .ndo_stop = ixgbe_close,
  8524. .ndo_start_xmit = ixgbe_xmit_frame,
  8525. .ndo_select_queue = ixgbe_select_queue,
  8526. .ndo_set_rx_mode = ixgbe_set_rx_mode,
  8527. .ndo_validate_addr = eth_validate_addr,
  8528. .ndo_set_mac_address = ixgbe_set_mac,
  8529. .ndo_change_mtu = ixgbe_change_mtu,
  8530. .ndo_tx_timeout = ixgbe_tx_timeout,
  8531. .ndo_set_tx_maxrate = ixgbe_tx_maxrate,
  8532. .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
  8533. .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
  8534. .ndo_do_ioctl = ixgbe_ioctl,
  8535. .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
  8536. .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
  8537. .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw,
  8538. .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk,
  8539. .ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en,
  8540. .ndo_set_vf_trust = ixgbe_ndo_set_vf_trust,
  8541. .ndo_get_vf_config = ixgbe_ndo_get_vf_config,
  8542. .ndo_get_stats64 = ixgbe_get_stats64,
  8543. .ndo_setup_tc = __ixgbe_setup_tc,
  8544. #ifdef CONFIG_NET_POLL_CONTROLLER
  8545. .ndo_poll_controller = ixgbe_netpoll,
  8546. #endif
  8547. #ifdef IXGBE_FCOE
  8548. .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
  8549. .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
  8550. .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
  8551. .ndo_fcoe_enable = ixgbe_fcoe_enable,
  8552. .ndo_fcoe_disable = ixgbe_fcoe_disable,
  8553. .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
  8554. .ndo_fcoe_get_hbainfo = ixgbe_fcoe_get_hbainfo,
  8555. #endif /* IXGBE_FCOE */
  8556. .ndo_set_features = ixgbe_set_features,
  8557. .ndo_fix_features = ixgbe_fix_features,
  8558. .ndo_fdb_add = ixgbe_ndo_fdb_add,
  8559. .ndo_bridge_setlink = ixgbe_ndo_bridge_setlink,
  8560. .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
  8561. .ndo_dfwd_add_station = ixgbe_fwd_add,
  8562. .ndo_dfwd_del_station = ixgbe_fwd_del,
  8563. .ndo_udp_tunnel_add = ixgbe_add_udp_tunnel_port,
  8564. .ndo_udp_tunnel_del = ixgbe_del_udp_tunnel_port,
  8565. .ndo_features_check = ixgbe_features_check,
  8566. .ndo_xdp = ixgbe_xdp,
  8567. .ndo_xdp_xmit = ixgbe_xdp_xmit,
  8568. .ndo_xdp_flush = ixgbe_xdp_flush,
  8569. };
  8570. /**
  8571. * ixgbe_enumerate_functions - Get the number of ports this device has
  8572. * @adapter: adapter structure
  8573. *
8574. * This function enumerates the physical functions co-located on a single slot,
8575. * in order to determine how many ports a device has. This is most useful in
8576. * determining the PCIe bandwidth (GT/s) necessary for optimal
  8577. * performance.
  8578. **/
  8579. static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
  8580. {
  8581. struct pci_dev *entry, *pdev = adapter->pdev;
  8582. int physfns = 0;
8583. /* Some cards cannot use the generic method of counting PCIe functions,
8584. * because they are behind a parent switch, so we hardcode these with
8585. * the correct number of functions.
  8586. */
  8587. if (ixgbe_pcie_from_parent(&adapter->hw))
  8588. physfns = 4;
  8589. list_for_each_entry(entry, &adapter->pdev->bus->devices, bus_list) {
  8590. /* don't count virtual functions */
  8591. if (entry->is_virtfn)
  8592. continue;
  8593. /* When the devices on the bus don't all match our device ID,
  8594. * we can't reliably determine the correct number of
  8595. * functions. This can occur if a function has been direct
  8596. * attached to a virtual machine using VT-d, for example. In
  8597. * this case, simply return -1 to indicate this.
  8598. */
  8599. if ((entry->vendor != pdev->vendor) ||
  8600. (entry->device != pdev->device))
  8601. return -1;
  8602. physfns++;
  8603. }
  8604. return physfns;
  8605. }
  8606. /**
  8607. * ixgbe_wol_supported - Check whether device supports WoL
  8608. * @adapter: the adapter private structure
  8609. * @device_id: the device ID
8610. * @subdevice_id: the subsystem device ID
  8611. *
  8612. * This function is used by probe and ethtool to determine
  8613. * which devices have WoL support
  8614. *
  8615. **/
  8616. bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
  8617. u16 subdevice_id)
  8618. {
  8619. struct ixgbe_hw *hw = &adapter->hw;
  8620. u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
  8621. /* WOL not supported on 82598 */
  8622. if (hw->mac.type == ixgbe_mac_82598EB)
  8623. return false;
  8624. /* check eeprom to see if WOL is enabled for X540 and newer */
  8625. if (hw->mac.type >= ixgbe_mac_X540) {
  8626. if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
  8627. ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
  8628. (hw->bus.func == 0)))
  8629. return true;
  8630. }
  8631. /* WOL is determined based on device IDs for 82599 MACs */
  8632. switch (device_id) {
  8633. case IXGBE_DEV_ID_82599_SFP:
8634. /* Only these subdevices could support WOL */
  8635. switch (subdevice_id) {
  8636. case IXGBE_SUBDEV_ID_82599_560FLR:
  8637. case IXGBE_SUBDEV_ID_82599_LOM_SNAP6:
  8638. case IXGBE_SUBDEV_ID_82599_SFP_WOL0:
  8639. case IXGBE_SUBDEV_ID_82599_SFP_2OCP:
  8640. /* only support first port */
  8641. if (hw->bus.func != 0)
  8642. break;
  8643. /* fall through */
  8644. case IXGBE_SUBDEV_ID_82599_SP_560FLR:
  8645. case IXGBE_SUBDEV_ID_82599_SFP:
  8646. case IXGBE_SUBDEV_ID_82599_RNDC:
  8647. case IXGBE_SUBDEV_ID_82599_ECNA_DP:
  8648. case IXGBE_SUBDEV_ID_82599_SFP_1OCP:
  8649. case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1:
  8650. case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2:
  8651. return true;
  8652. }
  8653. break;
  8654. case IXGBE_DEV_ID_82599EN_SFP:
  8655. /* Only these subdevices support WOL */
  8656. switch (subdevice_id) {
  8657. case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1:
  8658. return true;
  8659. }
  8660. break;
  8661. case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
  8662. /* All except this subdevice support WOL */
  8663. if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
  8664. return true;
  8665. break;
  8666. case IXGBE_DEV_ID_82599_KX4:
  8667. return true;
  8668. default:
  8669. break;
  8670. }
  8671. return false;
  8672. }
  8673. /**
  8674. * ixgbe_probe - Device Initialization Routine
  8675. * @pdev: PCI device information struct
  8676. * @ent: entry in ixgbe_pci_tbl
  8677. *
  8678. * Returns 0 on success, negative on failure
  8679. *
  8680. * ixgbe_probe initializes an adapter identified by a pci_dev structure.
  8681. * The OS initialization, configuring of the adapter private structure,
  8682. * and a hardware reset occur.
  8683. **/
  8684. static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
  8685. {
  8686. struct net_device *netdev;
  8687. struct ixgbe_adapter *adapter = NULL;
  8688. struct ixgbe_hw *hw;
  8689. const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
  8690. int i, err, pci_using_dac, expected_gts;
  8691. unsigned int indices = MAX_TX_QUEUES;
  8692. u8 part_str[IXGBE_PBANUM_LENGTH];
  8693. bool disable_dev = false;
  8694. #ifdef IXGBE_FCOE
  8695. u16 device_caps;
  8696. #endif
  8697. u32 eec;
  8698. /* Catch broken hardware that put the wrong VF device ID in
  8699. * the PCIe SR-IOV capability.
  8700. */
  8701. if (pdev->is_virtfn) {
  8702. WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
  8703. pci_name(pdev), pdev->vendor, pdev->device);
  8704. return -EINVAL;
  8705. }
  8706. err = pci_enable_device_mem(pdev);
  8707. if (err)
  8708. return err;
  8709. if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
  8710. pci_using_dac = 1;
  8711. } else {
  8712. err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
  8713. if (err) {
  8714. dev_err(&pdev->dev,
  8715. "No usable DMA configuration, aborting\n");
  8716. goto err_dma;
  8717. }
  8718. pci_using_dac = 0;
  8719. }
  8720. err = pci_request_mem_regions(pdev, ixgbe_driver_name);
  8721. if (err) {
  8722. dev_err(&pdev->dev,
  8723. "pci_request_selected_regions failed 0x%x\n", err);
  8724. goto err_pci_reg;
  8725. }
  8726. pci_enable_pcie_error_reporting(pdev);
  8727. pci_set_master(pdev);
  8728. pci_save_state(pdev);
  8729. if (ii->mac == ixgbe_mac_82598EB) {
  8730. #ifdef CONFIG_IXGBE_DCB
  8731. /* 8 TC w/ 4 queues per TC */
  8732. indices = 4 * MAX_TRAFFIC_CLASS;
  8733. #else
  8734. indices = IXGBE_MAX_RSS_INDICES;
  8735. #endif
  8736. }
  8737. netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
  8738. if (!netdev) {
  8739. err = -ENOMEM;
  8740. goto err_alloc_etherdev;
  8741. }
  8742. SET_NETDEV_DEV(netdev, &pdev->dev);
  8743. adapter = netdev_priv(netdev);
  8744. adapter->netdev = netdev;
  8745. adapter->pdev = pdev;
  8746. hw = &adapter->hw;
  8747. hw->back = adapter;
  8748. adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
  8749. hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
  8750. pci_resource_len(pdev, 0));
  8751. adapter->io_addr = hw->hw_addr;
  8752. if (!hw->hw_addr) {
  8753. err = -EIO;
  8754. goto err_ioremap;
  8755. }
  8756. netdev->netdev_ops = &ixgbe_netdev_ops;
  8757. ixgbe_set_ethtool_ops(netdev);
  8758. netdev->watchdog_timeo = 5 * HZ;
  8759. strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
  8760. /* Setup hw api */
  8761. hw->mac.ops = *ii->mac_ops;
  8762. hw->mac.type = ii->mac;
  8763. hw->mvals = ii->mvals;
  8764. if (ii->link_ops)
  8765. hw->link.ops = *ii->link_ops;
  8766. /* EEPROM */
  8767. hw->eeprom.ops = *ii->eeprom_ops;
  8768. eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
  8769. if (ixgbe_removed(hw->hw_addr)) {
  8770. err = -EIO;
  8771. goto err_ioremap;
  8772. }
  8773. /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
  8774. if (!(eec & BIT(8)))
  8775. hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
  8776. /* PHY */
  8777. hw->phy.ops = *ii->phy_ops;
  8778. hw->phy.sfp_type = ixgbe_sfp_type_unknown;
  8779. /* ixgbe_identify_phy_generic will set prtad and mmds properly */
  8780. hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
  8781. hw->phy.mdio.mmds = 0;
  8782. hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
  8783. hw->phy.mdio.dev = netdev;
  8784. hw->phy.mdio.mdio_read = ixgbe_mdio_read;
  8785. hw->phy.mdio.mdio_write = ixgbe_mdio_write;
  8786. /* setup the private structure */
  8787. err = ixgbe_sw_init(adapter, ii);
  8788. if (err)
  8789. goto err_sw_init;
  8790. /* Make sure the SWFW semaphore is in a valid state */
  8791. if (hw->mac.ops.init_swfw_sync)
  8792. hw->mac.ops.init_swfw_sync(hw);
8793. /* Make it possible for the adapter to be woken up via WOL */
  8794. switch (adapter->hw.mac.type) {
  8795. case ixgbe_mac_82599EB:
  8796. case ixgbe_mac_X540:
  8797. case ixgbe_mac_X550:
  8798. case ixgbe_mac_X550EM_x:
  8799. case ixgbe_mac_x550em_a:
  8800. IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
  8801. break;
  8802. default:
  8803. break;
  8804. }
  8805. /*
8806. * If there is a fan on this device and it has failed, log the
  8807. * failure.
  8808. */
  8809. if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
  8810. u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
  8811. if (esdp & IXGBE_ESDP_SDP1)
  8812. e_crit(probe, "Fan has stopped, replace the adapter\n");
  8813. }
  8814. if (allow_unsupported_sfp)
  8815. hw->allow_unsupported_sfp = allow_unsupported_sfp;
  8816. /* reset_hw fills in the perm_addr as well */
  8817. hw->phy.reset_if_overtemp = true;
  8818. err = hw->mac.ops.reset_hw(hw);
  8819. hw->phy.reset_if_overtemp = false;
  8820. ixgbe_set_eee_capable(adapter);
  8821. if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
  8822. err = 0;
  8823. } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
  8824. e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n");
  8825. e_dev_err("Reload the driver after installing a supported module.\n");
  8826. goto err_sw_init;
  8827. } else if (err) {
  8828. e_dev_err("HW Init failed: %d\n", err);
  8829. goto err_sw_init;
  8830. }
  8831. #ifdef CONFIG_PCI_IOV
  8832. /* SR-IOV not supported on the 82598 */
  8833. if (adapter->hw.mac.type == ixgbe_mac_82598EB)
  8834. goto skip_sriov;
  8835. /* Mailbox */
  8836. ixgbe_init_mbx_params_pf(hw);
  8837. hw->mbx.ops = ii->mbx_ops;
  8838. pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT);
  8839. ixgbe_enable_sriov(adapter, max_vfs);
  8840. skip_sriov:
  8841. #endif
  8842. netdev->features = NETIF_F_SG |
  8843. NETIF_F_TSO |
  8844. NETIF_F_TSO6 |
  8845. NETIF_F_RXHASH |
  8846. NETIF_F_RXCSUM |
  8847. NETIF_F_HW_CSUM;
  8848. #define IXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
  8849. NETIF_F_GSO_GRE_CSUM | \
  8850. NETIF_F_GSO_IPXIP4 | \
  8851. NETIF_F_GSO_IPXIP6 | \
  8852. NETIF_F_GSO_UDP_TUNNEL | \
  8853. NETIF_F_GSO_UDP_TUNNEL_CSUM)
  8854. netdev->gso_partial_features = IXGBE_GSO_PARTIAL_FEATURES;
  8855. netdev->features |= NETIF_F_GSO_PARTIAL |
  8856. IXGBE_GSO_PARTIAL_FEATURES;
  8857. if (hw->mac.type >= ixgbe_mac_82599EB)
  8858. netdev->features |= NETIF_F_SCTP_CRC;
  8859. /* copy netdev features into list of user selectable features */
  8860. netdev->hw_features |= netdev->features |
  8861. NETIF_F_HW_VLAN_CTAG_FILTER |
  8862. NETIF_F_HW_VLAN_CTAG_RX |
  8863. NETIF_F_HW_VLAN_CTAG_TX |
  8864. NETIF_F_RXALL |
  8865. NETIF_F_HW_L2FW_DOFFLOAD;
  8866. if (hw->mac.type >= ixgbe_mac_82599EB)
  8867. netdev->hw_features |= NETIF_F_NTUPLE |
  8868. NETIF_F_HW_TC;
  8869. if (pci_using_dac)
  8870. netdev->features |= NETIF_F_HIGHDMA;
  8871. netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
  8872. netdev->hw_enc_features |= netdev->vlan_features;
  8873. netdev->mpls_features |= NETIF_F_SG |
  8874. NETIF_F_TSO |
  8875. NETIF_F_TSO6 |
  8876. NETIF_F_HW_CSUM;
  8877. netdev->mpls_features |= IXGBE_GSO_PARTIAL_FEATURES;
  8878. /* set this bit last since it cannot be part of vlan_features */
  8879. netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
  8880. NETIF_F_HW_VLAN_CTAG_RX |
  8881. NETIF_F_HW_VLAN_CTAG_TX;
  8882. netdev->priv_flags |= IFF_UNICAST_FLT;
  8883. netdev->priv_flags |= IFF_SUPP_NOFCS;
  8884. /* MTU range: 68 - 9710 */
  8885. netdev->min_mtu = ETH_MIN_MTU;
  8886. netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
  8887. #ifdef CONFIG_IXGBE_DCB
  8888. if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
  8889. netdev->dcbnl_ops = &ixgbe_dcbnl_ops;
  8890. #endif
  8891. #ifdef IXGBE_FCOE
  8892. if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
  8893. unsigned int fcoe_l;
  8894. if (hw->mac.ops.get_device_caps) {
  8895. hw->mac.ops.get_device_caps(hw, &device_caps);
  8896. if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
  8897. adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
  8898. }
  8899. fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus());
  8900. adapter->ring_feature[RING_F_FCOE].limit = fcoe_l;
  8901. netdev->features |= NETIF_F_FSO |
  8902. NETIF_F_FCOE_CRC;
  8903. netdev->vlan_features |= NETIF_F_FSO |
  8904. NETIF_F_FCOE_CRC |
  8905. NETIF_F_FCOE_MTU;
  8906. }
  8907. #endif /* IXGBE_FCOE */
  8908. if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
  8909. netdev->hw_features |= NETIF_F_LRO;
  8910. if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
  8911. netdev->features |= NETIF_F_LRO;
  8912. /* make sure the EEPROM is good */
  8913. if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
  8914. e_dev_err("The EEPROM Checksum Is Not Valid\n");
  8915. err = -EIO;
  8916. goto err_sw_init;
  8917. }
  8918. eth_platform_get_mac_address(&adapter->pdev->dev,
  8919. adapter->hw.mac.perm_addr);
  8920. memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
  8921. if (!is_valid_ether_addr(netdev->dev_addr)) {
  8922. e_dev_err("invalid MAC address\n");
  8923. err = -EIO;
  8924. goto err_sw_init;
  8925. }
  8926. /* Set hw->mac.addr to permanent MAC address */
  8927. ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
  8928. ixgbe_mac_set_default_filter(adapter);
  8929. timer_setup(&adapter->service_timer, ixgbe_service_timer, 0);
  8930. if (ixgbe_removed(hw->hw_addr)) {
  8931. err = -EIO;
  8932. goto err_sw_init;
  8933. }
  8934. INIT_WORK(&adapter->service_task, ixgbe_service_task);
  8935. set_bit(__IXGBE_SERVICE_INITED, &adapter->state);
  8936. clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
  8937. err = ixgbe_init_interrupt_scheme(adapter);
  8938. if (err)
  8939. goto err_sw_init;
  8940. for (i = 0; i < adapter->num_rx_queues; i++)
  8941. u64_stats_init(&adapter->rx_ring[i]->syncp);
  8942. for (i = 0; i < adapter->num_tx_queues; i++)
  8943. u64_stats_init(&adapter->tx_ring[i]->syncp);
  8944. for (i = 0; i < adapter->num_xdp_queues; i++)
  8945. u64_stats_init(&adapter->xdp_ring[i]->syncp);
  8946. /* WOL not supported for all devices */
  8947. adapter->wol = 0;
  8948. hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
  8949. hw->wol_enabled = ixgbe_wol_supported(adapter, pdev->device,
  8950. pdev->subsystem_device);
  8951. if (hw->wol_enabled)
  8952. adapter->wol = IXGBE_WUFC_MAG;
  8953. device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
  8954. /* save off EEPROM version number */
  8955. hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh);
  8956. hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl);
  8957. /* pick up the PCI bus settings for reporting later */
  8958. if (ixgbe_pcie_from_parent(hw))
  8959. ixgbe_get_parent_bus_info(adapter);
  8960. else
  8961. hw->mac.ops.get_bus_info(hw);
  8962. /* calculate the expected PCIe bandwidth required for optimal
  8963. * performance. Note that some older parts will never have enough
  8964. * bandwidth due to being older generation PCIe parts. We clamp these
  8965. * parts to ensure no warning is displayed if it can't be fixed.
  8966. */
  8967. switch (hw->mac.type) {
  8968. case ixgbe_mac_82598EB:
  8969. expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16);
  8970. break;
  8971. default:
  8972. expected_gts = ixgbe_enumerate_functions(adapter) * 10;
  8973. break;
  8974. }
  8975. /* don't check link if we failed to enumerate functions */
  8976. if (expected_gts > 0)
  8977. ixgbe_check_minimum_link(adapter, expected_gts);
  8978. err = ixgbe_read_pba_string_generic(hw, part_str, sizeof(part_str));
  8979. if (err)
  8980. strlcpy(part_str, "Unknown", sizeof(part_str));
  8981. if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
  8982. e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
  8983. hw->mac.type, hw->phy.type, hw->phy.sfp_type,
  8984. part_str);
  8985. else
  8986. e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
  8987. hw->mac.type, hw->phy.type, part_str);
  8988. e_dev_info("%pM\n", netdev->dev_addr);
  8989. /* reset the hardware with the new settings */
  8990. err = hw->mac.ops.start_hw(hw);
  8991. if (err == IXGBE_ERR_EEPROM_VERSION) {
  8992. /* We are running on a pre-production device, log a warning */
  8993. e_dev_warn("This device is a pre-production adapter/LOM. "
  8994. "Please be aware there may be issues associated "
  8995. "with your hardware. If you are experiencing "
  8996. "problems please contact your Intel or hardware "
  8997. "representative who provided you with this "
  8998. "hardware.\n");
  8999. }
  9000. strcpy(netdev->name, "eth%d");
  9001. pci_set_drvdata(pdev, adapter);
  9002. err = register_netdev(netdev);
  9003. if (err)
  9004. goto err_register;
  9005. /* power down the optics for 82599 SFP+ fiber */
  9006. if (hw->mac.ops.disable_tx_laser)
  9007. hw->mac.ops.disable_tx_laser(hw);
  9008. /* carrier off reporting is important to ethtool even BEFORE open */
  9009. netif_carrier_off(netdev);
  9010. #ifdef CONFIG_IXGBE_DCA
  9011. if (dca_add_requester(&pdev->dev) == 0) {
  9012. adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
  9013. ixgbe_setup_dca(adapter);
  9014. }
  9015. #endif
  9016. if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
  9017. e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
  9018. for (i = 0; i < adapter->num_vfs; i++)
  9019. ixgbe_vf_configuration(pdev, (i | 0x10000000));
  9020. }
9021. /* firmware requires the driver version to be 0xFFFFFFFF
9022. * since the OS does not support this feature
  9023. */
  9024. if (hw->mac.ops.set_fw_drv_ver)
  9025. hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF,
  9026. sizeof(ixgbe_driver_version) - 1,
  9027. ixgbe_driver_version);
  9028. /* add san mac addr to netdev */
  9029. ixgbe_add_sanmac_netdev(netdev);
  9030. e_dev_info("%s\n", ixgbe_default_device_descr);
  9031. #ifdef CONFIG_IXGBE_HWMON
  9032. if (ixgbe_sysfs_init(adapter))
  9033. e_err(probe, "failed to allocate sysfs resources\n");
  9034. #endif /* CONFIG_IXGBE_HWMON */
  9035. ixgbe_dbg_adapter_init(adapter);
  9036. /* setup link for SFP devices with MNG FW, else wait for IXGBE_UP */
  9037. if (ixgbe_mng_enabled(hw) && ixgbe_is_sfp(hw) && hw->mac.ops.setup_link)
  9038. hw->mac.ops.setup_link(hw,
  9039. IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
  9040. true);
  9041. return 0;
  9042. err_register:
  9043. ixgbe_release_hw_control(adapter);
  9044. ixgbe_clear_interrupt_scheme(adapter);
  9045. err_sw_init:
  9046. ixgbe_disable_sriov(adapter);
  9047. adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
  9048. iounmap(adapter->io_addr);
  9049. kfree(adapter->jump_tables[0]);
  9050. kfree(adapter->mac_table);
  9051. kfree(adapter->rss_key);
  9052. err_ioremap:
  9053. disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
  9054. free_netdev(netdev);
  9055. err_alloc_etherdev:
  9056. pci_release_mem_regions(pdev);
  9057. err_pci_reg:
  9058. err_dma:
  9059. if (!adapter || disable_dev)
  9060. pci_disable_device(pdev);
  9061. return err;
  9062. }
  9063. /**
  9064. * ixgbe_remove - Device Removal Routine
  9065. * @pdev: PCI device information struct
  9066. *
  9067. * ixgbe_remove is called by the PCI subsystem to alert the driver
9068. * that it should release a PCI device. This could be caused by a
  9069. * Hot-Plug event, or because the driver is going to be removed from
  9070. * memory.
  9071. **/
  9072. static void ixgbe_remove(struct pci_dev *pdev)
  9073. {
  9074. struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
  9075. struct net_device *netdev;
  9076. bool disable_dev;
  9077. int i;
  9078. /* if !adapter then we already cleaned up in probe */
  9079. if (!adapter)
  9080. return;
  9081. netdev = adapter->netdev;
  9082. ixgbe_dbg_adapter_exit(adapter);
  9083. set_bit(__IXGBE_REMOVING, &adapter->state);
  9084. cancel_work_sync(&adapter->service_task);
  9085. #ifdef CONFIG_IXGBE_DCA
  9086. if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
  9087. adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
  9088. dca_remove_requester(&pdev->dev);
  9089. IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
  9090. IXGBE_DCA_CTRL_DCA_DISABLE);
  9091. }
  9092. #endif
  9093. #ifdef CONFIG_IXGBE_HWMON
  9094. ixgbe_sysfs_exit(adapter);
  9095. #endif /* CONFIG_IXGBE_HWMON */
  9096. /* remove the added san mac */
  9097. ixgbe_del_sanmac_netdev(netdev);
  9098. #ifdef CONFIG_PCI_IOV
  9099. ixgbe_disable_sriov(adapter);
  9100. #endif
  9101. if (netdev->reg_state == NETREG_REGISTERED)
  9102. unregister_netdev(netdev);
  9103. ixgbe_clear_interrupt_scheme(adapter);
  9104. ixgbe_release_hw_control(adapter);
  9105. #ifdef CONFIG_DCB
  9106. kfree(adapter->ixgbe_ieee_pfc);
  9107. kfree(adapter->ixgbe_ieee_ets);
  9108. #endif
  9109. iounmap(adapter->io_addr);
  9110. pci_release_mem_regions(pdev);
  9111. e_dev_info("complete\n");
  9112. for (i = 0; i < IXGBE_MAX_LINK_HANDLE; i++) {
  9113. if (adapter->jump_tables[i]) {
  9114. kfree(adapter->jump_tables[i]->input);
  9115. kfree(adapter->jump_tables[i]->mask);
  9116. }
  9117. kfree(adapter->jump_tables[i]);
  9118. }
  9119. kfree(adapter->mac_table);
  9120. kfree(adapter->rss_key);
  9121. disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
  9122. free_netdev(netdev);
  9123. pci_disable_pcie_error_reporting(pdev);
  9124. if (disable_dev)
  9125. pci_disable_device(pdev);
  9126. }
/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

#ifdef CONFIG_PCI_IOV
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *bdev, *vfdev;
	u32 dw0, dw1, dw2, dw3;
	int vf, pos;
	u16 req_id, pf_func;

	if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
	    adapter->num_vfs == 0)
		goto skip_bad_vf_detection;

	bdev = pdev->bus->self;
	while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT))
		bdev = bdev->bus->self;

	if (!bdev)
		goto skip_bad_vf_detection;

	pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		goto skip_bad_vf_detection;

	dw0 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG);
	dw1 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 4);
	dw2 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 8);
	dw3 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 12);
	if (ixgbe_removed(hw->hw_addr))
		goto skip_bad_vf_detection;

	req_id = dw1 >> 16;
	/* On the 82599 if bit 7 of the requestor ID is set then it's a VF */
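	/*
	 * The decode below assumes the 82599-style SR-IOV routing-ID layout
	 * (VF offset 0x80, stride 2): bit 7 set marks a VF requester, bit 0
	 * selects which PF function owns it, and bits 6:1 hold the VF index
	 * recovered as (req_id & 0x7F) >> 1.
	 */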
	if (!(req_id & 0x0080))
		goto skip_bad_vf_detection;

	pf_func = req_id & 0x01;
	if ((pf_func & 1) == (pdev->devfn & 1)) {
		unsigned int device_id;

		vf = (req_id & 0x7F) >> 1;
		e_dev_err("VF %d has caused a PCIe error\n", vf);
		e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: "
			  "%8.8x\tdw3: %8.8x\n",
			  dw0, dw1, dw2, dw3);
		switch (adapter->hw.mac.type) {
		case ixgbe_mac_82599EB:
			device_id = IXGBE_82599_VF_DEVICE_ID;
			break;
		case ixgbe_mac_X540:
			device_id = IXGBE_X540_VF_DEVICE_ID;
			break;
		case ixgbe_mac_X550:
			device_id = IXGBE_DEV_ID_X550_VF;
			break;
		case ixgbe_mac_X550EM_x:
			device_id = IXGBE_DEV_ID_X550EM_X_VF;
			break;
		case ixgbe_mac_x550em_a:
			device_id = IXGBE_DEV_ID_X550EM_A_VF;
			break;
		default:
			device_id = 0;
			break;
		}

		/* Find the pci device of the offending VF */
		vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL);
		while (vfdev) {
			if (vfdev->devfn == (req_id & 0xFF))
				break;
			vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
					       device_id, vfdev);
		}
		/*
		 * There's a slim chance the VF could have been hot plugged,
		 * so if it is no longer present we don't need to issue the
		 * VFLR.  Just clean up the AER in that case.
		 */
		if (vfdev) {
			pcie_flr(vfdev);
			/* Free device reference count */
			pci_dev_put(vfdev);
		}

		pci_cleanup_aer_uncorrect_error_status(pdev);
	}

	/*
	 * Even though the error may have occurred on the other port
	 * we still need to increment the vf error reference count for
	 * both ports because the I/O resume function will be called
	 * for both of them.
	 */
	adapter->vferr_refcount++;

	return PCI_ERS_RESULT_RECOVERED;

skip_bad_vf_detection:
#endif /* CONFIG_PCI_IOV */
	if (!test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
		return PCI_ERS_RESULT_DISCONNECT;

	if (!netif_device_present(netdev))
		return PCI_ERS_RESULT_DISCONNECT;

	rtnl_lock();
	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(netdev))
		ixgbe_close_suspend(adapter);

	if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
		pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * ixgbe_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		e_err(probe, "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		smp_mb__before_atomic();
		clear_bit(__IXGBE_DISABLED, &adapter->state);
		adapter->hw.hw_addr = adapter->io_addr;
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);
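		/*
		 * Saving state again right after the restore is assumed to
		 * keep a valid snapshot around for later suspend/shutdown
		 * paths, since pci_restore_state() invalidates the saved
		 * state it consumes.
		 */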
		pci_wake_from_d3(pdev, false);

		ixgbe_reset(adapter);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		e_dev_err("pci_cleanup_aer_uncorrect_error_status "
			  "failed 0x%0x\n", err);
		/* non-fatal, continue */
	}

	return result;
}
/**
 * ixgbe_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void ixgbe_io_resume(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

#ifdef CONFIG_PCI_IOV
	if (adapter->vferr_refcount) {
		e_info(drv, "Resuming after VF err\n");
		adapter->vferr_refcount--;
		return;
	}

#endif
	rtnl_lock();
	if (netif_running(netdev))
		ixgbe_open(netdev);

	netif_device_attach(netdev);
	rtnl_unlock();
}
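/*
 * PCI error recovery invokes these callbacks in sequence: error_detected()
 * decides whether a slot reset is needed, slot_reset() reinitializes the
 * function after the bus has been reset, and resume() reopens the interface
 * once the core reports that traffic may flow again.
 */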
static const struct pci_error_handlers ixgbe_err_handler = {
	.error_detected = ixgbe_io_error_detected,
	.slot_reset = ixgbe_io_slot_reset,
	.resume = ixgbe_io_resume,
};

static struct pci_driver ixgbe_driver = {
	.name = ixgbe_driver_name,
	.id_table = ixgbe_pci_tbl,
	.probe = ixgbe_probe,
	.remove = ixgbe_remove,
#ifdef CONFIG_PM
	.suspend = ixgbe_suspend,
	.resume = ixgbe_resume,
#endif
	.shutdown = ixgbe_shutdown,
	.sriov_configure = ixgbe_pci_sriov_configure,
	.err_handler = &ixgbe_err_handler
};
/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbe_init_module(void)
{
	int ret;

	pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
	pr_info("%s\n", ixgbe_copyright);

	ixgbe_wq = create_singlethread_workqueue(ixgbe_driver_name);
	if (!ixgbe_wq) {
		pr_err("%s: Failed to create workqueue\n", ixgbe_driver_name);
		return -ENOMEM;
	}

	ixgbe_dbg_init();
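	/*
	 * The workqueue and debugfs root are created before registration
	 * because pci_register_driver() may invoke ixgbe_probe() for
	 * already-present devices before it returns.
	 */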
	ret = pci_register_driver(&ixgbe_driver);
	if (ret) {
		destroy_workqueue(ixgbe_wq);
		ixgbe_dbg_exit();
		return ret;
	}

#ifdef CONFIG_IXGBE_DCA
	dca_register_notify(&dca_notifier);
#endif

	return 0;
}
module_init(ixgbe_init_module);

/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbe_exit_module(void)
{
#ifdef CONFIG_IXGBE_DCA
	dca_unregister_notify(&dca_notifier);
#endif
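	/*
	 * Unregistering the PCI driver runs ixgbe_remove() for every bound
	 * device, so the workqueue used by the service task is destroyed
	 * only afterwards.
	 */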
	pci_unregister_driver(&ixgbe_driver);

	ixgbe_dbg_exit();
	if (ixgbe_wq) {
		destroy_workqueue(ixgbe_wq);
		ixgbe_wq = NULL;
	}
}
#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
			    void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
					 __ixgbe_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_IXGBE_DCA */

module_exit(ixgbe_exit_module);

/* ixgbe_main.c */